package lnwallet

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"math"
	"slices"
	"sync"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/btcsuite/btcd/btcec/v2/ecdsa"
	"github.com/btcsuite/btcd/btcec/v2/schnorr/musig2"
	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/btcutil/txsort"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/mempool"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btclog"
	"github.com/davecgh/go-spew/spew"
	"github.com/lightningnetwork/lnd/build"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/channeldb/models"
	"github.com/lightningnetwork/lnd/fn"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/keychain"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnutils"
	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/shachain"
	"github.com/lightningnetwork/lnd/tlv"
)

var (
	// ErrChanClosing is returned when a caller attempts to close a channel
	// that has already been closed or is in the process of being closed.
	ErrChanClosing = fmt.Errorf("channel is being closed, operation disallowed")

	// ErrNoWindow is returned when the revocation window is exhausted.
	ErrNoWindow = fmt.Errorf("unable to sign new commitment, the current" +
		" revocation window is exhausted")

	// ErrMaxWeightCost is returned when the cost/weight (see segwit)
	// exceeds the widely used maximum allowed policy weight limit. In this
	// case the commitment transaction can't be propagated through the
	// network.
	ErrMaxWeightCost = fmt.Errorf("commitment transaction exceed max " +
		"available cost")

	// ErrMaxHTLCNumber is returned when a proposed HTLC would exceed the
	// maximum number of allowed HTLC's if committed in a state transition.
	ErrMaxHTLCNumber = fmt.Errorf("commitment transaction exceed max " +
		"htlc number")

	// ErrMaxPendingAmount is returned when a proposed HTLC would exceed
	// the overall maximum pending value of all HTLCs if committed in a
	// state transition.
	ErrMaxPendingAmount = fmt.Errorf("commitment transaction exceed max" +
		"overall pending htlc value")

	// ErrBelowChanReserve is returned when a proposed HTLC would cause
	// one of the peer's funds to dip below the channel reserve limit.
	ErrBelowChanReserve = fmt.Errorf("commitment transaction dips peer " +
		"below chan reserve")

	// ErrBelowMinHTLC is returned when a proposed HTLC has a value that
	// is below the minimum HTLC value constraint for either us or our
	// peer depending on which flags are set.
	ErrBelowMinHTLC = fmt.Errorf("proposed HTLC value is below minimum " +
		"allowed HTLC value")

	// ErrFeeBufferNotInitiator is returned when the FeeBuffer is enforced
	// although the channel was not initiated (opened) locally.
	ErrFeeBufferNotInitiator = fmt.Errorf("unable to enforce FeeBuffer, " +
		"not initiator of the channel")

	// ErrInvalidHTLCAmt signals that a proposed HTLC has a value that is
	// not positive.
	ErrInvalidHTLCAmt = fmt.Errorf("proposed HTLC value must be positive")

	// ErrCannotSyncCommitChains is returned if, upon receiving a ChanSync
	// message, the state machine deems that it is unable to properly
	// synchronize states with the remote peer. In this case we should fail
	// the channel, but we won't automatically force close.
	ErrCannotSyncCommitChains = fmt.Errorf("unable to sync commit chains")

	// ErrInvalidLastCommitSecret is returned in the case that the
	// commitment secret sent by the remote party in their
	// ChannelReestablish message doesn't match the last secret we sent.
	ErrInvalidLastCommitSecret = fmt.Errorf("commit secret is incorrect")

	// ErrInvalidLocalUnrevokedCommitPoint is returned in the case that the
	// commitment point sent by the remote party in their
	// ChannelReestablish message doesn't match the last unrevoked commit
	// point they sent us.
	ErrInvalidLocalUnrevokedCommitPoint = fmt.Errorf("unrevoked commit " +
		"point is invalid")

	// ErrCommitSyncRemoteDataLoss is returned in the case that we receive
	// a ChannelReestablish message from the remote that advertises a
	// NextLocalCommitHeight that is lower than what they have already
	// ACKed, or a RemoteCommitTailHeight that is lower than our revoked
	// height. In this case we should force close the channel such that
	// both parties can retrieve their funds.
	ErrCommitSyncRemoteDataLoss = fmt.Errorf("possible remote commitment " +
		"state data loss")

	// ErrNoRevocationLogFound is returned when both the returned logs are
	// nil from querying the revocation log bucket. In theory this should
	// never happen as the query will return `ErrLogEntryNotFound`, yet
	// we'd still perform a sanity check to make sure at least one of the
	// logs is non-nil.
	ErrNoRevocationLogFound = errors.New("no revocation log found")

	// ErrOutputIndexOutOfRange is returned when an output index is greater
	// than or equal to the length of a given transaction's outputs.
	ErrOutputIndexOutOfRange = errors.New("output index is out of range")

	// ErrRevLogDataMissing is returned when a certain wanted optional field
	// in a revocation log entry is missing.
	ErrRevLogDataMissing = errors.New("revocation log data missing")

	// ErrForceCloseLocalDataLoss is returned in the case a user (or
	// another sub-system) attempts to force close when we've detected that
	// we've likely lost data ourselves.
	ErrForceCloseLocalDataLoss = errors.New("cannot force close " +
		"channel with local data loss")

	// errNoNonce is returned when a nonce is required, but none is found.
	errNoNonce = errors.New("no nonce found")

	// errNoPartialSig is returned when a partial signature is required,
	// but none is found.
	errNoPartialSig = errors.New("no partial signature found")
)
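
// The sentinel errors above are plain error values, so callers outside the
// state machine can branch on them with errors.Is. A minimal illustrative
// sketch (the addHTLC helper used here is hypothetical, not part of this
// package's API):
//
//	if err := addHTLC(); errors.Is(err, ErrMaxPendingAmount) {
//		// Back off: the channel cannot carry any more pending value.
//	}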

// ErrCommitSyncLocalDataLoss is returned in the case that we receive a valid
// commit secret within the ChannelReestablish message from the remote node AND
// they advertise a RemoteCommitTailHeight higher than our current known
// height. This means we have lost some critical data, and must fail the
// channel and MUST NOT force close it. Instead we should wait for the remote
// to force close it, such that we can attempt to sweep our funds. The
// commitment point needed to sweep the remote's force close is encapsulated.
type ErrCommitSyncLocalDataLoss struct {
	// ChannelPoint is the identifier for the channel that experienced data
	// loss.
	ChannelPoint wire.OutPoint

	// CommitPoint is the last unrevoked commit point, sent to us by the
	// remote when we determined we had lost state.
	CommitPoint *btcec.PublicKey
}

// Error returns a string representation of the local data loss error.
func (e *ErrCommitSyncLocalDataLoss) Error() string {
	return fmt.Sprintf("ChannelPoint(%v) with CommitPoint(%x) had "+
		"possible local commitment state data loss", e.ChannelPoint,
		e.CommitPoint.SerializeCompressed())
}
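
// Because ErrCommitSyncLocalDataLoss is a typed error rather than a sentinel
// value, callers can recover the embedded commitment point with errors.As. A
// minimal illustrative sketch (syncErr is assumed to come from a channel sync
// attempt elsewhere):
//
//	var dataLoss *ErrCommitSyncLocalDataLoss
//	if errors.As(syncErr, &dataLoss) {
//		// Persist dataLoss.CommitPoint so the remote's force close
//		// can be swept later.
//	}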

// PaymentHash represents the sha256 of a random value. This hash is used to
// uniquely track incoming/outgoing payments within this channel, as well as
// payments requested by the wallet/daemon.
type PaymentHash [32]byte
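
// Since PaymentHash has an underlying [32]byte, the output of sha256.Sum256
// can be assigned to it directly. A minimal illustrative sketch (the preimage
// value is assumed to be a 32-byte secret chosen by the payment receiver):
//
//	var preimage [32]byte
//	var hash PaymentHash = sha256.Sum256(preimage[:])
//	_ = hash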

// commitment represents a commitment to a new state within an active channel.
// New commitments can be initiated by either side. Commitments are ordered
// into a commitment chain, with one existing for both parties. Each side can
// independently extend the other side's commitment chain, up to a certain
// "revocation window", which once reached, disallows new commitments until
// the local node receives the revocation for the remote node's chain tail.
type commitment struct {
	// height represents the commitment height of this commitment, or the
	// update number of this commitment.
	height uint64

	// whoseCommit indicates whether this is the local or remote node's
	// version of the commitment.
	whoseCommit lntypes.ChannelParty

	// messageIndices are the indexes into the HTLC log, up to which this
	// commitment transaction includes. These indexes allow both sides to
	// independently, and concurrently, create new commitments. Each new
	// commitment sent to the remote party includes an index in the shared
	// log which details which of their updates we're including in this
	// new commitment.
	messageIndices lntypes.Dual[uint64]

	// [our|their]HtlcIndex are the current running counters for the HTLCs
	// offered by either party. This value is incremented each time a party
	// offers a new HTLC. The log update methods that consume HTLCs will
	// reference these counters, rather than the running cumulative message
	// counters.
	ourHtlcIndex   uint64
	theirHtlcIndex uint64

	// txn is the commitment transaction generated by including any HTLC
	// updates whose indexes are below the two indexes listed above. If
	// this commitment is being added to the remote chain, then this txn
	// is their version of the commitment transaction. If the local commit
	// chain is being modified, the opposite is true.
	txn *wire.MsgTx

	// sig is a signature for the above commitment transaction.
	sig []byte

	// [our|their]Balance represents the settled balances at this point
	// within the commitment chain. This balance is computed by properly
	// evaluating all the add/remove/settle log entries before the listed
	// indexes.
	//
	// NOTE: This is the balance *after* subtracting any commitment fee,
	// AND anchor output values.
	ourBalance   lnwire.MilliSatoshi
	theirBalance lnwire.MilliSatoshi

	// fee is the amount that will be paid as fees for this commitment
	// transaction. The fee is recorded here so that it can be added back
	// and recalculated for each new update to the channel state.
	fee btcutil.Amount

	// feePerKw is the fee per kw used to calculate this commitment
	// transaction's fee.
	feePerKw chainfee.SatPerKWeight

	// dustLimit is the limit on the commitment transaction such that no
	// output values should be below this amount.
	dustLimit btcutil.Amount

	// outgoingHTLCs is a slice of all the outgoing HTLC's (from our PoV)
	// on this commitment transaction.
	outgoingHTLCs []paymentDescriptor

	// incomingHTLCs is a slice of all the incoming HTLC's (from our PoV)
	// on this commitment transaction.
	incomingHTLCs []paymentDescriptor

	// customBlob stores opaque bytes that may be used by custom channels
	// to store extra data for a given commitment state.
	customBlob fn.Option[tlv.Blob]

	// [outgoing|incoming]HTLCIndex is an index that maps an output index
	// on the commitment transaction to the payment descriptor that
	// represents the HTLC output.
	//
	// NOTE: These fields are only populated if this commitment state
	// belongs to the local node. These maps are used when validating any
	// HTLC signatures which are part of the local commitment state. We
	// use this map in order to locate the details needed to validate an
	// HTLC signature while iterating over the outputs in the local
	// commitment view.
	outgoingHTLCIndex map[int32]*paymentDescriptor
	incomingHTLCIndex map[int32]*paymentDescriptor
}

// locateOutputIndex is a small helper function to locate the output index of a
// particular HTLC within the current commitment transaction. The duplicate map
// passed in is to be retained for each output within the commitment
// transition. This ensures that we don't assign multiple HTLCs to the same
// index within the commitment transaction.
func locateOutputIndex(p *paymentDescriptor, tx *wire.MsgTx,
	whoseCommit lntypes.ChannelParty, dups map[PaymentHash][]int32,
	cltvs []uint32) (int32, error) {

	// If this is their commitment transaction, we'll be trying to locate
	// their pkScripts, otherwise we'll be looking for ours. This is
	// required as the commitment states are asymmetric in order to ascribe
	// blame in the case of a contract breach.
	pkScript := p.theirPkScript
	if whoseCommit.IsLocal() {
		pkScript = p.ourPkScript
	}

	for i, txOut := range tx.TxOut {
		cltv := cltvs[i]

		if bytes.Equal(txOut.PkScript, pkScript) &&
			txOut.Value == int64(p.Amount.ToSatoshis()) &&
			cltv == p.Timeout {

			// If this payment hash and index has already been
			// found, then we'll continue in order to avoid any
			// duplicate indexes.
			if fn.Elem(int32(i), dups[p.RHash]) {
				continue
			}

			idx := int32(i)
			dups[p.RHash] = append(dups[p.RHash], idx)
			return idx, nil
		}
	}

	return 0, fmt.Errorf("unable to find htlc: script=%x, value=%v, "+
		"cltv=%v", pkScript, p.Amount, p.Timeout)
}
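
// A minimal illustrative sketch of how locateOutputIndex is meant to be
// driven: the caller retains a single dups map across all HTLCs of one
// commitment so that repeated payment hashes never map to the same output
// index (htlc, commitTx and cltvs are assumed to be in scope):
//
//	dups := make(map[PaymentHash][]int32)
//	idx, err := locateOutputIndex(htlc, commitTx, lntypes.Local, dups, cltvs)
//	if err != nil {
//		// The HTLC could not be matched to any commitment output.
//	}
//	_ = idx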

// populateHtlcIndexes modifies the set of HTLCs locked-into the target view
// to have full indexing information populated. This information is required as
// we need to keep track of the indexes of each HTLC in order to properly write
// the current state to disk, and also to locate the paymentDescriptor
// corresponding to HTLC outputs in the commitment transaction.
func (c *commitment) populateHtlcIndexes(chanType channeldb.ChannelType,
	cltvs []uint32) error {

	// First, we'll set up some state to allow us to locate the output
	// index of all the HTLCs within the commitment transaction. We must
	// keep this index so we can validate the HTLC signatures sent to us.
	dups := make(map[PaymentHash][]int32)
	c.outgoingHTLCIndex = make(map[int32]*paymentDescriptor)
	c.incomingHTLCIndex = make(map[int32]*paymentDescriptor)

	// populateIndex is a helper function that populates the necessary
	// indexes within the commitment view for a particular HTLC.
	populateIndex := func(htlc *paymentDescriptor, incoming bool) error {
		isDust := HtlcIsDust(
			chanType, incoming, c.whoseCommit, c.feePerKw,
			htlc.Amount.ToSatoshis(), c.dustLimit,
		)

		var err error
		switch {

		// If this is our commitment transaction, and this is a dust
		// output then we mark it as such using a -1 index.
		case c.whoseCommit.IsLocal() && isDust:
			htlc.localOutputIndex = -1

		// If this is the commitment transaction of the remote party,
		// and this is a dust output then we mark it as such using a -1
		// index.
		case c.whoseCommit.IsRemote() && isDust:
			htlc.remoteOutputIndex = -1

		// If this is our commitment transaction, then we'll need to
		// locate the output and the index so we can verify the HTLC
		// signatures.
		case c.whoseCommit.IsLocal():
			htlc.localOutputIndex, err = locateOutputIndex(
				htlc, c.txn, c.whoseCommit, dups, cltvs,
			)
			if err != nil {
				return err
			}

			// As this is our commitment transaction, we need to
			// keep track of the locations of each output on the
			// transaction so we can verify any HTLC signatures
			// sent to us after we construct the HTLC view.
			if incoming {
				c.incomingHTLCIndex[htlc.localOutputIndex] = htlc
			} else {
				c.outgoingHTLCIndex[htlc.localOutputIndex] = htlc
			}

		// Otherwise, this is the remote party's commitment
		// transaction and we only need to populate the remote output
		// index within the HTLC index.
		case c.whoseCommit.IsRemote():
			htlc.remoteOutputIndex, err = locateOutputIndex(
				htlc, c.txn, c.whoseCommit, dups, cltvs,
			)
			if err != nil {
				return err
			}

		default:
			return fmt.Errorf("invalid commitment configuration")
		}

		return nil
	}

	// Finally, we'll need to locate the index within the commitment
	// transaction of all the HTLC outputs. This index will be required
	// later when we write the commitment state to disk, and also when
	// generating signatures for each of the HTLC transactions.
	for i := 0; i < len(c.outgoingHTLCs); i++ {
		htlc := &c.outgoingHTLCs[i]
		if err := populateIndex(htlc, false); err != nil {
			return err
		}
	}
	for i := 0; i < len(c.incomingHTLCs); i++ {
		htlc := &c.incomingHTLCs[i]
		if err := populateIndex(htlc, true); err != nil {
			return err
		}
	}

	return nil
}

// toDiskCommit converts the target commitment into a format suitable to be
// written to disk after an accepted state transition.
func (c *commitment) toDiskCommit(
	whoseCommit lntypes.ChannelParty) *channeldb.ChannelCommitment {

	numHtlcs := len(c.outgoingHTLCs) + len(c.incomingHTLCs)

	commit := &channeldb.ChannelCommitment{
		CommitHeight:    c.height,
		LocalLogIndex:   c.messageIndices.Local,
		LocalHtlcIndex:  c.ourHtlcIndex,
		RemoteLogIndex:  c.messageIndices.Remote,
		RemoteHtlcIndex: c.theirHtlcIndex,
		LocalBalance:    c.ourBalance,
		RemoteBalance:   c.theirBalance,
		CommitFee:       c.fee,
		FeePerKw:        btcutil.Amount(c.feePerKw),
		CommitTx:        c.txn,
		CommitSig:       c.sig,
		Htlcs:           make([]channeldb.HTLC, 0, numHtlcs),
		CustomBlob:      c.customBlob,
	}

	for _, htlc := range c.outgoingHTLCs {
		outputIndex := htlc.localOutputIndex
		if whoseCommit.IsRemote() {
			outputIndex = htlc.remoteOutputIndex
		}

		h := channeldb.HTLC{
			RHash:         htlc.RHash,
			Amt:           htlc.Amount,
			RefundTimeout: htlc.Timeout,
			OutputIndex:   outputIndex,
			HtlcIndex:     htlc.HtlcIndex,
			LogIndex:      htlc.LogIndex,
			Incoming:      false,
			OnionBlob:     htlc.OnionBlob,
			BlindingPoint: htlc.BlindingPoint,
			CustomRecords: htlc.CustomRecords.Copy(),
		}

		if whoseCommit.IsLocal() && htlc.sig != nil {
			h.Signature = htlc.sig.Serialize()
		}

		commit.Htlcs = append(commit.Htlcs, h)
	}

	for _, htlc := range c.incomingHTLCs {
		outputIndex := htlc.localOutputIndex
		if whoseCommit.IsRemote() {
			outputIndex = htlc.remoteOutputIndex
		}

		h := channeldb.HTLC{
			RHash:         htlc.RHash,
			Amt:           htlc.Amount,
			RefundTimeout: htlc.Timeout,
			OutputIndex:   outputIndex,
			HtlcIndex:     htlc.HtlcIndex,
			LogIndex:      htlc.LogIndex,
			Incoming:      true,
			OnionBlob:     htlc.OnionBlob,
			BlindingPoint: htlc.BlindingPoint,
			CustomRecords: htlc.CustomRecords.Copy(),
		}
		if whoseCommit.IsLocal() && htlc.sig != nil {
			h.Signature = htlc.sig.Serialize()
		}

		commit.Htlcs = append(commit.Htlcs, h)
	}

	return commit
}

// diskHtlcToPayDesc converts an HTLC previously written to disk within a
// commitment state to the form required to manipulate in memory within the
// commitment struct and updateLog. This function is used when we need to
// restore commitment state written to disk back into memory once we need to
// restart a channel session.
func (lc *LightningChannel) diskHtlcToPayDesc(feeRate chainfee.SatPerKWeight,
	htlc *channeldb.HTLC, commitKeys lntypes.Dual[*CommitmentKeyRing],
	whoseCommit lntypes.ChannelParty,
	auxLeaf input.AuxTapLeaf) (paymentDescriptor, error) {

	// The proper pkScripts for this paymentDescriptor must be
	// generated so we can easily locate them within the commitment
	// transaction in the future.
	var (
		ourP2WSH, theirP2WSH                 []byte
		ourWitnessScript, theirWitnessScript []byte
		pd                                   paymentDescriptor
		chanType                             = lc.channelState.ChanType
	)

	// If either output is dust from the local or remote node's
	// perspective, then we don't need to generate the scripts as we only
	// generate them in order to locate the outputs within the commitment
	// transaction, as we'll mark dust with a special output index in the
	// on-disk state snapshot.
	isDustLocal := HtlcIsDust(
		chanType, htlc.Incoming, lntypes.Local, feeRate,
		htlc.Amt.ToSatoshis(), lc.channelState.LocalChanCfg.DustLimit,
	)
	localCommitKeys := commitKeys.GetForParty(lntypes.Local)
	if !isDustLocal && localCommitKeys != nil {
		scriptInfo, err := genHtlcScript(
			chanType, htlc.Incoming, lntypes.Local,
			htlc.RefundTimeout, htlc.RHash, localCommitKeys,
			auxLeaf,
		)
		if err != nil {
			return pd, err
		}
		ourP2WSH = scriptInfo.PkScript()
		ourWitnessScript = scriptInfo.WitnessScriptToSign()
	}
	isDustRemote := HtlcIsDust(
		chanType, htlc.Incoming, lntypes.Remote, feeRate,
		htlc.Amt.ToSatoshis(), lc.channelState.RemoteChanCfg.DustLimit,
	)
	remoteCommitKeys := commitKeys.GetForParty(lntypes.Remote)
	if !isDustRemote && remoteCommitKeys != nil {
		scriptInfo, err := genHtlcScript(
			chanType, htlc.Incoming, lntypes.Remote,
			htlc.RefundTimeout, htlc.RHash, remoteCommitKeys,
			auxLeaf,
		)
		if err != nil {
			return pd, err
		}
		theirP2WSH = scriptInfo.PkScript()
		theirWitnessScript = scriptInfo.WitnessScriptToSign()
	}

	// Reconstruct the proper local/remote output indexes from the HTLC's
	// persisted output index depending on whose commitment we are
	// generating.
	var (
		localOutputIndex  int32
		remoteOutputIndex int32
	)
	if whoseCommit.IsLocal() {
		localOutputIndex = htlc.OutputIndex
	} else {
		remoteOutputIndex = htlc.OutputIndex
	}

	// With the scripts reconstructed (depending on if this is our commit
	// vs theirs or a pending commit for the remote party), we can now
	// re-create the original payment descriptor.
	return paymentDescriptor{
		ChanID:             lc.ChannelID(),
		RHash:              htlc.RHash,
		Timeout:            htlc.RefundTimeout,
		Amount:             htlc.Amt,
		EntryType:          Add,
		HtlcIndex:          htlc.HtlcIndex,
		LogIndex:           htlc.LogIndex,
		OnionBlob:          htlc.OnionBlob,
		localOutputIndex:   localOutputIndex,
		remoteOutputIndex:  remoteOutputIndex,
		ourPkScript:        ourP2WSH,
		ourWitnessScript:   ourWitnessScript,
		theirPkScript:      theirP2WSH,
		theirWitnessScript: theirWitnessScript,
		BlindingPoint:      htlc.BlindingPoint,
		CustomRecords:      htlc.CustomRecords.Copy(),
	}, nil
}

// extractPayDescs will convert all HTLC's present within a disk commit state
// to a set of incoming and outgoing payment descriptors. Once reconstructed,
// these payment descriptors can be re-inserted into the in-memory updateLog
// for each side.
func (lc *LightningChannel) extractPayDescs(feeRate chainfee.SatPerKWeight,
	htlcs []channeldb.HTLC, commitKeys lntypes.Dual[*CommitmentKeyRing],
	whoseCommit lntypes.ChannelParty,
	auxLeaves fn.Option[CommitAuxLeaves]) ([]paymentDescriptor,
	[]paymentDescriptor, error) {

	var (
		incomingHtlcs []paymentDescriptor
		outgoingHtlcs []paymentDescriptor
	)

	// For each included HTLC within this commitment state, we'll convert
	// the disk format into our in memory paymentDescriptor format,
	// partitioning based on if we offered or received the HTLC.
	for _, htlc := range htlcs {
		// TODO(roasbeef): set isForwarded to false for all? need to
		// persist state w.r.t to if forwarded or not, or can
		// inadvertently trigger replays

		htlc := htlc

		auxLeaf := fn.ChainOption(
			func(l CommitAuxLeaves) input.AuxTapLeaf {
				leaves := l.OutgoingHtlcLeaves
				if htlc.Incoming {
					leaves = l.IncomingHtlcLeaves
				}

				return leaves[htlc.HtlcIndex].AuxTapLeaf
			},
		)(auxLeaves)

		payDesc, err := lc.diskHtlcToPayDesc(
			feeRate, &htlc, commitKeys, whoseCommit, auxLeaf,
		)
		if err != nil {
			return incomingHtlcs, outgoingHtlcs, err
		}

		if htlc.Incoming {
			incomingHtlcs = append(incomingHtlcs, payDesc)
		} else {
			outgoingHtlcs = append(outgoingHtlcs, payDesc)
		}
	}

	return incomingHtlcs, outgoingHtlcs, nil
}

// diskCommitToMemCommit converts the on-disk commitment format to our
// in-memory commitment format which is needed in order to properly resume
// channel operations after a restart.
func (lc *LightningChannel) diskCommitToMemCommit(
	whoseCommit lntypes.ChannelParty,
	diskCommit *channeldb.ChannelCommitment, localCommitPoint,
	remoteCommitPoint *btcec.PublicKey) (*commitment, error) {

	// First, we'll need to re-derive the commitment key ring for each
	// party used within this particular state. If this is a pending commit
	// (we extended but weren't able to complete the commitment dance
	// before shutdown), then the localCommitPoint won't be set as we
	// haven't yet received a responding commitment from the remote party.
	var commitKeys lntypes.Dual[*CommitmentKeyRing]
	if localCommitPoint != nil {
		commitKeys.SetForParty(lntypes.Local, DeriveCommitmentKeys(
			localCommitPoint, lntypes.Local,
			lc.channelState.ChanType,
			&lc.channelState.LocalChanCfg,
			&lc.channelState.RemoteChanCfg,
		))
	}
	if remoteCommitPoint != nil {
		commitKeys.SetForParty(lntypes.Remote, DeriveCommitmentKeys(
			remoteCommitPoint, lntypes.Remote,
			lc.channelState.ChanType,
			&lc.channelState.LocalChanCfg,
			&lc.channelState.RemoteChanCfg,
		))
	}

	auxResult, err := fn.MapOptionZ(
		lc.leafStore,
		func(s AuxLeafStore) fn.Result[CommitDiffAuxResult] {
			return s.FetchLeavesFromCommit(
				NewAuxChanState(lc.channelState), *diskCommit,
				*commitKeys.GetForParty(whoseCommit),
			)
		},
	).Unpack()
	if err != nil {
		return nil, fmt.Errorf("unable to fetch aux leaves: %w", err)
	}

	// With the key rings re-created, we'll now convert all the on-disk
	// HTLCs into paymentDescriptors so we can re-insert them into our
	// update log.
	incomingHtlcs, outgoingHtlcs, err := lc.extractPayDescs(
		chainfee.SatPerKWeight(diskCommit.FeePerKw),
		diskCommit.Htlcs, commitKeys, whoseCommit, auxResult.AuxLeaves,
	)
	if err != nil {
		return nil, err
	}

	messageIndices := lntypes.Dual[uint64]{
		Local:  diskCommit.LocalLogIndex,
		Remote: diskCommit.RemoteLogIndex,
	}

	// With the necessary items generated, we'll now re-construct the
	// commitment state as it was originally present in memory.
	commit := &commitment{
		height:         diskCommit.CommitHeight,
		whoseCommit:    whoseCommit,
		ourBalance:     diskCommit.LocalBalance,
		theirBalance:   diskCommit.RemoteBalance,
		messageIndices: messageIndices,
		ourHtlcIndex:   diskCommit.LocalHtlcIndex,
		theirHtlcIndex: diskCommit.RemoteHtlcIndex,
		txn:            diskCommit.CommitTx,
		sig:            diskCommit.CommitSig,
		fee:            diskCommit.CommitFee,
		feePerKw:       chainfee.SatPerKWeight(diskCommit.FeePerKw),
		incomingHTLCs:  incomingHtlcs,
		outgoingHTLCs:  outgoingHtlcs,
		customBlob:     diskCommit.CustomBlob,
	}
	if whoseCommit.IsLocal() {
		commit.dustLimit = lc.channelState.LocalChanCfg.DustLimit
	} else {
		commit.dustLimit = lc.channelState.RemoteChanCfg.DustLimit
	}

	return commit, nil
}
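
// Taken together, toDiskCommit and diskCommitToMemCommit form the round trip
// between the in-memory and persisted commitment representations. A minimal
// illustrative sketch (lc, memCommit, localPoint and remotePoint are assumed
// to be in scope, and error handling is elided):
//
//	diskCommit := memCommit.toDiskCommit(lntypes.Local)
//	restored, _ := lc.diskCommitToMemCommit(
//		lntypes.Local, diskCommit, localPoint, remotePoint,
//	)
//	_ = restored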

// LightningChannel implements the state machine which corresponds to the
// current commitment protocol wire spec. The state machine implemented allows
// for asynchronous fully desynchronized, batched+pipelined updates to
// commitment transactions allowing for a high degree of non-blocking
// bi-directional payment throughput.
//
// In order to allow updates to be fully non-blocking, either side is able to
// create multiple new commitment states up to a pre-determined window size.
// This window size is encoded within InitialRevocationWindow. Before the start
// of a session, both sides should send out revocation messages with nil
// preimages in order to populate their revocation window for the remote party.
//
// The state machine has four main methods:
//   - .SignNextCommitment()
//     - Called once when one wishes to sign the next commitment, either
//       initiating a new state update, or responding to a received commitment.
//   - .ReceiveNewCommitment()
//     - Called upon receipt of a new commitment from the remote party. If the
//       new commitment is valid, then a revocation should immediately be
//       generated and sent.
//   - .RevokeCurrentCommitment()
//     - Revokes the current commitment. Should be called directly after
//       receiving a new commitment.
//   - .ReceiveRevocation()
//     - Processes a revocation from the remote party. If successful, creates
//       a new de facto broadcastable state.
//
// See the individual comments within the above methods for further details.
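//
// With this ordering, a typical exchange in which one side adds HTLCs and
// then signs looks roughly like the following (a sketch of the expected
// message flow, not an additional API):
//
//	Alice -> Add, Add, Add
//	Alice -> Sig
//	Revoke <- Bob
//	Sig    <- Bob
//	Alice -> Revoke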
type LightningChannel struct {
	// Signer is the main signer instance that will be responsible for
	// signing any HTLC and commitment transaction generated by the state
	// machine.
	Signer input.Signer

	// leafStore is used to retrieve extra tapscript leaves for special
	// custom channel types.
	leafStore fn.Option[AuxLeafStore]

	// signDesc is the primary sign descriptor that is capable of signing
	// the commitment transaction that spends the multi-sig output.
	signDesc *input.SignDescriptor

	isClosed bool

	// sigPool is a pool of workers that are capable of signing and
	// validating signatures in parallel. This is utilized as an
	// optimization to avoid serially signing or validating the HTLC
	// signatures, of which there may be hundreds.
	sigPool *SigPool

	// auxSigner is a special signer used to obtain opaque signatures for
	// custom channel variants.
	auxSigner fn.Option[AuxSigner]

	// auxResolver is an optional component that can be used to modify the
	// way contracts are resolved.
	auxResolver fn.Option[AuxContractResolver]

	// Capacity is the total capacity of this channel.
	Capacity btcutil.Amount

	// currentHeight is the current height of our local commitment chain.
	// This is also the same as the number of updates to the channel we've
	// accepted.
	currentHeight uint64

	// commitChains is a Dual of the local and remote node's commitment
	// chains. Any new commitments we initiate are added to the Remote
	// chain's tip. The Local portion of this field is our local commitment
	// chain. Any new commitments received are added to the tip of this
	// chain. The tail (or lowest height) in this chain is our current
	// accepted state, which we are able to broadcast safely.
	commitChains lntypes.Dual[*commitmentChain]

	channelState *channeldb.OpenChannel

	commitBuilder *CommitmentBuilder

	// updateLogs holds the local and remote (mostly) append-only logs
	// storing all the HTLC updates to this channel. The log is walked
	// backwards as HTLC updates are applied in order to re-construct a
	// commitment transaction from a commitment. The log is compacted once
	// a revocation is received.
	updateLogs lntypes.Dual[*updateLog]

	// log is a channel-specific logging instance.
	log btclog.Logger

	// taprootNonceProducer is used to generate a shachain tree for the
	// purpose of generating verification nonces for taproot channels.
	taprootNonceProducer shachain.Producer

	// musigSessions holds the current musig2 pair session for the channel.
	musigSessions *MusigPairSession

	// pendingVerificationNonce is the initial verification nonce generated
	// for musig2 channels when the state machine is initiated. Once we
	// know the verification nonce of the remote party, then we can start
	// to use the channel as normal.
	pendingVerificationNonce *musig2.Nonces

	// fundingOutput is the funding output (script+value).
	fundingOutput wire.TxOut

	// opts is the set of options that the channel was initialized with.
	opts *channelOpts

	sync.RWMutex
}

// ChannelOpt is a functional option that lets callers modify how a new channel
// is created.
type ChannelOpt func(*channelOpts)

// channelOpts is the set of options used to create a new channel.
type channelOpts struct {
	localNonce  *musig2.Nonces
	remoteNonce *musig2.Nonces

	leafStore   fn.Option[AuxLeafStore]
	auxSigner   fn.Option[AuxSigner]
	auxResolver fn.Option[AuxContractResolver]

	skipNonceInit bool
}

// WithLocalMusigNonces is used to bind an existing verification/local nonce to
// a new channel.
func WithLocalMusigNonces(nonce *musig2.Nonces) ChannelOpt {
	return func(o *channelOpts) {
		o.localNonce = nonce
	}
}

// WithRemoteMusigNonces is used to bind the remote party's local/verification
// nonce to a new channel.
func WithRemoteMusigNonces(nonces *musig2.Nonces) ChannelOpt {
	return func(o *channelOpts) {
		o.remoteNonce = nonces
	}
}

// WithSkipNonceInit is used to modify the way nonces are handled during
// channel initialization for taproot channels. If this option is specified,
// then when we receive the chan reest message from the remote party, we won't
// modify our nonce state. This is needed if we create a channel, get a channel
// ready message, then also get the chan reest message after that.
func WithSkipNonceInit() ChannelOpt {
	return func(o *channelOpts) {
		o.skipNonceInit = true
	}
}

// WithLeafStore is used to specify a custom leaf store for the channel.
func WithLeafStore(store AuxLeafStore) ChannelOpt {
	return func(o *channelOpts) {
		o.leafStore = fn.Some[AuxLeafStore](store)
	}
}

// WithAuxSigner is used to specify a custom aux signer for the channel.
func WithAuxSigner(signer AuxSigner) ChannelOpt {
	return func(o *channelOpts) {
		o.auxSigner = fn.Some[AuxSigner](signer)
	}
}

// WithAuxResolver is used to specify a custom aux contract resolver for the
// channel.
func WithAuxResolver(resolver AuxContractResolver) ChannelOpt {
	return func(o *channelOpts) {
		o.auxResolver = fn.Some[AuxContractResolver](resolver)
	}
}

// defaultChannelOpts returns the set of default options for a new channel.
func defaultChannelOpts() *channelOpts {
	return &channelOpts{}
}
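
// The ChannelOpt helpers above follow the functional options pattern: each
// returns a closure that mutates channelOpts before the channel is built. A
// minimal illustrative sketch of wiring them into NewLightningChannel (the
// signer, state, sigPool, myLeafStore and myAuxSigner values are assumed to
// exist elsewhere):
//
//	channel, err := NewLightningChannel(
//		signer, state, sigPool,
//		WithLeafStore(myLeafStore),
//		WithAuxSigner(myAuxSigner),
//	)
//	if err != nil {
//		// Handle construction failure.
//	}
//	_ = channel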

// NewLightningChannel creates a new, active payment channel given an
// implementation of the chain notifier, channel database, and the current
// settled channel state. Throughout state transitions, the channel will
// automatically persist pertinent state to the database in an efficient
// manner.
func NewLightningChannel(signer input.Signer,
|
|
|
state *channeldb.OpenChannel,
|
2023-01-20 04:19:10 +01:00
|
|
|
sigPool *SigPool, chanOpts ...ChannelOpt) (*LightningChannel, error) {
|
|
|
|
|
|
|
|
opts := defaultChannelOpts()
|
|
|
|
for _, optFunc := range chanOpts {
|
|
|
|
optFunc(opts)
|
|
|
|
}
|
2015-12-17 05:58:01 +01:00
|
|
|
|
2017-11-10 07:51:03 +01:00
|
|
|
localCommit := state.LocalCommitment
|
|
|
|
remoteCommit := state.RemoteCommitment
|
|
|
|
|
|
|
|
// First, initialize the update logs with their current counter values
|
|
|
|
// from the local and remote commitments.
|
|
|
|
localUpdateLog := newUpdateLog(
|
2018-05-16 13:40:43 +02:00
|
|
|
remoteCommit.LocalLogIndex, remoteCommit.LocalHtlcIndex,
|
2017-11-10 07:51:03 +01:00
|
|
|
)
|
|
|
|
remoteUpdateLog := newUpdateLog(
|
2018-05-16 13:40:43 +02:00
|
|
|
localCommit.RemoteLogIndex, localCommit.RemoteHtlcIndex,
|
2017-11-10 07:51:03 +01:00
|
|
|
)
|
2024-08-09 22:00:59 +02:00
|
|
|
updateLogs := lntypes.Dual[*updateLog]{
|
|
|
|
Local: localUpdateLog,
|
|
|
|
Remote: remoteUpdateLog,
|
|
|
|
}
|
2017-11-10 07:51:03 +01:00
|
|
|
|
2019-09-24 13:12:53 +02:00
|
|
|
logPrefix := fmt.Sprintf("ChannelPoint(%v):", state.FundingOutpoint)
|
|
|
|
|
2023-07-12 03:56:15 +02:00
|
|
|
taprootNonceProducer, err := channeldb.DeriveMusig2Shachain(
|
2023-07-11 03:25:06 +02:00
|
|
|
state.RevocationProducer,
|
|
|
|
)
|
2023-01-20 04:24:48 +01:00
|
|
|
if err != nil {
|
2023-07-23 17:35:20 +02:00
|
|
|
return nil, fmt.Errorf("unable to derive shachain: %w", err)
|
2023-01-20 04:24:48 +01:00
|
|
|
}
|
|
|
|
|
2024-08-09 21:47:58 +02:00
|
|
|
commitChains := lntypes.Dual[*commitmentChain]{
|
|
|
|
Local: newCommitmentChain(),
|
|
|
|
Remote: newCommitmentChain(),
|
|
|
|
}
|
|
|
|
|
2015-12-31 07:36:01 +01:00
|
|
|
lc := &LightningChannel{
|
2024-08-09 21:47:58 +02:00
|
|
|
Signer: signer,
|
|
|
|
leafStore: opts.leafStore,
|
2024-04-09 04:47:26 +02:00
|
|
|
auxSigner: opts.auxSigner,
|
2024-06-04 07:58:57 +02:00
|
|
|
auxResolver: opts.auxResolver,
|
2024-08-09 21:47:58 +02:00
|
|
|
sigPool: sigPool,
|
|
|
|
currentHeight: localCommit.CommitHeight,
|
|
|
|
commitChains: commitChains,
|
|
|
|
channelState: state,
|
2024-03-17 21:53:38 +01:00
|
|
|
commitBuilder: NewCommitmentBuilder(
|
|
|
|
state, opts.leafStore,
|
|
|
|
),
|
2024-08-09 22:00:59 +02:00
|
|
|
updateLogs: updateLogs,
|
2023-01-20 04:24:48 +01:00
|
|
|
Capacity: state.Capacity,
|
|
|
|
taprootNonceProducer: taprootNonceProducer,
|
|
|
|
log: build.NewPrefixLog(logPrefix, walletLog),
|
2023-10-13 17:00:49 +02:00
|
|
|
opts: opts,
|
2023-01-20 04:24:48 +01:00
|
|
|
}
|
|
|
|
|
2023-07-24 21:20:57 +02:00
|
|
|
switch {
|
2023-01-20 04:24:48 +01:00
|
|
|
// At this point, we may already have nonces that were passed in, so
|
2023-01-20 04:19:10 +01:00
|
|
|
// we'll check that now as this lets us skip some steps later.
|
2023-07-24 21:20:57 +02:00
|
|
|
case state.ChanType.IsTaproot() && opts.localNonce != nil:
|
2023-01-20 04:19:10 +01:00
|
|
|
lc.pendingVerificationNonce = opts.localNonce
|
2023-07-24 21:20:57 +02:00
|
|
|
|
|
|
|
// Otherwise, we'll generate the nonces here ourselves. This ensures
|
|
|
|
// we'll be able to process the chan sync message from the remote
|
|
|
|
// party.
|
|
|
|
case state.ChanType.IsTaproot() && opts.localNonce == nil:
|
|
|
|
_, err := lc.GenMusigNonces()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-01-20 04:19:10 +01:00
|
|
|
}
|
|
|
|
if lc.pendingVerificationNonce != nil && opts.remoteNonce != nil {
|
|
|
|
err := lc.InitRemoteMusigNonces(opts.remoteNonce)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-10 07:51:03 +01:00
|
|
|
// With the main channel struct reconstructed, we'll now restore the
|
|
|
|
// commitment state in memory and also the update logs themselves.
|
2023-01-20 04:24:48 +01:00
|
|
|
err = lc.restoreCommitState(&localCommit, &remoteCommit)
|
2017-11-10 07:51:03 +01:00
|
|
|
if err != nil {
|
2017-03-25 00:25:59 +01:00
|
|
|
return nil, err
|
|
|
|
}
|
2017-08-15 19:09:16 +02:00
|
|
|
|
2016-09-12 21:33:22 +02:00
|
|
|
// Create the sign descriptor which we'll be using very frequently to
|
|
|
|
// request a signature for the 2-of-2 multi-sig from the signer in
|
|
|
|
// order to complete channel state transitions.
|
2019-03-11 00:38:45 +01:00
|
|
|
if err := lc.createSignDesc(); err != nil {
|
2015-12-31 07:36:01 +01:00
|
|
|
return nil, err
|
|
|
|
}
|
2017-09-26 06:47:39 +02:00
|
|
|
|
|
|
|
return lc, nil
|
|
|
|
}
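// The snippet below is a minimal, hypothetical sketch of how a caller (in
// lnd, the server) might wire a channel up with the shared SigPool described
// in the commit message above. The signer, dbChannel, and numWorkers values
// are placeholders assumed to exist in the calling code:
//
//	sigPool := NewSigPool(numWorkers, signer)
//	if err := sigPool.Start(); err != nil {
//		return err
//	}
//	defer func() { _ = sigPool.Stop() }()
//
//	channel, err := NewLightningChannel(signer, dbChannel, sigPool)
//	if err != nil {
//		return err
//	}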
|
|
|
|
|
|
|
|
// createSignDesc derives the SignDescriptor for commitment transactions from
|
|
|
|
// other fields on the LightningChannel.
|
|
|
|
func (lc *LightningChannel) createSignDesc() error {
|
|
|
|
|
2023-01-20 04:19:10 +01:00
|
|
|
var (
|
|
|
|
fundingPkScript, multiSigScript []byte
|
|
|
|
err error
|
|
|
|
)
|
|
|
|
|
|
|
|
chanState := lc.channelState
|
|
|
|
localKey := chanState.LocalChanCfg.MultiSigKey.PubKey
|
|
|
|
remoteKey := chanState.RemoteChanCfg.MultiSigKey.PubKey
|
|
|
|
|
|
|
|
if chanState.ChanType.IsTaproot() {
|
|
|
|
fundingPkScript, _, err = input.GenTaprootFundingScript(
|
|
|
|
localKey, remoteKey, int64(lc.channelState.Capacity),
|
2024-03-13 15:54:49 +01:00
|
|
|
chanState.TapscriptRoot,
|
2023-01-20 04:19:10 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
multiSigScript, err = input.GenMultiSigScript(
|
2023-07-23 17:35:20 +02:00
|
|
|
localKey.SerializeCompressed(),
|
|
|
|
remoteKey.SerializeCompressed(),
|
2023-01-20 04:19:10 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
fundingPkScript, err = input.WitnessScriptHash(multiSigScript)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-09-26 06:47:39 +02:00
|
|
|
}
|
|
|
|
|
2023-01-20 04:19:10 +01:00
|
|
|
lc.fundingOutput = wire.TxOut{
|
|
|
|
PkScript: fundingPkScript,
|
|
|
|
Value: int64(lc.channelState.Capacity),
|
2017-09-26 06:47:39 +02:00
|
|
|
}
|
2019-01-16 15:47:43 +01:00
|
|
|
lc.signDesc = &input.SignDescriptor{
|
2020-01-06 11:42:02 +01:00
|
|
|
KeyDesc: lc.channelState.LocalChanCfg.MultiSigKey,
|
2017-07-30 21:25:41 +02:00
|
|
|
WitnessScript: multiSigScript,
|
2023-01-20 04:19:10 +01:00
|
|
|
Output: &lc.fundingOutput,
|
|
|
|
HashType: txscript.SigHashAll,
|
|
|
|
InputIndex: 0,
|
2016-08-13 00:50:47 +02:00
|
|
|
}
|
|
|
|
|
2017-09-26 06:47:39 +02:00
|
|
|
return nil
|
|
|
|
}
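// As a rough, hedged sketch (not code from this package), the descriptor
// built above is what gets handed to the Signer whenever a commitment
// signature is required; commitTx is an assumed placeholder, and fields such
// as SigHashes and InputIndex are filled in for the specific transaction at
// signing time:
//
//	sig, err := lc.Signer.SignOutputRaw(commitTx, lc.signDesc)
//	if err != nil {
//		return err
//	}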
|
2017-07-30 21:50:57 +02:00
|
|
|
|
2017-11-23 20:49:48 +01:00
|
|
|
// ResetState resets the state of the channel back to the default state. This
|
|
|
|
// ensures that any active goroutines which need to act based on on-chain
|
|
|
|
// events do so properly.
|
|
|
|
func (lc *LightningChannel) ResetState() {
|
|
|
|
lc.Lock()
|
2024-01-31 03:06:49 +01:00
|
|
|
lc.isClosed = false
|
2017-11-23 20:49:48 +01:00
|
|
|
lc.Unlock()
|
2017-07-30 21:50:57 +02:00
|
|
|
}
|
2017-07-30 22:11:33 +02:00
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
// logUpdateToPayDesc converts a LogUpdate into a matching paymentDescriptor
|
2017-11-10 07:51:03 +01:00
|
|
|
// entry that can be re-inserted into the update log. This method is used when
|
|
|
|
// we extended a state to the remote party, but the connection was obstructed
|
|
|
|
// before we could finish the commitment dance. In this case, we need to
|
|
|
|
// re-insert the original entries back into the update log so we can resume as
|
|
|
|
// if nothing happened.
|
|
|
|
func (lc *LightningChannel) logUpdateToPayDesc(logUpdate *channeldb.LogUpdate,
|
|
|
|
remoteUpdateLog *updateLog, commitHeight uint64,
|
2019-10-31 03:43:05 +01:00
|
|
|
feeRate chainfee.SatPerKWeight, remoteCommitKeys *CommitmentKeyRing,
|
2024-04-25 19:01:37 +02:00
|
|
|
remoteDustLimit btcutil.Amount,
|
2024-06-15 01:30:28 +02:00
|
|
|
auxLeaves fn.Option[CommitAuxLeaves]) (*paymentDescriptor, error) {
|
2017-11-10 07:51:03 +01:00
|
|
|
|
|
|
|
// Depending on the type of update message we'll map that to a distinct
|
2024-06-15 01:30:28 +02:00
|
|
|
// paymentDescriptor instance.
|
|
|
|
var pd *paymentDescriptor
|
2017-11-10 07:51:03 +01:00
|
|
|
|
|
|
|
switch wireMsg := logUpdate.UpdateMsg.(type) {
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
// For offered HTLC's, we'll map that to a paymentDescriptor with the
|
2017-11-10 07:51:03 +01:00
|
|
|
// type Add, ensuring we restore the necessary fields. From the PoV of
|
2017-12-18 03:40:05 +01:00
|
|
|
// the commitment chain, this HTLC was included in the remote chain,
|
2017-11-10 07:51:03 +01:00
|
|
|
// but not the local chain.
|
|
|
|
case *lnwire.UpdateAddHTLC:
|
|
|
|
// First, we'll map all the relevant fields in the
|
|
|
|
// UpdateAddHTLC message to their corresponding fields in the
|
2024-06-15 01:30:28 +02:00
|
|
|
// paymentDescriptor struct. We also set addCommitHeightRemote
|
2017-11-10 07:51:03 +01:00
|
|
|
// as we've included this HTLC in our local commitment chain
|
|
|
|
// for the remote party.
|
2024-06-15 01:30:28 +02:00
|
|
|
pd = &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
RHash: wireMsg.PaymentHash,
|
|
|
|
Timeout: wireMsg.Expiry,
|
|
|
|
Amount: wireMsg.Amount,
|
|
|
|
EntryType: Add,
|
|
|
|
HtlcIndex: wireMsg.ID,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
OnionBlob: wireMsg.OnionBlob,
|
|
|
|
BlindingPoint: wireMsg.BlindingPoint,
|
|
|
|
CustomRecords: wireMsg.CustomRecords.Copy(),
|
|
|
|
addCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
|
2021-09-28 17:34:10 +02:00
|
|
|
isDustRemote := HtlcIsDust(
|
2024-07-31 01:44:18 +02:00
|
|
|
lc.channelState.ChanType, false, lntypes.Remote,
|
|
|
|
feeRate, wireMsg.Amount.ToSatoshis(), remoteDustLimit,
|
2020-03-06 16:11:49 +01:00
|
|
|
)
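// As a rough rule (see HtlcIsDust for the exact check), the HTLC is
// considered dust on the remote commitment when its value, less the fee
// of its second-level timeout/success transaction, falls below the
// remote party's dust limit.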
|
2017-11-10 07:51:03 +01:00
|
|
|
if !isDustRemote {
|
2024-04-25 19:01:37 +02:00
|
|
|
auxLeaf := fn.ChainOption(
|
|
|
|
func(l CommitAuxLeaves) input.AuxTapLeaf {
|
|
|
|
leaves := l.OutgoingHtlcLeaves
|
|
|
|
return leaves[pd.HtlcIndex].AuxTapLeaf
|
|
|
|
},
|
|
|
|
)(auxLeaves)
|
|
|
|
|
2023-03-02 06:38:53 +01:00
|
|
|
scriptInfo, err := genHtlcScript(
|
2024-07-31 01:44:18 +02:00
|
|
|
lc.channelState.ChanType, false, lntypes.Remote,
|
2020-03-06 16:11:45 +01:00
|
|
|
wireMsg.Expiry, wireMsg.PaymentHash,
|
2024-04-25 19:01:37 +02:00
|
|
|
remoteCommitKeys, auxLeaf,
|
2018-05-26 03:37:45 +02:00
|
|
|
)
|
2017-11-10 07:51:03 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-05-26 03:37:45 +02:00
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
pd.theirPkScript = scriptInfo.PkScript()
|
|
|
|
pd.theirWitnessScript = scriptInfo.WitnessScriptToSign()
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
|
2018-04-18 04:03:27 +02:00
|
|
|
// For HTLC's we're offered, we'll fetch the original offered HTLC
|
2017-11-10 07:51:03 +01:00
|
|
|
// from the remote party's update log so we can retrieve the same
|
2024-06-15 01:30:28 +02:00
|
|
|
// paymentDescriptor that SettleHTLC would produce.
|
2018-02-07 04:11:11 +01:00
|
|
|
case *lnwire.UpdateFulfillHTLC:
|
2017-11-10 07:51:03 +01:00
|
|
|
ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
pd = &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
Amount: ogHTLC.Amount,
|
|
|
|
RHash: ogHTLC.RHash,
|
|
|
|
RPreimage: wireMsg.PaymentPreimage,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
ParentIndex: ogHTLC.HtlcIndex,
|
|
|
|
EntryType: Settle,
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// If we sent a failure for a prior incoming HTLC, then we'll consult
|
|
|
|
// the update log of the remote party so we can retrieve the
|
|
|
|
// information of the original HTLC we're failing. We also set the
|
|
|
|
// removal height for the remote commitment.
|
|
|
|
case *lnwire.UpdateFailHTLC:
|
|
|
|
ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
pd = &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
Amount: ogHTLC.Amount,
|
|
|
|
RHash: ogHTLC.RHash,
|
|
|
|
ParentIndex: ogHTLC.HtlcIndex,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
EntryType: Fail,
|
|
|
|
FailReason: wireMsg.Reason[:],
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// HTLC fails due to malformed onion blobs are treated the exact same
|
|
|
|
// way as regular HTLC fails.
|
|
|
|
case *lnwire.UpdateFailMalformedHTLC:
|
|
|
|
ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
|
|
|
|
// TODO(roasbeef): err if nil?
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
pd = &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
Amount: ogHTLC.Amount,
|
|
|
|
RHash: ogHTLC.RHash,
|
|
|
|
ParentIndex: ogHTLC.HtlcIndex,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
EntryType: MalformedFail,
|
|
|
|
FailCode: wireMsg.FailureCode,
|
|
|
|
ShaOnionBlob: wireMsg.ShaOnionBlob,
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
2019-01-10 12:23:56 +01:00
|
|
|
|
|
|
|
// For fee updates we'll create a FeeUpdate type to add to the log. We
|
|
|
|
// reuse the amount field to hold the fee rate. Since the amount field
|
|
|
|
// is denominated in msat we won't lose precision when storing the
|
|
|
|
// sat/kw denominated feerate. Note that we set both the add and remove
|
|
|
|
// height to the same value, as we consider the fee update locked in by
|
|
|
|
// adding and removing it at the same height.
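// For example, a fee rate of 253 sat/kw is stored as
// lnwire.NewMSatFromSatoshis(253) = 253,000 msat, and converting back
// with ToSatoshis() recovers exactly 253 sat/kw.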
|
|
|
|
case *lnwire.UpdateFee:
|
2024-06-15 01:30:28 +02:00
|
|
|
pd = &paymentDescriptor{
|
2024-06-06 20:44:44 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
2019-01-10 12:23:56 +01:00
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
Amount: lnwire.NewMSatFromSatoshis(
|
|
|
|
btcutil.Amount(wireMsg.FeePerKw),
|
|
|
|
),
|
2024-07-17 00:35:58 +02:00
|
|
|
EntryType: FeeUpdate,
|
|
|
|
addCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
2019-01-10 12:23:56 +01:00
|
|
|
}
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return pd, nil
|
|
|
|
}
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
// localLogUpdateToPayDesc converts a LogUpdate into a matching
|
|
|
|
// paymentDescriptor entry that can be re-inserted into the local update log.
|
|
|
|
// This method is used when we sent an update+sig, receive a revocation, but
|
|
|
|
// drop right before the counterparty can sign for the update we just sent. In
|
|
|
|
// this case, we need to re-insert the original entries back into the update
|
|
|
|
// log so we'll be expecting the peer to sign them. The height of the remote
|
|
|
|
// commitment is expected to be provided and we restore all log update entries
|
|
|
|
// with this height, even though the real height may be lower. In the way these
|
|
|
|
// fields are used elsewhere, this doesn't change anything.
|
2020-07-02 08:16:04 +02:00
|
|
|
func (lc *LightningChannel) localLogUpdateToPayDesc(logUpdate *channeldb.LogUpdate,
|
2024-06-15 01:30:28 +02:00
|
|
|
remoteUpdateLog *updateLog, commitHeight uint64) (*paymentDescriptor,
|
2020-07-02 08:16:04 +02:00
|
|
|
error) {
|
|
|
|
|
|
|
|
// Since Add updates aren't saved to disk under this key, the update will
|
|
|
|
// never be an Add.
|
|
|
|
switch wireMsg := logUpdate.UpdateMsg.(type) {
|
|
|
|
// For HTLCs that we settled, we'll fetch the original offered HTLC from
|
2024-06-15 01:30:28 +02:00
|
|
|
// the remote update log so we can retrieve the same paymentDescriptor
|
|
|
|
// that ReceiveHTLCSettle would produce.
|
2020-07-02 08:16:04 +02:00
|
|
|
case *lnwire.UpdateFulfillHTLC:
|
|
|
|
ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
return &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
Amount: ogHTLC.Amount,
|
|
|
|
RHash: ogHTLC.RHash,
|
|
|
|
RPreimage: wireMsg.PaymentPreimage,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
ParentIndex: ogHTLC.HtlcIndex,
|
|
|
|
EntryType: Settle,
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
2020-07-02 08:16:04 +02:00
|
|
|
}, nil
|
|
|
|
|
|
|
|
// If we sent a failure for a prior incoming HTLC, then we'll consult the
|
|
|
|
// remote update log so we can retrieve the information of the original
|
|
|
|
// HTLC we're failing.
|
|
|
|
case *lnwire.UpdateFailHTLC:
|
|
|
|
ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
return &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
Amount: ogHTLC.Amount,
|
|
|
|
RHash: ogHTLC.RHash,
|
|
|
|
ParentIndex: ogHTLC.HtlcIndex,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
EntryType: Fail,
|
|
|
|
FailReason: wireMsg.Reason[:],
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
2020-07-02 08:16:04 +02:00
|
|
|
}, nil
|
|
|
|
|
|
|
|
// HTLC fails due to malformed onion blobs are treated the exact same
|
|
|
|
// way as regular HTLC fails.
|
|
|
|
case *lnwire.UpdateFailMalformedHTLC:
|
|
|
|
ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
return &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
Amount: ogHTLC.Amount,
|
|
|
|
RHash: ogHTLC.RHash,
|
|
|
|
ParentIndex: ogHTLC.HtlcIndex,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
EntryType: MalformedFail,
|
|
|
|
FailCode: wireMsg.FailureCode,
|
|
|
|
ShaOnionBlob: wireMsg.ShaOnionBlob,
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
2020-07-02 08:16:04 +02:00
|
|
|
}, nil
|
|
|
|
|
|
|
|
case *lnwire.UpdateFee:
|
2024-06-15 01:30:28 +02:00
|
|
|
return &paymentDescriptor{
|
2024-06-06 20:44:44 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
2020-07-02 08:16:04 +02:00
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
Amount: lnwire.NewMSatFromSatoshis(
|
|
|
|
btcutil.Amount(wireMsg.FeePerKw),
|
|
|
|
),
|
2024-07-17 00:35:58 +02:00
|
|
|
EntryType: FeeUpdate,
|
|
|
|
addCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Remote: commitHeight,
|
|
|
|
},
|
2020-07-02 08:16:04 +02:00
|
|
|
}, nil
|
|
|
|
|
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf("unknown message type: %T", wireMsg)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-03 15:53:51 +01:00
|
|
|
// remoteLogUpdateToPayDesc converts a LogUpdate into a matching
|
2024-06-15 01:30:28 +02:00
|
|
|
// paymentDescriptor entry that can be re-inserted into the update log. This
|
2020-01-03 15:53:51 +01:00
|
|
|
// method is used when we revoked a local commitment, but the connection was
|
|
|
|
// obstructed before we could sign a remote commitment that contains these
|
|
|
|
// updates. In this case, we need to re-insert the original entries back into
|
|
|
|
// the update log so we can resume as if nothing happened. The height of the
|
|
|
|
// latest local commitment is also expected to be provided. We are restoring all
|
|
|
|
// log update entries with this height, even though the real commitment height
|
|
|
|
// may be lower. In the way these fields are used elsewhere, this doesn't change
|
|
|
|
// anything.
|
|
|
|
func (lc *LightningChannel) remoteLogUpdateToPayDesc(logUpdate *channeldb.LogUpdate,
|
2024-06-15 01:30:28 +02:00
|
|
|
localUpdateLog *updateLog, commitHeight uint64) (*paymentDescriptor,
|
2020-01-03 15:53:51 +01:00
|
|
|
error) {
|
|
|
|
|
|
|
|
switch wireMsg := logUpdate.UpdateMsg.(type) {
|
|
|
|
case *lnwire.UpdateAddHTLC:
|
2024-06-15 01:30:28 +02:00
|
|
|
pd := &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
RHash: wireMsg.PaymentHash,
|
|
|
|
Timeout: wireMsg.Expiry,
|
|
|
|
Amount: wireMsg.Amount,
|
|
|
|
EntryType: Add,
|
|
|
|
HtlcIndex: wireMsg.ID,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
OnionBlob: wireMsg.OnionBlob,
|
|
|
|
BlindingPoint: wireMsg.BlindingPoint,
|
|
|
|
CustomRecords: wireMsg.CustomRecords.Copy(),
|
|
|
|
addCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Local: commitHeight,
|
|
|
|
},
|
2020-01-03 15:53:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// We don't need to generate an htlc script yet. This will be
|
|
|
|
// done once we sign our remote commitment.
|
|
|
|
|
|
|
|
return pd, nil
|
|
|
|
|
|
|
|
// For HTLCs that the remote party settled, we'll fetch the original
|
|
|
|
// offered HTLC from the local update log so we can retrieve the same
|
2024-06-15 01:30:28 +02:00
|
|
|
// paymentDescriptor that ReceiveHTLCSettle would produce.
|
2020-01-03 15:53:51 +01:00
|
|
|
case *lnwire.UpdateFulfillHTLC:
|
|
|
|
ogHTLC := localUpdateLog.lookupHtlc(wireMsg.ID)
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
return &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
Amount: ogHTLC.Amount,
|
|
|
|
RHash: ogHTLC.RHash,
|
|
|
|
RPreimage: wireMsg.PaymentPreimage,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
ParentIndex: ogHTLC.HtlcIndex,
|
|
|
|
EntryType: Settle,
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Local: commitHeight,
|
|
|
|
},
|
2020-01-03 15:53:51 +01:00
|
|
|
}, nil
|
|
|
|
|
|
|
|
// If we received a failure for a prior outgoing HTLC, then we'll
|
|
|
|
// consult the local update log so we can retrieve the information of
|
|
|
|
// the original HTLC we're failing.
|
|
|
|
case *lnwire.UpdateFailHTLC:
|
|
|
|
ogHTLC := localUpdateLog.lookupHtlc(wireMsg.ID)
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
return &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
Amount: ogHTLC.Amount,
|
|
|
|
RHash: ogHTLC.RHash,
|
|
|
|
ParentIndex: ogHTLC.HtlcIndex,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
EntryType: Fail,
|
|
|
|
FailReason: wireMsg.Reason[:],
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Local: commitHeight,
|
|
|
|
},
|
2020-01-03 15:53:51 +01:00
|
|
|
}, nil
|
|
|
|
|
|
|
|
// HTLC fails due to malformed onion blobs are treated the exact same
|
|
|
|
// way as regular HTLC fails.
|
|
|
|
case *lnwire.UpdateFailMalformedHTLC:
|
|
|
|
ogHTLC := localUpdateLog.lookupHtlc(wireMsg.ID)
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
return &paymentDescriptor{
|
2024-07-17 00:35:58 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
|
|
|
Amount: ogHTLC.Amount,
|
|
|
|
RHash: ogHTLC.RHash,
|
|
|
|
ParentIndex: ogHTLC.HtlcIndex,
|
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
EntryType: MalformedFail,
|
|
|
|
FailCode: wireMsg.FailureCode,
|
|
|
|
ShaOnionBlob: wireMsg.ShaOnionBlob,
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Local: commitHeight,
|
|
|
|
},
|
2020-01-03 15:53:51 +01:00
|
|
|
}, nil
|
|
|
|
|
|
|
|
// For fee updates we'll create a FeeUpdate type to add to the log. We
|
|
|
|
// reuse the amount field to hold the fee rate. Since the amount field
|
|
|
|
// is denominated in msat we won't lose precision when storing the
|
|
|
|
// sat/kw denominated feerate. Note that we set both the add and remove
|
|
|
|
// height to the same value, as we consider the fee update locked in by
|
|
|
|
// adding and removing it at the same height.
|
|
|
|
case *lnwire.UpdateFee:
|
2024-06-15 01:30:28 +02:00
|
|
|
return &paymentDescriptor{
|
2024-06-06 20:44:44 +02:00
|
|
|
ChanID: wireMsg.ChanID,
|
2020-01-03 15:53:51 +01:00
|
|
|
LogIndex: logUpdate.LogIndex,
|
|
|
|
Amount: lnwire.NewMSatFromSatoshis(
|
|
|
|
btcutil.Amount(wireMsg.FeePerKw),
|
|
|
|
),
|
2024-07-17 00:35:58 +02:00
|
|
|
EntryType: FeeUpdate,
|
|
|
|
addCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Local: commitHeight,
|
|
|
|
},
|
|
|
|
removeCommitHeights: lntypes.Dual[uint64]{
|
|
|
|
Local: commitHeight,
|
|
|
|
},
|
2020-01-03 15:53:51 +01:00
|
|
|
}, nil
|
|
|
|
|
|
|
|
default:
|
|
|
|
return nil, errors.New("unknown message type")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-10 07:51:03 +01:00
|
|
|
// restoreCommitState will restore the local commitment chain and updateLog
|
2018-05-26 03:37:45 +02:00
|
|
|
// state to a consistent in-memory representation of the passed disk commitment.
|
2017-11-10 07:51:03 +01:00
|
|
|
// This method is to be used upon reconnection to our channel counter party.
|
|
|
|
// Once the connection has been established, we'll prepare our in memory state
|
|
|
|
// to re-sync states with the remote party, and also verify/extend new proposed
|
|
|
|
// commitment states.
|
|
|
|
func (lc *LightningChannel) restoreCommitState(
|
2018-05-04 13:42:15 +02:00
|
|
|
localCommitState, remoteCommitState *channeldb.ChannelCommitment) error {
|
2017-11-10 07:51:03 +01:00
|
|
|
|
|
|
|
// In order to reconstruct the pkScripts on each of the pending HTLC
|
|
|
|
// outputs (if any) we'll need to regenerate the current revocation for
|
|
|
|
// this current un-revoked state as well as retrieve the current
|
|
|
|
// revocation for the remote party.
|
|
|
|
ourRevPreImage, err := lc.channelState.RevocationProducer.AtIndex(
|
|
|
|
lc.currentHeight,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
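// ComputeCommitmentPoint maps the revocation preimage (a scalar) to its
// corresponding EC point, which is what the commitment key derivation
// below expects.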
|
2019-01-16 15:47:43 +01:00
|
|
|
localCommitPoint := input.ComputeCommitmentPoint(ourRevPreImage[:])
|
2017-11-10 07:51:03 +01:00
|
|
|
remoteCommitPoint := lc.channelState.RemoteCurrentRevocation
|
|
|
|
|
|
|
|
// With the revocation state reconstructed, we can now convert the disk
|
|
|
|
// commitment into our in-memory commitment format, inserting it into
|
|
|
|
// the local commitment chain.
|
|
|
|
localCommit, err := lc.diskCommitToMemCommit(
|
2024-07-31 01:44:18 +02:00
|
|
|
lntypes.Local, localCommitState, localCommitPoint,
|
2017-11-10 07:51:03 +01:00
|
|
|
remoteCommitPoint,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2024-08-09 21:47:58 +02:00
|
|
|
lc.commitChains.Local.addCommitment(localCommit)
|
2017-11-10 07:51:03 +01:00
|
|
|
|
2021-12-12 20:56:20 +01:00
|
|
|
lc.log.Tracef("starting local commitment: %v",
|
2024-08-09 21:47:58 +02:00
|
|
|
lnutils.SpewLogClosure(lc.commitChains.Local.tail()))
|
2017-11-10 07:51:03 +01:00
|
|
|
|
|
|
|
// We'll also do the same for the remote commitment chain.
|
|
|
|
remoteCommit, err := lc.diskCommitToMemCommit(
|
2024-07-31 01:44:18 +02:00
|
|
|
lntypes.Remote, remoteCommitState, localCommitPoint,
|
2017-11-10 07:51:03 +01:00
|
|
|
remoteCommitPoint,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2024-08-09 21:47:58 +02:00
|
|
|
lc.commitChains.Remote.addCommitment(remoteCommit)
|
2017-11-10 07:51:03 +01:00
|
|
|
|
2021-12-12 20:56:20 +01:00
|
|
|
lc.log.Tracef("starting remote commitment: %v",
|
2024-08-09 21:47:58 +02:00
|
|
|
lnutils.SpewLogClosure(lc.commitChains.Remote.tail()))
|
2017-11-10 07:51:03 +01:00
|
|
|
|
|
|
|
var (
|
|
|
|
pendingRemoteCommit *commitment
|
|
|
|
pendingRemoteCommitDiff *channeldb.CommitDiff
|
2018-01-18 22:45:30 +01:00
|
|
|
pendingRemoteKeyChain *CommitmentKeyRing
|
2017-11-10 07:51:03 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
// Next, we'll check to see if we have an un-acked commitment state we
|
|
|
|
// extended to the remote party but which was never ACK'd.
|
|
|
|
pendingRemoteCommitDiff, err = lc.channelState.RemoteCommitChainTip()
|
|
|
|
if err != nil && err != channeldb.ErrNoPendingCommit {
|
2018-05-04 13:18:31 +02:00
|
|
|
return err
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if pendingRemoteCommitDiff != nil {
|
|
|
|
// If we have a pending remote commitment, then we'll also
|
|
|
|
// reconstruct the original commitment for that state,
|
|
|
|
// inserting it into the remote party's commitment chain. We
|
|
|
|
// don't pass our commit point as we don't have the
|
|
|
|
// corresponding state for the local commitment chain.
|
|
|
|
pendingCommitPoint := lc.channelState.RemoteNextRevocation
|
|
|
|
pendingRemoteCommit, err = lc.diskCommitToMemCommit(
|
2024-07-31 01:44:18 +02:00
|
|
|
lntypes.Remote, &pendingRemoteCommitDiff.Commitment,
|
2017-11-10 07:51:03 +01:00
|
|
|
nil, pendingCommitPoint,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2024-08-09 21:47:58 +02:00
|
|
|
lc.commitChains.Remote.addCommitment(pendingRemoteCommit)
|
2017-11-10 07:51:03 +01:00
|
|
|
|
2019-09-24 13:12:53 +02:00
|
|
|
lc.log.Debugf("pending remote commitment: %v",
|
2024-08-09 21:47:58 +02:00
|
|
|
lnutils.SpewLogClosure(lc.commitChains.Remote.tip()))
|
2018-05-04 13:20:11 +02:00
|
|
|
|
2017-11-10 07:51:03 +01:00
|
|
|
// We'll also re-create the set of commitment keys needed to
|
|
|
|
// fully re-derive the state.
|
2019-09-17 04:06:19 +02:00
|
|
|
pendingRemoteKeyChain = DeriveCommitmentKeys(
|
2024-07-31 01:44:18 +02:00
|
|
|
pendingCommitPoint, lntypes.Remote,
|
|
|
|
lc.channelState.ChanType,
|
|
|
|
&lc.channelState.LocalChanCfg,
|
|
|
|
&lc.channelState.RemoteChanCfg,
|
2017-11-10 07:51:03 +01:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2020-01-03 15:53:51 +01:00
|
|
|
// Fetch remote updates that we have acked but not yet signed for.
|
|
|
|
unsignedAckedUpdates, err := lc.channelState.UnsignedAckedUpdates()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-07-02 08:16:04 +02:00
|
|
|
// Fetch the local updates the peer still needs to sign for.
|
|
|
|
remoteUnsignedLocalUpdates, err := lc.channelState.RemoteUnsignedLocalUpdates()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-11-10 07:51:03 +01:00
|
|
|
// Finally, with the commitment states restored, we'll now restore the
|
|
|
|
// state logs based on the current local+remote commit, and any pending
|
|
|
|
// remote commit that exists.
|
2019-03-11 00:38:45 +01:00
|
|
|
err = lc.restoreStateLogs(
|
|
|
|
localCommit, remoteCommit, pendingRemoteCommit,
|
2017-11-10 07:51:03 +01:00
|
|
|
pendingRemoteCommitDiff, pendingRemoteKeyChain,
|
2020-07-02 08:16:04 +02:00
|
|
|
unsignedAckedUpdates, remoteUnsignedLocalUpdates,
|
2017-11-10 07:51:03 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// restoreStateLogs runs through the current locked-in HTLCs from the point of
|
|
|
|
// view of the channel and insert corresponding log entries (both local and
|
|
|
|
// remote) for each HTLC read from disk. This method is required to sync the
|
|
|
|
// in-memory state of the state machine with that read from persistent storage.
|
|
|
|
func (lc *LightningChannel) restoreStateLogs(
|
|
|
|
localCommitment, remoteCommitment, pendingRemoteCommit *commitment,
|
|
|
|
pendingRemoteCommitDiff *channeldb.CommitDiff,
|
2020-01-03 15:53:51 +01:00
|
|
|
pendingRemoteKeys *CommitmentKeyRing,
|
2020-07-02 08:16:04 +02:00
|
|
|
unsignedAckedUpdates,
|
|
|
|
remoteUnsignedLocalUpdates []channeldb.LogUpdate) error {
|
2017-11-10 07:51:03 +01:00
|
|
|
|
2018-05-28 22:09:24 +02:00
|
|
|
// We make a map of incoming HTLCs to the height of the remote
|
|
|
|
// commitment they were first added, and outgoing HTLCs to the height
|
|
|
|
// of the local commit they were first added. This will be used when we
|
|
|
|
// restore the update logs below.
|
|
|
|
incomingRemoteAddHeights := make(map[uint64]uint64)
|
|
|
|
outgoingLocalAddHeights := make(map[uint64]uint64)
|
|
|
|
|
|
|
|
// We start by setting the height of the incoming HTLCs on the pending
|
|
|
|
// remote commitment. We set these heights first since if there are
|
|
|
|
// duplicates, these will be overwritten by the lower height of the
|
|
|
|
// remoteCommitment below.
|
|
|
|
if pendingRemoteCommit != nil {
|
|
|
|
for _, r := range pendingRemoteCommit.incomingHTLCs {
|
|
|
|
incomingRemoteAddHeights[r.HtlcIndex] =
|
|
|
|
pendingRemoteCommit.height
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now set the remote commit height of all incoming HTLCs found on the
|
|
|
|
// remote commitment.
|
|
|
|
for _, r := range remoteCommitment.incomingHTLCs {
|
|
|
|
incomingRemoteAddHeights[r.HtlcIndex] = remoteCommitment.height
|
|
|
|
}
|
|
|
|
|
|
|
|
// And finally we can do the same for the outgoing HTLCs.
|
|
|
|
for _, l := range localCommitment.outgoingHTLCs {
|
|
|
|
outgoingLocalAddHeights[l.HtlcIndex] = localCommitment.height
|
|
|
|
}
|
|
|
|
|
2020-07-09 21:57:50 +02:00
|
|
|
// If we have any unsigned acked updates to sign for, then the add is no
|
|
|
|
// longer on our local commitment, but is still on the remote's commitment.
|
|
|
|
// <---fail---
|
|
|
|
// <---sig----
|
|
|
|
// ----rev--->
|
|
|
|
// To ensure proper channel operation, we restore the add's addCommitHeightLocal
|
|
|
|
// field to the height of our local commitment.
|
|
|
|
for _, logUpdate := range unsignedAckedUpdates {
|
|
|
|
var htlcIdx uint64
|
|
|
|
switch wireMsg := logUpdate.UpdateMsg.(type) {
|
|
|
|
case *lnwire.UpdateFulfillHTLC:
|
|
|
|
htlcIdx = wireMsg.ID
|
|
|
|
case *lnwire.UpdateFailHTLC:
|
|
|
|
htlcIdx = wireMsg.ID
|
|
|
|
case *lnwire.UpdateFailMalformedHTLC:
|
|
|
|
htlcIdx = wireMsg.ID
|
|
|
|
default:
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// The htlcIdx is stored in the map with the local commitment
|
|
|
|
// height so the related add's addCommitHeightLocal field can be
|
|
|
|
// restored.
|
|
|
|
outgoingLocalAddHeights[htlcIdx] = localCommitment.height
|
|
|
|
}
|
|
|
|
|
2020-07-02 08:16:04 +02:00
|
|
|
// If there are local updates that the peer needs to sign for, then the
|
|
|
|
// corresponding add is no longer on the remote commitment, but is still on
|
|
|
|
// our local commitment.
|
|
|
|
// ----fail--->
|
|
|
|
// ----sig---->
|
|
|
|
// <---rev-----
|
|
|
|
// To ensure proper channel operation, we restore the add's addCommitHeightRemote
|
|
|
|
// field to the height of the remote commitment.
|
|
|
|
for _, logUpdate := range remoteUnsignedLocalUpdates {
|
|
|
|
var htlcIdx uint64
|
|
|
|
switch wireMsg := logUpdate.UpdateMsg.(type) {
|
|
|
|
case *lnwire.UpdateFulfillHTLC:
|
|
|
|
htlcIdx = wireMsg.ID
|
|
|
|
case *lnwire.UpdateFailHTLC:
|
|
|
|
htlcIdx = wireMsg.ID
|
|
|
|
case *lnwire.UpdateFailMalformedHTLC:
|
|
|
|
htlcIdx = wireMsg.ID
|
|
|
|
default:
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// The htlcIdx is stored in the map with the remote commitment
|
|
|
|
// height so the related add's addCommitHeightRemote field can be
|
|
|
|
// restored.
|
|
|
|
incomingRemoteAddHeights[htlcIdx] = remoteCommitment.height
|
|
|
|
}
|
|
|
|
|
2018-05-16 14:39:22 +02:00
|
|
|
// For each incoming HTLC within the local commitment, we add it to the
|
|
|
|
// remote update log. Since HTLCs are added first to the receiver's
|
|
|
|
// commitment, we don't have to restore outgoing HTLCs, as they will be
|
|
|
|
// restored from the remote commitment below.
|
2017-11-10 07:51:03 +01:00
|
|
|
for i := range localCommitment.incomingHTLCs {
|
|
|
|
htlc := localCommitment.incomingHTLCs[i]
|
2018-05-28 22:09:24 +02:00
|
|
|
|
|
|
|
// We'll need to set the add height of the HTLC. Since it is on
|
|
|
|
// this local commit, we can use its height as local add
|
|
|
|
// height. As remote add height we consult the incoming HTLC
|
|
|
|
// map we created earlier. Note that if this HTLC is not in
|
|
|
|
// incomingRemoteAddHeights, the remote add height will be set
|
|
|
|
// to zero, which indicates that it is not added yet.
|
2024-07-17 00:35:58 +02:00
|
|
|
htlc.addCommitHeights.Local = localCommitment.height
|
|
|
|
htlc.addCommitHeights.Remote =
|
|
|
|
incomingRemoteAddHeights[htlc.HtlcIndex]
|
2018-05-28 22:09:24 +02:00
|
|
|
|
|
|
|
// Restore the htlc back to the remote log.
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Remote.restoreHtlc(&htlc)
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
|
2018-05-16 14:39:22 +02:00
|
|
|
// Similarly, we'll do the same for the outgoing HTLCs within the
|
|
|
|
// remote commitment, adding them to the local update log.
|
2017-11-10 07:51:03 +01:00
|
|
|
for i := range remoteCommitment.outgoingHTLCs {
|
|
|
|
htlc := remoteCommitment.outgoingHTLCs[i]
|
2018-05-28 22:09:24 +02:00
|
|
|
|
|
|
|
// As for the incoming HTLCs, we'll use the current remote
|
|
|
|
// commit height as remote add height, and consult the map
|
|
|
|
// created above for the local add height.
|
2024-07-17 00:35:58 +02:00
|
|
|
htlc.addCommitHeights.Remote = remoteCommitment.height
|
|
|
|
htlc.addCommitHeights.Local =
|
|
|
|
outgoingLocalAddHeights[htlc.HtlcIndex]
|
2018-05-28 22:09:24 +02:00
|
|
|
|
|
|
|
// Restore the htlc back to the local log.
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local.restoreHtlc(&htlc)
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
|
2020-01-06 13:14:25 +01:00
|
|
|
// If we have a dangling (un-acked) commit for the remote party, then we
|
|
|
|
// restore the updates leading up to this commit.
|
|
|
|
if pendingRemoteCommit != nil {
|
|
|
|
err := lc.restorePendingLocalUpdates(
|
|
|
|
pendingRemoteCommitDiff, pendingRemoteKeys,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
|
2020-01-03 15:53:51 +01:00
|
|
|
// Restore unsigned acked remote log updates so that we can include them
|
|
|
|
// in our next signature.
|
|
|
|
err := lc.restorePendingRemoteUpdates(
|
|
|
|
unsignedAckedUpdates, localCommitment.height,
|
2020-07-13 21:34:47 +02:00
|
|
|
pendingRemoteCommit,
|
2020-01-03 15:53:51 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-07-02 08:16:04 +02:00
|
|
|
// Restore unsigned acked local log updates so we expect the peer to
|
|
|
|
// sign for them.
|
|
|
|
return lc.restorePeerLocalUpdates(
|
|
|
|
remoteUnsignedLocalUpdates, remoteCommitment.height,
|
|
|
|
)
|
2020-01-03 15:53:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// restorePendingRemoteUpdates restores the acked remote log updates that we
|
|
|
|
// haven't yet signed for.
|
|
|
|
func (lc *LightningChannel) restorePendingRemoteUpdates(
|
|
|
|
unsignedAckedUpdates []channeldb.LogUpdate,
|
2020-07-13 21:34:47 +02:00
|
|
|
localCommitmentHeight uint64,
|
|
|
|
pendingRemoteCommit *commitment) error {
|
2020-01-03 15:53:51 +01:00
|
|
|
|
|
|
|
lc.log.Debugf("Restoring %v dangling remote updates",
|
|
|
|
len(unsignedAckedUpdates))
|
|
|
|
|
|
|
|
for _, logUpdate := range unsignedAckedUpdates {
|
|
|
|
logUpdate := logUpdate
|
|
|
|
|
|
|
|
payDesc, err := lc.remoteLogUpdateToPayDesc(
|
2024-08-09 22:00:59 +02:00
|
|
|
&logUpdate, lc.updateLogs.Local, localCommitmentHeight,
|
2020-01-03 15:53:51 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-07-13 21:34:47 +02:00
|
|
|
logIdx := payDesc.LogIndex
|
|
|
|
|
2020-01-03 15:53:51 +01:00
|
|
|
// Sanity check that we are not restoring a remote log update
|
|
|
|
// that we haven't received a sig for.
|
2024-08-09 22:00:59 +02:00
|
|
|
if logIdx >= lc.updateLogs.Remote.logIndex {
|
2020-01-03 15:53:51 +01:00
|
|
|
return fmt.Errorf("attempted to restore an "+
|
|
|
|
"unsigned remote update: log_index=%v",
|
2020-07-13 21:34:47 +02:00
|
|
|
logIdx)
|
|
|
|
}
|
|
|
|
|
2022-01-13 17:29:43 +01:00
|
|
|
// We previously restored Adds along with all the other updates,
|
2020-07-13 21:34:47 +02:00
|
|
|
// but this Add restoration was a no-op as every single one of
|
|
|
|
// these Adds was already restored since they're all incoming
|
|
|
|
// htlcs on the local commitment.
|
|
|
|
if payDesc.EntryType == Add {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
var (
|
|
|
|
height uint64
|
|
|
|
heightSet bool
|
|
|
|
)
|
|
|
|
|
|
|
|
// If we have a pending commitment for them, and this update
|
|
|
|
// is included in that commit, then we'll use this commitment
|
|
|
|
// height as this commitment will include these updates for
|
|
|
|
// their new remote commitment.
|
|
|
|
if pendingRemoteCommit != nil {
|
2024-08-09 23:52:21 +02:00
|
|
|
if logIdx < pendingRemoteCommit.messageIndices.Remote {
|
2020-07-13 21:34:47 +02:00
|
|
|
height = pendingRemoteCommit.height
|
|
|
|
heightSet = true
|
|
|
|
}
|
2020-01-03 15:53:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Insert the update into the log. The log update index doesn't
|
|
|
|
// need to be incremented (hence the restore calls), because its
|
|
|
|
// final value was properly persisted with the last local
|
|
|
|
// commitment update.
|
|
|
|
switch payDesc.EntryType {
|
2020-07-13 21:34:47 +02:00
|
|
|
case FeeUpdate:
|
|
|
|
if heightSet {
|
2024-07-17 00:35:58 +02:00
|
|
|
payDesc.addCommitHeights.Remote = height
|
|
|
|
payDesc.removeCommitHeights.Remote = height
|
2020-01-03 15:53:51 +01:00
|
|
|
}
|
|
|
|
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Remote.restoreUpdate(payDesc)
|
2020-01-03 15:53:51 +01:00
|
|
|
|
|
|
|
default:
|
2020-07-13 21:34:47 +02:00
|
|
|
if heightSet {
|
2024-07-17 00:35:58 +02:00
|
|
|
payDesc.removeCommitHeights.Remote = height
|
2020-07-13 21:34:47 +02:00
|
|
|
}
|
2020-01-03 15:53:51 +01:00
|
|
|
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Remote.restoreUpdate(payDesc)
|
|
|
|
lc.updateLogs.Local.markHtlcModified(
|
|
|
|
payDesc.ParentIndex,
|
|
|
|
)
|
2020-01-03 15:53:51 +01:00
|
|
|
}
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
|
2020-01-06 13:14:25 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-07-02 08:16:04 +02:00
|
|
|
// restorePeerLocalUpdates restores the acked local log updates the peer still
|
|
|
|
// needs to sign for.
|
|
|
|
func (lc *LightningChannel) restorePeerLocalUpdates(updates []channeldb.LogUpdate,
|
|
|
|
remoteCommitmentHeight uint64) error {
|
|
|
|
|
|
|
|
lc.log.Debugf("Restoring %v local updates that the peer should sign",
|
|
|
|
len(updates))
|
|
|
|
|
|
|
|
for _, logUpdate := range updates {
|
|
|
|
logUpdate := logUpdate
|
|
|
|
|
|
|
|
payDesc, err := lc.localLogUpdateToPayDesc(
|
2024-08-09 22:00:59 +02:00
|
|
|
&logUpdate, lc.updateLogs.Remote,
|
|
|
|
remoteCommitmentHeight,
|
2020-07-02 08:16:04 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local.restoreUpdate(payDesc)
|
2020-07-02 08:16:04 +02:00
|
|
|
|
|
|
|
// Since Add updates are not stored and FeeUpdates don't have a
|
|
|
|
// corresponding entry in the remote update log, we only need to
|
|
|
|
// mark the htlc as modified if the update was Settle, Fail, or
|
|
|
|
// MalformedFail.
|
|
|
|
if payDesc.EntryType != FeeUpdate {
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Remote.markHtlcModified(
|
|
|
|
payDesc.ParentIndex,
|
|
|
|
)
|
2020-07-02 08:16:04 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-01-06 13:14:25 +01:00
|
|
|
// restorePendingLocalUpdates restores the local log updates leading up to the
|
|
|
|
// given pending remote commitment.
|
|
|
|
func (lc *LightningChannel) restorePendingLocalUpdates(
|
|
|
|
pendingRemoteCommitDiff *channeldb.CommitDiff,
|
|
|
|
pendingRemoteKeys *CommitmentKeyRing) error {
|
|
|
|
|
2017-11-10 07:51:03 +01:00
|
|
|
pendingCommit := pendingRemoteCommitDiff.Commitment
|
|
|
|
pendingHeight := pendingCommit.CommitHeight
|
|
|
|
|
2024-04-25 19:01:37 +02:00
|
|
|
auxResult, err := fn.MapOptionZ(
|
|
|
|
lc.leafStore,
|
|
|
|
func(s AuxLeafStore) fn.Result[CommitDiffAuxResult] {
|
|
|
|
return s.FetchLeavesFromCommit(
|
|
|
|
NewAuxChanState(lc.channelState), pendingCommit,
|
|
|
|
*pendingRemoteKeys,
|
|
|
|
)
|
|
|
|
},
|
|
|
|
).Unpack()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("unable to fetch aux leaves: %w", err)
|
|
|
|
}
|
|
|
|
|
2017-11-10 07:51:03 +01:00
|
|
|
// If we did have a dangling commit, then we'll examine which updates
|
|
|
|
// we included in that state and re-insert them into our update log.
|
|
|
|
for _, logUpdate := range pendingRemoteCommitDiff.LogUpdates {
|
2021-04-21 23:37:21 +02:00
|
|
|
logUpdate := logUpdate
|
|
|
|
|
2017-11-10 07:51:03 +01:00
|
|
|
payDesc, err := lc.logUpdateToPayDesc(
|
2024-08-09 22:00:59 +02:00
|
|
|
&logUpdate, lc.updateLogs.Remote, pendingHeight,
|
2019-10-31 03:43:05 +01:00
|
|
|
chainfee.SatPerKWeight(pendingCommit.FeePerKw),
|
|
|
|
pendingRemoteKeys,
|
2017-11-10 07:51:03 +01:00
|
|
|
lc.channelState.RemoteChanCfg.DustLimit,
|
2024-04-25 19:01:37 +02:00
|
|
|
auxResult.AuxLeaves,
|
2017-11-10 07:51:03 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-01-29 14:13:31 +01:00
|
|
|
// Earlier versions did not write the log index to disk for fee
|
|
|
|
// updates, so they will be unset. To account for this we set
|
|
|
|
// them to the current update log index.
|
|
|
|
if payDesc.EntryType == FeeUpdate && payDesc.LogIndex == 0 &&
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local.logIndex > 0 {
|
2019-01-29 14:13:31 +01:00
|
|
|
|
2024-08-09 22:00:59 +02:00
|
|
|
payDesc.LogIndex = lc.updateLogs.Local.logIndex
|
2019-09-24 13:12:53 +02:00
|
|
|
lc.log.Debugf("Found FeeUpdate on "+
|
2019-01-29 14:13:31 +01:00
|
|
|
"pendingRemoteCommitDiff without logIndex, "+
|
|
|
|
"using %v", payDesc.LogIndex)
|
|
|
|
}
|
|
|
|
|
2019-01-29 14:28:19 +01:00
|
|
|
// At this point the restored update's logIndex must be equal
|
2022-01-13 17:29:43 +01:00
|
|
|
// to the update log's index, otherwise something is horribly wrong.
|
2024-08-09 22:00:59 +02:00
|
|
|
if payDesc.LogIndex != lc.updateLogs.Local.logIndex {
|
2019-01-29 14:28:19 +01:00
|
|
|
panic(fmt.Sprintf("log index mismatch: "+
|
|
|
|
"%v vs %v", payDesc.LogIndex,
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local.logIndex))
|
2019-01-29 14:28:19 +01:00
|
|
|
}
|
|
|
|
|
2019-01-10 12:23:57 +01:00
|
|
|
switch payDesc.EntryType {
|
|
|
|
case Add:
|
2018-05-04 13:42:57 +02:00
|
|
|
// The HtlcIndex of the added HTLC _must_ be equal to
|
|
|
|
// the log's htlcCounter at this point. If it is not we
|
|
|
|
// panic to catch this.
|
|
|
|
// TODO(halseth): remove when cause of htlc entry bug
|
|
|
|
// is found.
|
2024-08-09 22:00:59 +02:00
|
|
|
if payDesc.HtlcIndex !=
|
|
|
|
lc.updateLogs.Local.htlcCounter {
|
|
|
|
|
2018-05-04 13:42:57 +02:00
|
|
|
panic(fmt.Sprintf("htlc index mismatch: "+
|
|
|
|
"%v vs %v", payDesc.HtlcIndex,
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local.htlcCounter))
|
2018-05-04 13:42:57 +02:00
|
|
|
}
|
2018-05-26 03:39:16 +02:00
|
|
|
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local.appendHtlc(payDesc)
|
2019-01-10 12:23:57 +01:00
|
|
|
|
|
|
|
case FeeUpdate:
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local.appendUpdate(payDesc)
|
2019-01-10 12:23:57 +01:00
|
|
|
|
|
|
|
default:
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local.appendUpdate(payDesc)
|
2018-05-26 03:39:16 +02:00
|
|
|
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Remote.markHtlcModified(
|
|
|
|
payDesc.ParentIndex,
|
|
|
|
)
|
2017-11-10 07:51:03 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-07-30 22:11:33 +02:00
|
|
|
// HtlcRetribution contains all the items necessary to sweep a revoked HTLC
|
2017-11-27 21:24:00 +01:00
|
|
|
// transaction from a revoked commitment transaction broadcast by the remote
|
2017-07-30 22:11:33 +02:00
|
|
|
// party.
|
|
|
|
type HtlcRetribution struct {
|
|
|
|
// SignDesc is a sign descriptor capable of generating the necessary
|
|
|
|
// signatures to satisfy the revocation clause of the HTLC's public key
|
|
|
|
// script.
|
2019-01-16 15:47:43 +01:00
|
|
|
SignDesc input.SignDescriptor
|
2017-07-30 22:11:33 +02:00
|
|
|
|
|
|
|
// OutPoint is the target outpoint of this HTLC pointing to the
|
|
|
|
// breached commitment transaction.
|
|
|
|
OutPoint wire.OutPoint
|
2017-09-06 22:38:01 +02:00
|
|
|
|
2018-01-21 05:12:49 +01:00
|
|
|
// SecondLevelWitnessScript is the witness script that will be created
|
|
|
|
// if the second level HTLC transaction for this output is
|
|
|
|
// broadcast/confirmed. We provide this since, if the remote party attempts
|
2018-04-18 04:03:27 +02:00
|
|
|
// to go to the second level to claim the HTLC, we'll need to
|
2018-01-21 05:12:49 +01:00
|
|
|
// update the SignDesc above accordingly to sweep properly.
|
|
|
|
SecondLevelWitnessScript []byte
|
|
|
|
|
2023-03-02 07:17:30 +01:00
|
|
|
// SecondLevelTapTweak is the tap tweak value needed to spend the
|
|
|
|
// second level output in case the breaching party attempts to publish
|
|
|
|
// it.
|
|
|
|
SecondLevelTapTweak [32]byte
|
|
|
|
|
2017-09-06 22:38:01 +02:00
|
|
|
// IsIncoming is a boolean flag that indicates whether or not this
|
|
|
|
// HTLC was accepted from the counterparty. A false value indicates that
|
|
|
|
// this HTLC was offered by us. This flag is used to determine the exact
|
|
|
|
// witness type that should be used to sweep the output.
|
|
|
|
IsIncoming bool
|
2024-06-04 07:58:57 +02:00
|
|
|
|
|
|
|
// ResolutionBlob is a blob used for aux channels that permits a
|
|
|
|
// spender of this output to claim all funds.
|
|
|
|
ResolutionBlob fn.Option[tlv.Blob]
|
2017-07-30 22:11:33 +02:00
|
|
|
}
|
|
|
|
|
2016-11-21 07:54:35 +01:00
|
|
|
// BreachRetribution contains all the data necessary to bring a channel
|
2017-01-13 06:01:50 +01:00
|
|
|
// counterparty to justice claiming ALL lingering funds within the channel in
|
2016-11-21 07:54:35 +01:00
|
|
|
// the scenario that they broadcast a revoked commitment transaction. A
|
|
|
|
// BreachRetribution is created by the closeObserver if it detects an
|
|
|
|
// uncooperative close of the channel which uses a revoked commitment
|
|
|
|
// transaction. The BreachRetribution is then sent over the ContractBreach
|
|
|
|
// channel in order to allow the subscriber of the channel to dispatch justice.
|
|
|
|
type BreachRetribution struct {
|
2022-04-10 19:58:55 +02:00
|
|
|
// BreachTxHash is the transaction hash which breached the channel
|
2016-11-21 07:54:35 +01:00
|
|
|
// contract by spending from the funding multi-sig with a revoked
|
|
|
|
// commitment transaction.
|
2022-04-10 19:58:55 +02:00
|
|
|
BreachTxHash chainhash.Hash
|
2016-11-21 07:54:35 +01:00
|
|
|
|
2017-11-21 08:57:33 +01:00
|
|
|
// BreachHeight records the block height confirming the breach
|
|
|
|
// transaction, used as a height hint when registering for
|
|
|
|
// confirmations.
|
|
|
|
BreachHeight uint32
|
|
|
|
|
2017-11-10 08:06:10 +01:00
|
|
|
// ChainHash is the chain that the contract breach was identified
|
|
|
|
// within. This is also the resident chain of the contract (the chain
|
|
|
|
// the contract was created on).
|
|
|
|
ChainHash chainhash.Hash
|
|
|
|
|
2016-11-21 07:54:35 +01:00
|
|
|
// RevokedStateNum is the revoked state number which was broadcast.
|
|
|
|
RevokedStateNum uint64
|
|
|
|
|
|
|
|
// LocalOutputSignDesc is a SignDescriptor which is capable of
|
|
|
|
// generating the signature necessary to sweep the output within the
|
2022-04-10 19:58:55 +02:00
|
|
|
// breach transaction that pays directly to us.
|
2017-11-10 08:06:10 +01:00
|
|
|
//
|
2017-09-19 21:45:51 +02:00
|
|
|
// NOTE: A nil value indicates that the local output is considered dust
|
|
|
|
// according to the remote party's dust limit.
|
2019-01-16 15:47:43 +01:00
|
|
|
LocalOutputSignDesc *input.SignDescriptor
|
2016-11-21 07:54:35 +01:00
|
|
|
|
|
|
|
// LocalOutpoint is the outpoint of the output paying to us (the local
|
|
|
|
// party) within the breach transaction.
|
|
|
|
LocalOutpoint wire.OutPoint
|
|
|
|
|
2020-03-06 16:11:47 +01:00
|
|
|
// LocalDelay is the CSV delay for the to_remote script on the breached
|
|
|
|
// commitment.
|
|
|
|
LocalDelay uint32
|
|
|
|
|
2016-11-21 07:54:35 +01:00
|
|
|
// RemoteOutputSignDesc is a SignDescriptor which is capable of
|
|
|
|
// generating the signature required to claim the funds as described
|
|
|
|
// within the revocation clause of the remote party's commitment
|
|
|
|
// output.
|
2017-11-10 08:06:10 +01:00
|
|
|
//
|
2017-09-19 21:45:51 +02:00
|
|
|
// NOTE: A nil value indicates that the local output is considered dust
|
|
|
|
// according to the remote party's dust limit.
|
2019-01-16 15:47:43 +01:00
|
|
|
RemoteOutputSignDesc *input.SignDescriptor
|
2016-11-21 07:54:35 +01:00
|
|
|
|
2017-09-25 20:25:58 +02:00
|
|
|
// RemoteOutpoint is the outpoint of the output paying to the remote
|
2016-11-21 07:54:35 +01:00
|
|
|
// party within the breach transaction.
|
|
|
|
RemoteOutpoint wire.OutPoint
|
2017-07-30 22:11:33 +02:00
|
|
|
|
2020-03-06 16:11:47 +01:00
|
|
|
// RemoteDelay specifies the CSV delay applied to to-local scripts on
|
|
|
|
// the breaching commitment transaction.
|
|
|
|
RemoteDelay uint32
|
|
|
|
|
2017-07-30 22:11:33 +02:00
|
|
|
// HtlcRetributions is a slice of HTLC retributions, one for each
|
|
|
|
// active HTLC output within the breached commitment transaction.
|
|
|
|
HtlcRetributions []HtlcRetribution
|
2018-12-20 06:50:44 +01:00
|
|
|
|
|
|
|
// KeyRing contains the derived public keys used to construct the
|
|
|
|
// breaching commitment transaction. This allows downstream clients to
|
|
|
|
// have access to the public keys used in the scripts.
|
|
|
|
KeyRing *CommitmentKeyRing
|
2024-06-04 07:58:57 +02:00
|
|
|
|
|
|
|
// LocalResolutionBlob is a blob used for aux channels that permits an
|
|
|
|
// honest party to sweep the local commitment output.
|
|
|
|
LocalResolutionBlob fn.Option[tlv.Blob]
|
|
|
|
|
|
|
|
// RemoteResolutionBlob is a blob used for aux channels that permits an
|
|
|
|
// honest party to sweep the remote commitment output.
|
|
|
|
RemoteResolutionBlob fn.Option[tlv.Blob]
|
2016-11-21 07:54:35 +01:00
|
|
|
}
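// A hedged usage sketch of constructing a retribution with the function
// defined below; chanState, revokedStateNum, breachHeight, spendTx,
// leafStore, and auxResolver are assumed to come from the caller (e.g. a
// breach observer):
//
//	br, err := NewBreachRetribution(
//		chanState, revokedStateNum, breachHeight, spendTx,
//		leafStore, auxResolver,
//	)
//	if err != nil {
//		return err
//	}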
|
|
|
|
|
2018-01-18 22:48:30 +01:00
|
|
|
// NewBreachRetribution creates a new fully populated BreachRetribution for the
|
2023-02-02 09:13:44 +01:00
|
|
|
// passed channel, at a particular revoked state number. If the spend
|
|
|
|
// transaction that the breach retribution should target is known, then it can
|
|
|
|
// be provided via the spendTx parameter. Otherwise, if the spendTx parameter is
|
|
|
|
// nil, then the revocation log will be checked to see if it contains the info
|
|
|
|
// required to construct the BreachRetribution. If the revocation log is missing
|
|
|
|
// the required fields then ErrRevLogDataMissing will be returned.
|
2018-01-18 22:48:30 +01:00
|
|
|
func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64,
|
2024-04-25 19:00:42 +02:00
|
|
|
breachHeight uint32, spendTx *wire.MsgTx,
|
2024-06-04 07:58:57 +02:00
|
|
|
leafStore fn.Option[AuxLeafStore],
|
|
|
|
auxResolver fn.Option[AuxContractResolver]) (*BreachRetribution,
|
|
|
|
error) {
|
2016-11-21 07:54:35 +01:00
|
|
|
|
|
|
|
// Query the on-disk revocation log for the snapshot which was recorded
|
2022-04-13 16:33:07 +02:00
|
|
|
// at this particular state num. Based on whether a legacy revocation
|
|
|
|
// log is returned or not, we will process them differently.
|
|
|
|
revokedLog, revokedLogLegacy, err := chanState.FindPreviousState(
|
|
|
|
stateNum,
|
|
|
|
)
|
2016-11-21 07:54:35 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2022-04-13 16:33:07 +02:00
|
|
|
// Sanity check that at least one of the logs is returned.
|
|
|
|
if revokedLog == nil && revokedLogLegacy == nil {
|
|
|
|
return nil, ErrNoRevocationLogFound
|
|
|
|
}
|
2018-12-20 06:50:46 +01:00
|
|
|
|
2016-12-14 15:01:48 +01:00
|
|
|
// With the state number broadcast known, we can now derive/restore the
|
|
|
|
// proper revocation preimage necessary to sweep the remote party's
|
2016-11-21 07:54:35 +01:00
|
|
|
// output.
|
2016-12-14 15:01:48 +01:00
|
|
|
revocationPreimage, err := chanState.RevocationStore.LookUp(stateNum)
|
2016-11-21 07:54:35 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-04-05 02:41:05 +02:00
|
|
|
commitmentSecret, commitmentPoint := btcec.PrivKeyFromBytes(
|
2022-02-23 14:48:00 +01:00
|
|
|
revocationPreimage[:],
|
2018-04-05 02:41:05 +02:00
|
|
|
)
|
2017-07-30 22:11:33 +02:00
|
|
|
|
|
|
|
// With the commitment point generated, we can now generate the four
|
|
|
|
// keys we'll need to reconstruct the commitment state.
|
2019-09-17 04:06:19 +02:00
|
|
|
keyRing := DeriveCommitmentKeys(
|
2024-07-31 01:44:18 +02:00
|
|
|
commitmentPoint, lntypes.Remote, chanState.ChanType,
|
2019-08-01 05:10:45 +02:00
|
|
|
&chanState.LocalChanCfg, &chanState.RemoteChanCfg,
|
|
|
|
)
|
2016-11-21 07:54:35 +01:00
|
|
|
|
|
|
|
// Next, reconstruct the scripts as they were present at this state
|
|
|
|
// number so we can have the proper witness script to sign and include
|
|
|
|
// within the final witness.
|
2021-07-15 02:16:13 +02:00
|
|
|
var leaseExpiry uint32
|
|
|
|
if chanState.ChanType.HasLeaseExpiration() {
|
|
|
|
leaseExpiry = chanState.ThawHeight
|
|
|
|
}
|
2020-01-06 11:42:04 +01:00
|
|
|
|
2024-04-25 19:01:37 +02:00
|
|
|
auxResult, err := fn.MapOptionZ(
|
|
|
|
leafStore, func(s AuxLeafStore) fn.Result[CommitDiffAuxResult] {
|
|
|
|
return s.FetchLeavesFromRevocation(revokedLog)
|
|
|
|
},
|
|
|
|
).Unpack()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to fetch aux leaves: %w", err)
|
|
|
|
}
|
2024-03-17 21:53:38 +01:00
|
|
|
|
2022-04-13 16:33:07 +02:00
|
|
|
// Since it is the remote breach we are reconstructing, the output
|
|
|
|
// going to us will be a to-remote script with our local params.
|
2024-04-25 19:01:37 +02:00
|
|
|
remoteAuxLeaf := fn.ChainOption(
|
|
|
|
func(l CommitAuxLeaves) input.AuxTapLeaf {
|
|
|
|
return l.RemoteAuxLeaf
|
|
|
|
},
|
|
|
|
)(auxResult.AuxLeaves)
|
2022-04-13 16:33:07 +02:00
|
|
|
isRemoteInitiator := !chanState.IsInitiator
|
2020-03-06 16:11:47 +01:00
|
|
|
ourScript, ourDelay, err := CommitScriptToRemote(
|
2021-07-15 02:16:13 +02:00
|
|
|
chanState.ChanType, isRemoteInitiator, keyRing.ToRemoteKey,
|
2024-04-25 19:01:37 +02:00
|
|
|
leaseExpiry, remoteAuxLeaf,
|
2020-01-06 11:42:04 +01:00
|
|
|
)
|
2017-07-30 22:11:33 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-11-21 07:54:35 +01:00
|
|
|
|
2024-04-25 19:01:37 +02:00
|
|
|
localAuxLeaf := fn.ChainOption(
|
|
|
|
func(l CommitAuxLeaves) input.AuxTapLeaf {
|
|
|
|
return l.LocalAuxLeaf
|
|
|
|
},
|
|
|
|
)(auxResult.AuxLeaves)
|
2022-04-13 16:33:07 +02:00
|
|
|
theirDelay := uint32(chanState.RemoteChanCfg.CsvDelay)
|
|
|
|
theirScript, err := CommitScriptToSelf(
|
|
|
|
chanState.ChanType, isRemoteInitiator, keyRing.ToLocalKey,
|
2024-04-25 19:01:37 +02:00
|
|
|
keyRing.RevocationKey, theirDelay, leaseExpiry, localAuxLeaf,
|
2022-04-13 16:33:07 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2016-11-21 07:54:35 +01:00
|
|
|
}
|
2022-04-13 16:33:07 +02:00
|
|
|
|
|
|
|
// Define an empty breach retribution that will be overwritten based on
|
|
|
|
// the version of the revocation log found.
|
|
|
|
var br *BreachRetribution
|
|
|
|
|
|
|
|
// Define our and their amounts, which will be overwritten below.
|
|
|
|
var ourAmt, theirAmt int64
|
|
|
|
|
|
|
|
// If the returned *RevocationLog is non-nil, use it to derive the info
|
|
|
|
// we need.
|
|
|
|
if revokedLog != nil {
|
|
|
|
br, ourAmt, theirAmt, err = createBreachRetribution(
|
|
|
|
revokedLog, spendTx, chanState, keyRing,
|
2024-04-25 19:01:37 +02:00
|
|
|
commitmentSecret, leaseExpiry, auxResult.AuxLeaves,
|
2022-04-13 16:33:07 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// The returned revocation log is in legacy format, which is a
|
|
|
|
// *ChannelCommitment.
|
|
|
|
//
|
|
|
|
// NOTE: this branch is kept for compatibility such that for
|
|
|
|
// old nodes which refuse to migrate the legacy revocation log
|
|
|
|
// data can still function. This branch can be deleted once we
|
|
|
|
// are confident that no legacy format is in use.
|
|
|
|
br, ourAmt, theirAmt, err = createBreachRetributionLegacy(
|
|
|
|
revokedLogLegacy, chanState, keyRing, commitmentSecret,
|
|
|
|
ourScript, theirScript, leaseExpiry,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2016-09-12 21:33:22 +02:00
|
|
|
}
|
2016-11-21 07:54:35 +01:00
|
|
|
}
|
|
|
|
|
2017-09-19 21:45:51 +02:00
|
|
|
// Conditionally instantiate a sign descriptor for each of the
|
|
|
|
// commitment outputs. If either is considered dust using the remote
|
|
|
|
// party's dust limit, the respective sign descriptor will be nil.
|
2022-04-13 16:33:07 +02:00
|
|
|
//
|
2020-01-06 11:42:04 +01:00
|
|
|
// If our balance exceeds the remote party's dust limit, instantiate
|
|
|
|
// the sign descriptor for our output.
|
2022-04-13 16:33:07 +02:00
|
|
|
if ourAmt >= int64(chanState.RemoteChanCfg.DustLimit) {
|
2023-08-09 07:22:12 +02:00
|
|
|
// As we're about to sweep our own output w/o a delay, we'll
|
|
|
|
// obtain the witness script for the success/delay path.
|
2023-08-08 06:09:58 +02:00
|
|
|
witnessScript, err := ourScript.WitnessScriptForPath(
|
|
|
|
input.ScriptPathDelay,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2022-04-13 16:33:07 +02:00
|
|
|
br.LocalOutputSignDesc = &input.SignDescriptor{
|
2018-01-18 22:45:30 +01:00
|
|
|
SingleTweak: keyRing.LocalCommitKeyTweak,
|
2018-02-18 00:17:40 +01:00
|
|
|
KeyDesc: chanState.LocalChanCfg.PaymentBasePoint,
|
2023-08-08 06:09:58 +02:00
|
|
|
WitnessScript: witnessScript,
|
2017-09-19 21:45:51 +02:00
|
|
|
Output: &wire.TxOut{
|
2023-08-08 06:09:58 +02:00
|
|
|
PkScript: ourScript.PkScript(),
|
2022-04-13 16:33:07 +02:00
|
|
|
Value: ourAmt,
|
2017-09-19 21:45:51 +02:00
|
|
|
},
|
2023-08-19 00:17:51 +02:00
|
|
|
HashType: sweepSigHash(chanState.ChanType),
|
2017-09-19 21:45:51 +02:00
|
|
|
}
|
2023-03-02 06:45:27 +01:00
|
|
|
|
|
|
|
// For taproot channels, we'll make sure to set the script path
|
|
|
|
// spend (as our output on their revoked tx still needs the
|
|
|
|
// delay), and set the control block.
|
2023-08-08 06:09:58 +02:00
|
|
|
if scriptTree, ok := ourScript.(input.TapscriptDescriptor); ok {
|
2023-08-09 07:22:12 +02:00
|
|
|
//nolint:lll
|
2023-03-02 06:45:27 +01:00
|
|
|
br.LocalOutputSignDesc.SignMethod = input.TaprootScriptSpendSignMethod
|
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
ctrlBlock, err := scriptTree.CtrlBlockForPath(
|
|
|
|
input.ScriptPathDelay,
|
2023-03-02 06:45:27 +01:00
|
|
|
)
|
2023-08-08 06:09:58 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-08-09 07:22:12 +02:00
|
|
|
//nolint:lll
|
2023-03-02 06:45:27 +01:00
|
|
|
br.LocalOutputSignDesc.ControlBlock, err = ctrlBlock.ToBytes()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
2024-06-04 07:58:57 +02:00
|
|
|
|
|
|
|
// At this point, we'll check to see if we need any extra
|
|
|
|
// resolution data for this output.
|
|
|
|
resolveReq := ResolutionReq{
|
|
|
|
ChanPoint: chanState.FundingOutpoint,
|
|
|
|
ShortChanID: chanState.ShortChanID(),
|
|
|
|
Initiator: chanState.IsInitiator,
|
|
|
|
FundingBlob: chanState.CustomBlob,
|
|
|
|
Type: input.TaprootRemoteCommitSpend,
|
|
|
|
CloseType: Breach,
|
|
|
|
CommitTx: spendTx,
|
|
|
|
SignDesc: *br.LocalOutputSignDesc,
|
|
|
|
KeyRing: keyRing,
|
|
|
|
CsvDelay: ourDelay,
|
|
|
|
BreachCsvDelay: fn.Some(theirDelay),
|
|
|
|
CommitFee: chanState.RemoteCommitment.CommitFee,
|
|
|
|
}
|
|
|
|
if revokedLog != nil {
|
|
|
|
resolveReq.CommitBlob = revokedLog.CustomBlob.ValOpt()
|
|
|
|
}
|
|
|
|
|
|
|
|
resolveBlob := fn.MapOptionZ(
|
|
|
|
auxResolver,
|
|
|
|
func(a AuxContractResolver) fn.Result[tlv.Blob] {
|
|
|
|
return a.ResolveContract(resolveReq)
|
|
|
|
},
|
|
|
|
)
|
|
|
|
if err := resolveBlob.Err(); err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to aux resolve: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
br.LocalResolutionBlob = resolveBlob.Option()
|
2017-09-19 21:45:51 +02:00
|
|
|
}
|
|
|
|
|
2020-01-06 11:42:04 +01:00
|
|
|
// Similarly, if their balance exceeds the remote party's dust limit,
|
|
|
|
// assemble the sign descriptor for their output, which we can sweep.
|
2022-04-13 16:33:07 +02:00
|
|
|
if theirAmt >= int64(chanState.RemoteChanCfg.DustLimit) {
|
2023-08-08 06:09:58 +02:00
|
|
|
// As we're trying to defend the channel against a breach
|
|
|
|
// attempt from the remote party, we want to obain the
|
|
|
|
// revocation witness script here.
|
|
|
|
witnessScript, err := theirScript.WitnessScriptForPath(
|
|
|
|
input.ScriptPathRevocation,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2022-04-13 16:33:07 +02:00
|
|
|
br.RemoteOutputSignDesc = &input.SignDescriptor{
|
|
|
|
KeyDesc: chanState.LocalChanCfg.
|
|
|
|
RevocationBasePoint,
|
2017-09-19 21:45:51 +02:00
|
|
|
DoubleTweak: commitmentSecret,
|
2023-08-08 06:09:58 +02:00
|
|
|
WitnessScript: witnessScript,
|
2017-09-19 21:45:51 +02:00
|
|
|
Output: &wire.TxOut{
|
2023-08-08 06:09:58 +02:00
|
|
|
PkScript: theirScript.PkScript(),
|
2022-04-13 16:33:07 +02:00
|
|
|
Value: theirAmt,
|
2017-09-19 21:45:51 +02:00
|
|
|
},
|
2023-08-19 00:17:51 +02:00
|
|
|
HashType: sweepSigHash(chanState.ChanType),
|
2017-09-19 21:45:51 +02:00
|
|
|
}
|
2023-03-02 06:45:27 +01:00
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
// For taproot channels, the remote output (the revoked output)
|
|
|
|
// is spent with a script path to ensure all information 3rd
|
|
|
|
// parties need to sweep anchors is revealed on chain.
|
2023-08-09 07:22:12 +02:00
|
|
|
scriptTree, ok := theirScript.(input.TapscriptDescriptor)
|
|
|
|
if ok {
|
2023-08-08 06:09:58 +02:00
|
|
|
//nolint:lll
|
|
|
|
br.RemoteOutputSignDesc.SignMethod = input.TaprootScriptSpendSignMethod
|
2023-03-02 06:45:27 +01:00
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
ctrlBlock, err := scriptTree.CtrlBlockForPath(
|
|
|
|
input.ScriptPathRevocation,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-08-09 07:22:12 +02:00
|
|
|
//nolint:lll
|
2023-08-08 06:09:58 +02:00
|
|
|
br.RemoteOutputSignDesc.ControlBlock, err = ctrlBlock.ToBytes()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-03-02 06:45:27 +01:00
|
|
|
}
|
2024-06-04 07:58:57 +02:00
|
|
|
|
|
|
|
// At this point, we'll check to see if we need any extra
|
|
|
|
// resolution data for this output.
|
|
|
|
resolveReq := ResolutionReq{
|
|
|
|
ChanPoint: chanState.FundingOutpoint,
|
|
|
|
ShortChanID: chanState.ShortChanID(),
|
|
|
|
Initiator: chanState.IsInitiator,
|
|
|
|
FundingBlob: chanState.CustomBlob,
|
|
|
|
Type: input.TaprootCommitmentRevoke,
|
|
|
|
CloseType: Breach,
|
|
|
|
CommitTx: spendTx,
|
|
|
|
SignDesc: *br.RemoteOutputSignDesc,
|
|
|
|
KeyRing: keyRing,
|
|
|
|
CsvDelay: theirDelay,
|
|
|
|
BreachCsvDelay: fn.Some(theirDelay),
|
|
|
|
CommitFee: chanState.RemoteCommitment.CommitFee,
|
|
|
|
}
|
|
|
|
if revokedLog != nil {
|
|
|
|
resolveReq.CommitBlob = revokedLog.CustomBlob.ValOpt()
|
|
|
|
}
|
|
|
|
resolveBlob := fn.MapOptionZ(
|
|
|
|
auxResolver,
|
|
|
|
func(a AuxContractResolver) fn.Result[tlv.Blob] {
|
|
|
|
return a.ResolveContract(resolveReq)
|
|
|
|
},
|
|
|
|
)
|
|
|
|
if err := resolveBlob.Err(); err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to aux resolve: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
br.RemoteResolutionBlob = resolveBlob.Option()
|
2017-09-19 21:45:51 +02:00
|
|
|
}
|
|
|
|
|
2022-04-13 16:33:07 +02:00
|
|
|
// Finally, with all the necessary data constructed, we can pad the
|
|
|
|
// BreachRetribution struct which houses all the data necessary to
|
|
|
|
// swiftly bring justice to the cheating remote party.
|
|
|
|
br.BreachHeight = breachHeight
|
|
|
|
br.RevokedStateNum = stateNum
|
|
|
|
br.LocalDelay = ourDelay
|
|
|
|
br.RemoteDelay = theirDelay
|
|
|
|
|
|
|
|
return br, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// createHtlcRetribution is a helper function to construct an HtlcRetribution
// based on the passed params.
func createHtlcRetribution(chanState *channeldb.OpenChannel,
	keyRing *CommitmentKeyRing, commitHash chainhash.Hash,
	commitmentSecret *btcec.PrivateKey, leaseExpiry uint32,
	htlc *channeldb.HTLCEntry,
	auxLeaves fn.Option[CommitAuxLeaves]) (HtlcRetribution, error) {

	var emptyRetribution HtlcRetribution

	theirDelay := uint32(chanState.RemoteChanCfg.CsvDelay)
	isRemoteInitiator := !chanState.IsInitiator

	// We'll generate the original second level witness script now, as
	// we'll need it if we're revoking an HTLC output on the remote
	// commitment transaction, and *they* go to the second level.
	secondLevelAuxLeaf := fn.ChainOption(
		func(l CommitAuxLeaves) fn.Option[input.AuxTapLeaf] {
			return fn.MapOption(func(val uint16) input.AuxTapLeaf {
				idx := input.HtlcIndex(val)

				if htlc.Incoming.Val {
					leaves := l.IncomingHtlcLeaves[idx]
					return leaves.SecondLevelLeaf
				}

				return l.OutgoingHtlcLeaves[idx].SecondLevelLeaf
			})(htlc.HtlcIndex.ValOpt())
		},
	)(auxLeaves)
	secondLevelScript, err := SecondLevelHtlcScript(
		chanState.ChanType, isRemoteInitiator,
		keyRing.RevocationKey, keyRing.ToLocalKey, theirDelay,
		leaseExpiry, fn.FlattenOption(secondLevelAuxLeaf),
	)
	if err != nil {
		return emptyRetribution, err
	}

	// If this is an incoming HTLC, then this means that they were the
	// sender of the HTLC (relative to us). So we'll re-generate the sender
	// HTLC script. Otherwise, if this was an outgoing HTLC that we sent,
	// then from the PoV of the remote commitment state, they're the
	// receiver of this HTLC.
	htlcLeaf := fn.ChainOption(
		func(l CommitAuxLeaves) fn.Option[input.AuxTapLeaf] {
			return fn.MapOption(func(val uint16) input.AuxTapLeaf {
				idx := input.HtlcIndex(val)

				if htlc.Incoming.Val {
					leaves := l.IncomingHtlcLeaves[idx]
					return leaves.AuxTapLeaf
				}

				return l.OutgoingHtlcLeaves[idx].AuxTapLeaf
			})(htlc.HtlcIndex.ValOpt())
		},
	)(auxLeaves)
	scriptInfo, err := genHtlcScript(
		chanState.ChanType, htlc.Incoming.Val, lntypes.Remote,
		htlc.RefundTimeout.Val, htlc.RHash.Val, keyRing,
		fn.FlattenOption(htlcLeaf),
	)
	if err != nil {
		return emptyRetribution, err
	}

	signDesc := input.SignDescriptor{
		KeyDesc: chanState.LocalChanCfg.
			RevocationBasePoint,
		DoubleTweak:   commitmentSecret,
		WitnessScript: scriptInfo.WitnessScriptToSign(),
		Output: &wire.TxOut{
			PkScript: scriptInfo.PkScript(),
			Value:    int64(htlc.Amt.Val.Int()),
		},
		HashType: sweepSigHash(chanState.ChanType),
	}

	// For taproot HTLC outputs, we need to set the sign method to key
	// spend, and also set the tap tweak root needed to derive the proper
	// private key.
	if scriptTree, ok := scriptInfo.(input.TapscriptDescriptor); ok {
		signDesc.SignMethod = input.TaprootKeySpendSignMethod

		signDesc.TapTweak = scriptTree.TapTweak()
	}

	// The second level script we sign will always be the success path.
	secondLevelWitnessScript, err := secondLevelScript.WitnessScriptForPath(
		input.ScriptPathSuccess,
	)
	if err != nil {
		return emptyRetribution, err
	}

	// If this is a taproot output, we'll also need to obtain the second
	// level tap tweak as well.
	var secondLevelTapTweak [32]byte
	if scriptTree, ok := secondLevelScript.(input.TapscriptDescriptor); ok {
		copy(secondLevelTapTweak[:], scriptTree.TapTweak())
	}

	return HtlcRetribution{
		SignDesc: signDesc,
		OutPoint: wire.OutPoint{
			Hash:  commitHash,
			Index: uint32(htlc.OutputIndex.Val),
		},
		SecondLevelWitnessScript: secondLevelWitnessScript,
		IsIncoming:               htlc.Incoming.Val,
		SecondLevelTapTweak:      secondLevelTapTweak,
	}, nil
}

// createBreachRetribution creates a partially initiated BreachRetribution
// using a RevocationLog. Returns the constructed retribution, our amount,
// their amount, and a possible non-nil error. If the spendTx parameter is
// non-nil, then it will be used to glean the breach transaction's to-local and
// to-remote output amounts. Otherwise, the RevocationLog will be checked to
// see if these fields are present there. If they are not, then
// ErrRevLogDataMissing is returned.
func createBreachRetribution(revokedLog *channeldb.RevocationLog,
	spendTx *wire.MsgTx, chanState *channeldb.OpenChannel,
	keyRing *CommitmentKeyRing, commitmentSecret *btcec.PrivateKey,
	leaseExpiry uint32,
	auxLeaves fn.Option[CommitAuxLeaves]) (*BreachRetribution, int64, int64,
	error) {

	commitHash := revokedLog.CommitTxHash

	// Create the htlc retributions.
	htlcRetributions := make([]HtlcRetribution, len(revokedLog.HTLCEntries))
	for i, htlc := range revokedLog.HTLCEntries {
		hr, err := createHtlcRetribution(
			chanState, keyRing, commitHash.Val,
			commitmentSecret, leaseExpiry, htlc, auxLeaves,
		)
		if err != nil {
			return nil, 0, 0, err
		}
		htlcRetributions[i] = hr
	}

	var ourAmt, theirAmt int64

	// Construct our outpoint.
	ourOutpoint := wire.OutPoint{
		Hash: commitHash.Val,
	}
	if revokedLog.OurOutputIndex.Val != channeldb.OutputIndexEmpty {
		ourOutpoint.Index = uint32(revokedLog.OurOutputIndex.Val)

		// If the spend transaction is provided, then we use it to get
		// the value of our output.
		if spendTx != nil {
			// Sanity check that OurOutputIndex is within range.
			if int(ourOutpoint.Index) >= len(spendTx.TxOut) {
				return nil, 0, 0, fmt.Errorf("%w: ours=%v, "+
					"len(TxOut)=%v",
					ErrOutputIndexOutOfRange,
					ourOutpoint.Index, len(spendTx.TxOut),
				)
			}
			// Read the amounts from the breach transaction.
			//
			// NOTE: ourAmt here includes commit fee and anchor
			// amount (if enabled).
			ourAmt = spendTx.TxOut[ourOutpoint.Index].Value
		} else {
			// Otherwise, we check to see if the revocation log
			// contains our output amount. Due to a previous
			// migration, this field may be empty in which case an
			// error will be returned.
			b, err := revokedLog.OurBalance.ValOpt().UnwrapOrErr(
				ErrRevLogDataMissing,
			)
			if err != nil {
				return nil, 0, 0, err
			}

			ourAmt = int64(b.Int().ToSatoshis())
		}
	}

	// Construct their outpoint.
	theirOutpoint := wire.OutPoint{
		Hash: commitHash.Val,
	}
	if revokedLog.TheirOutputIndex.Val != channeldb.OutputIndexEmpty {
		theirOutpoint.Index = uint32(revokedLog.TheirOutputIndex.Val)

		// If the spend transaction is provided, then we use it to get
		// the value of the remote party's output.
		if spendTx != nil {
			// Sanity check that TheirOutputIndex is within range.
			if int(revokedLog.TheirOutputIndex.Val) >=
				len(spendTx.TxOut) {

				return nil, 0, 0, fmt.Errorf("%w: theirs=%v, "+
					"len(TxOut)=%v",
					ErrOutputIndexOutOfRange,
					revokedLog.TheirOutputIndex,
					len(spendTx.TxOut),
				)
			}

			// Read the amounts from the breach transaction.
			theirAmt = spendTx.TxOut[theirOutpoint.Index].Value
		} else {
			// Otherwise, we check to see if the revocation log
			// contains the remote party's output amount. Due to a
			// previous migration, this field may be empty in which
			// case an error will be returned.
			b, err := revokedLog.TheirBalance.ValOpt().UnwrapOrErr(
				ErrRevLogDataMissing,
			)
			if err != nil {
				return nil, 0, 0, err
			}

			theirAmt = int64(b.Int().ToSatoshis())
		}
	}

	return &BreachRetribution{
		BreachTxHash:     commitHash.Val,
		ChainHash:        chanState.ChainHash,
		LocalOutpoint:    ourOutpoint,
		RemoteOutpoint:   theirOutpoint,
		HtlcRetributions: htlcRetributions,
		KeyRing:          keyRing,
	}, ourAmt, theirAmt, nil
}

// createBreachRetributionLegacy creates a partially initiated
// BreachRetribution using a ChannelCommitment. Returns the constructed
// retribution, our amount, their amount, and a possible non-nil error.
func createBreachRetributionLegacy(revokedLog *channeldb.ChannelCommitment,
	chanState *channeldb.OpenChannel, keyRing *CommitmentKeyRing,
	commitmentSecret *btcec.PrivateKey,
	ourScript, theirScript input.ScriptDescriptor,
	leaseExpiry uint32) (*BreachRetribution, int64, int64, error) {

	commitHash := revokedLog.CommitTx.TxHash()
	ourOutpoint := wire.OutPoint{
		Hash: commitHash,
	}
	theirOutpoint := wire.OutPoint{
		Hash: commitHash,
	}

	// In order to fully populate the breach retribution struct, we'll need
	// to find the exact index of the commitment outputs.
	for i, txOut := range revokedLog.CommitTx.TxOut {
		switch {
		case bytes.Equal(txOut.PkScript, ourScript.PkScript()):
			ourOutpoint.Index = uint32(i)
		case bytes.Equal(txOut.PkScript, theirScript.PkScript()):
			theirOutpoint.Index = uint32(i)
		}
	}

	// With the commitment outputs located, we'll now generate all the
	// retribution structs for each of the HTLC transactions active on the
	// remote commitment transaction.
	htlcRetributions := make([]HtlcRetribution, len(revokedLog.Htlcs))
	for i, htlc := range revokedLog.Htlcs {
		// If the HTLC is dust, then we'll skip it as it doesn't have
		// an output on the commitment transaction.
		if HtlcIsDust(
			chanState.ChanType, htlc.Incoming, lntypes.Remote,
			chainfee.SatPerKWeight(revokedLog.FeePerKw),
			htlc.Amt.ToSatoshis(),
			chanState.RemoteChanCfg.DustLimit,
		) {

			continue
		}

		entry, err := channeldb.NewHTLCEntryFromHTLC(htlc)
		if err != nil {
			return nil, 0, 0, err
		}

		hr, err := createHtlcRetribution(
			chanState, keyRing, commitHash,
			commitmentSecret, leaseExpiry, entry,
			fn.None[CommitAuxLeaves](),
		)
		if err != nil {
			return nil, 0, 0, err
		}
		htlcRetributions[i] = hr
	}

	// Compute the balances in satoshis.
	ourAmt := int64(revokedLog.LocalBalance.ToSatoshis())
	theirAmt := int64(revokedLog.RemoteBalance.ToSatoshis())

	return &BreachRetribution{
		BreachTxHash:     commitHash,
		ChainHash:        chanState.ChainHash,
		LocalOutpoint:    ourOutpoint,
		RemoteOutpoint:   theirOutpoint,
		HtlcRetributions: htlcRetributions,
		KeyRing:          keyRing,
	}, ourAmt, theirAmt, nil
}

// HtlcIsDust determines if an HTLC output is dust or not depending on two
// bits: if the HTLC is incoming and if the HTLC will be placed on our
// commitment transaction, or theirs. These two pieces of information are
// required as we currently use second-level HTLC transactions as off-chain
// covenants. Depending on the two bits, we'll either be using a timeout or
// success transaction which have different weights.
func HtlcIsDust(chanType channeldb.ChannelType,
	incoming bool, whoseCommit lntypes.ChannelParty,
	feePerKw chainfee.SatPerKWeight, htlcAmt, dustLimit btcutil.Amount,
) bool {

	// First we'll determine the fee required for this HTLC based on if
	// this is an incoming HTLC or not, and also on whose commitment
	// transaction it will be placed on.
	var htlcFee btcutil.Amount
	switch {

	// If this is an incoming HTLC on our commitment transaction, then the
	// second-level transaction will be a success transaction.
	case incoming && whoseCommit.IsLocal():
		htlcFee = HtlcSuccessFee(chanType, feePerKw)

	// If this is an incoming HTLC on their commitment transaction, then
	// we'll be using a second-level timeout transaction as they've added
	// this HTLC.
	case incoming && whoseCommit.IsRemote():
		htlcFee = HtlcTimeoutFee(chanType, feePerKw)

	// If this is an outgoing HTLC on our commitment transaction, then
	// we'll be using a timeout transaction as we're the sender of the
	// HTLC.
	case !incoming && whoseCommit.IsLocal():
		htlcFee = HtlcTimeoutFee(chanType, feePerKw)

	// If this is an outgoing HTLC on their commitment transaction, then
	// we'll be using an HTLC success transaction as they're the receiver
	// of this HTLC.
	case !incoming && whoseCommit.IsRemote():
		htlcFee = HtlcSuccessFee(chanType, feePerKw)
	}

	return (htlcAmt - htlcFee) < dustLimit
}

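// The check above is a simple threshold: an HTLC only gets its own commitment
// output if its value, less the fee of its second-level transaction, clears
// the dust limit. A rough usage sketch (all values are hypothetical):
//
//	feeRate := chainfee.SatPerKWeight(2500)
//	dust := HtlcIsDust(
//		chanType, true, lntypes.Local, feeRate,
//		btcutil.Amount(2_000), btcutil.Amount(546),
//	)
//	// dust is true iff 2_000 - HtlcSuccessFee(chanType, feeRate) < 546.
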
// HtlcView represents the "active" HTLCs at a particular point within the
// history of the HTLC update log.
type HtlcView struct {
	// NextHeight is the height of the commitment transaction that will be
	// created using this view.
	NextHeight uint64

	// Updates is a Dual of the Local and Remote HTLCs.
	Updates lntypes.Dual[[]*paymentDescriptor]

	// FeePerKw is the fee rate in sat/kw of the commitment transaction.
	FeePerKw chainfee.SatPerKWeight
}

// AuxOurUpdates returns the outgoing HTLCs as a read-only copy of
// AuxHtlcDescriptors.
func (v *HtlcView) AuxOurUpdates() []AuxHtlcDescriptor {
	return fn.Map(newAuxHtlcDescriptor, v.Updates.Local)
}

// AuxTheirUpdates returns the incoming HTLCs as a read-only copy of
// AuxHtlcDescriptors.
func (v *HtlcView) AuxTheirUpdates() []AuxHtlcDescriptor {
	return fn.Map(newAuxHtlcDescriptor, v.Updates.Remote)
}

// fetchHTLCView returns all the candidate HTLC updates which should be
// considered for inclusion within a commitment based on the passed HTLC log
// indexes.
func (lc *LightningChannel) fetchHTLCView(theirLogIndex,
	ourLogIndex uint64) *HtlcView {

	var ourHTLCs []*paymentDescriptor
	for e := lc.updateLogs.Local.Front(); e != nil; e = e.Next() {
		htlc := e.Value

		// This HTLC is active from this point-of-view iff the log
		// index of the state update is below the specified index in
		// our update log.
		if htlc.LogIndex < ourLogIndex {
			ourHTLCs = append(ourHTLCs, htlc)
		}
	}

	var theirHTLCs []*paymentDescriptor
	for e := lc.updateLogs.Remote.Front(); e != nil; e = e.Next() {
		htlc := e.Value

		// If this is an incoming HTLC, then it is only active from
		// this point-of-view if the index of the HTLC addition in
		// their log is below the specified view index.
		if htlc.LogIndex < theirLogIndex {
			theirHTLCs = append(theirHTLCs, htlc)
		}
	}

	return &HtlcView{
		Updates: lntypes.Dual[[]*paymentDescriptor]{
			Local:  ourHTLCs,
			Remote: theirHTLCs,
		},
	}
}

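// As a rough sketch of how the returned view is consumed (the index values
// are hypothetical), callers read the two halves of the Dual directly:
//
//	view := lc.fetchHTLCView(theirLogIndex, ourLogIndex)
//	outgoing := view.Updates.Local  // our updates below ourLogIndex
//	incoming := view.Updates.Remote // their updates below theirLogIndex
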
// fetchCommitmentView returns a populated commitment which expresses the state
// of the channel from the point of view of a local or remote chain, evaluating
// the HTLC log up to the passed indexes. This function is used to construct
// both local and remote commitment transactions in order to sign or verify new
// commitment updates. A fully populated commitment is returned which reflects
// the proper balances for both sides at this point in the commitment chain.
func (lc *LightningChannel) fetchCommitmentView(
	whoseCommitChain lntypes.ChannelParty,
	ourLogIndex, ourHtlcIndex, theirLogIndex, theirHtlcIndex uint64,
	keyRing *CommitmentKeyRing) (*commitment, error) {

	commitChain := lc.commitChains.Local
	dustLimit := lc.channelState.LocalChanCfg.DustLimit
	if whoseCommitChain.IsRemote() {
		commitChain = lc.commitChains.Remote
		dustLimit = lc.channelState.RemoteChanCfg.DustLimit
	}

	nextHeight := commitChain.tip().height + 1

	// Run through all the HTLCs that will be covered by this transaction
	// in order to update their commitment addition height, and to adjust
	// the balances on the commitment transaction accordingly. Note that
	// these balances will be *before* taking a commitment fee from the
	// initiator.
	htlcView := lc.fetchHTLCView(theirLogIndex, ourLogIndex)
	ourBalance, theirBalance, _, filteredHTLCView, err := lc.computeView(
		htlcView, whoseCommitChain, true,
		fn.None[chainfee.SatPerKWeight](),
	)
	if err != nil {
		return nil, err
	}
	feePerKw := filteredHTLCView.FeePerKw

	htlcView.NextHeight = nextHeight
	filteredHTLCView.NextHeight = nextHeight

	// Actually generate unsigned commitment transaction for this view.
	commitTx, err := lc.commitBuilder.createUnsignedCommitmentTx(
		ourBalance, theirBalance, whoseCommitChain, feePerKw,
		nextHeight, htlcView, filteredHTLCView, keyRing,
		commitChain.tip(),
	)
	if err != nil {
		return nil, err
	}

	// We'll assert that there hasn't been a mistake during fee calculation
	// leading to a fee too low.
	var totalOut btcutil.Amount
	for _, txOut := range commitTx.txn.TxOut {
		totalOut += btcutil.Amount(txOut.Value)
	}
	fee := lc.channelState.Capacity - totalOut

	var witnessWeight int64
	if lc.channelState.ChanType.IsTaproot() {
		witnessWeight = input.TaprootKeyPathWitnessSize
	} else {
		witnessWeight = input.WitnessCommitmentTxWeight
	}

	// Since the transaction is not signed yet, we use the witness weight
	// used for weight calculation.
	uTx := btcutil.NewTx(commitTx.txn)
	weight := blockchain.GetTransactionWeight(uTx) + witnessWeight

	effFeeRate := chainfee.SatPerKWeight(fee) * 1000 /
		chainfee.SatPerKWeight(weight)
	if effFeeRate < chainfee.AbsoluteFeePerKwFloor {
		return nil, fmt.Errorf("height=%v, for ChannelPoint(%v) "+
			"attempts to create commitment with feerate %v: %v",
			nextHeight, lc.channelState.FundingOutpoint,
			effFeeRate, spew.Sdump(commitTx))
	}

	// Given the custom blob of the past state, and this new HTLC view,
	// we'll generate a new blob for the latest commitment.
	newCommitBlob, err := fn.MapOptionZ(
		lc.leafStore,
		func(s AuxLeafStore) fn.Result[fn.Option[tlv.Blob]] {
			return updateAuxBlob(
				s, lc.channelState,
				commitChain.tip().customBlob, htlcView,
				whoseCommitChain, ourBalance, theirBalance,
				*keyRing,
			)
		},
	).Unpack()
	if err != nil {
		return nil, fmt.Errorf("unable to fetch aux leaves: %w", err)
	}

	messageIndices := lntypes.Dual[uint64]{
		Local:  ourLogIndex,
		Remote: theirLogIndex,
	}

	// With the commitment view created, store the resulting balances and
	// transaction with the other parameters for this height.
	c := &commitment{
		ourBalance:     commitTx.ourBalance,
		theirBalance:   commitTx.theirBalance,
		txn:            commitTx.txn,
		fee:            commitTx.fee,
		messageIndices: messageIndices,
		ourHtlcIndex:   ourHtlcIndex,
		theirHtlcIndex: theirHtlcIndex,
		height:         nextHeight,
		feePerKw:       feePerKw,
		dustLimit:      dustLimit,
		whoseCommit:    whoseCommitChain,
		customBlob:     newCommitBlob,
	}

	// In order to ensure _none_ of the HTLC's associated with this new
	// commitment are mutated, we'll manually copy over each HTLC to its
	// respective slice.
	c.outgoingHTLCs = make(
		[]paymentDescriptor, len(filteredHTLCView.Updates.Local),
	)
	for i, htlc := range filteredHTLCView.Updates.Local {
		c.outgoingHTLCs[i] = *htlc
	}
	c.incomingHTLCs = make(
		[]paymentDescriptor, len(filteredHTLCView.Updates.Remote),
	)
	for i, htlc := range filteredHTLCView.Updates.Remote {
		c.incomingHTLCs[i] = *htlc
	}

	// Finally, we'll populate all the HTLC indexes so we can track the
	// locations of each HTLC in the commitment state. We pass in the
	// sorted slice of CLTV deltas in order to properly locate HTLCs that
	// otherwise have the same payment hash and amount.
	err = c.populateHtlcIndexes(lc.channelState.ChanType, commitTx.cltvs)
	if err != nil {
		return nil, err
	}

	return c, nil
}

// fundingTxIn returns the funding output as a transaction input. The input
// returned by this function uses a max sequence number, so it isn't able to
// be used with RBF by default.
func fundingTxIn(chanState *channeldb.OpenChannel) wire.TxIn {
	return *wire.NewTxIn(&chanState.FundingOutpoint, nil, nil)
}

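// Since the returned input carries the default max sequence number, a caller
// that wants the spend to signal RBF can lower the sequence afterwards, e.g.
// (sketch only, the exact sequence value is illustrative):
//
//	txIn := fundingTxIn(chanState)
//	txIn.Sequence = wire.MaxTxInSequenceNum - 2
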
// evaluateHTLCView processes all update entries in both HTLC update logs,
// producing a final view which is the result of properly applying all adds,
// settles, timeouts and fee updates found in both logs. The resulting view
// returned reflects the current state of HTLCs within the remote or local
// commitment chain, and the current commitment fee rate.
//
// The return values of this function are as follows:
//  1. The new htlcView reflecting the current channel state.
//  2. A Dual of the updates which have not yet been committed in
//     'whoseCommitChain's commitment chain.
func (lc *LightningChannel) evaluateHTLCView(view *HtlcView, ourBalance,
	theirBalance *lnwire.MilliSatoshi, nextHeight uint64,
	whoseCommitChain lntypes.ChannelParty) (*HtlcView,
	lntypes.Dual[[]*paymentDescriptor], error) {

	// We initialize the view's fee rate to the fee rate of the unfiltered
	// view. If any fee updates are found when evaluating the view, it will
	// be updated.
	newView := &HtlcView{
		FeePerKw:   view.FeePerKw,
		NextHeight: nextHeight,
	}

	// The fee rate of our view is always taken from the last UpdateFee
	// message from the channel's opening party (the initiator).
	openerUpdates := view.Updates.GetForParty(lc.channelState.Initiator())
	feeUpdates := fn.Filter(func(u *paymentDescriptor) bool {
		return u.EntryType == FeeUpdate
	}, openerUpdates)
	lastFeeUpdate := fn.Last(feeUpdates)
	lastFeeUpdate.WhenSome(func(pd *paymentDescriptor) {
		newView.FeePerKw = chainfee.SatPerKWeight(
			pd.Amount.ToSatoshis(),
		)
	})

	// We use two sets, one for the local log and one for the remote log,
	// to keep track of which entries we need to skip when creating the
	// final htlc view. We skip an entry whenever we find a settle or a
	// timeout modifying an entry.
	skipUs := fn.NewSet[uint64]()
	skipThem := fn.NewSet[uint64]()

	// First we run through non-add entries in both logs, populating the
	// skip sets.
	for _, entry := range view.Updates.Local {
		switch entry.EntryType {
		// Skip adds for now. They will be processed below.
		case Add:
			continue

		// Skip fee updates because we've already dealt with them
		// above.
		case FeeUpdate:
			continue
		}

		addEntry, err := lc.fetchParent(
			entry, whoseCommitChain, lntypes.Remote,
		)
		if err != nil {
			return nil, lntypes.Dual[[]*paymentDescriptor]{}, err
		}

		skipThem.Add(addEntry.HtlcIndex)

		rmvHeight := entry.removeCommitHeights.GetForParty(
			whoseCommitChain,
		)
		if rmvHeight == 0 {
			processRemoveEntry(
				entry, ourBalance, theirBalance,
				lntypes.Remote,
			)
		}
	}

	// Do the same for our peer's updates.
	for _, entry := range view.Updates.Remote {
		switch entry.EntryType {
		// Skip adds for now. They will be processed below.
		case Add:
			continue

		// Skip fee updates because we've already dealt with them
		// above.
		case FeeUpdate:
			continue
		}

		addEntry, err := lc.fetchParent(
			entry, whoseCommitChain, lntypes.Local,
		)
		if err != nil {
			return nil, lntypes.Dual[[]*paymentDescriptor]{}, err
		}

		skipUs.Add(addEntry.HtlcIndex)

		rmvHeight := entry.removeCommitHeights.GetForParty(
			whoseCommitChain,
		)
		if rmvHeight == 0 {
			processRemoveEntry(
				entry, ourBalance, theirBalance, lntypes.Local,
			)
		}
	}

	// Next we take a second pass through all the log entries, skipping any
	// settled HTLCs, and debiting the chain state balance due to any newly
	// added HTLCs.
	for _, entry := range view.Updates.Local {
		isAdd := entry.EntryType == Add
		if skipUs.Contains(entry.HtlcIndex) || !isAdd {
			continue
		}

		// Skip the entries that have already had their add commit
		// height set for this commit chain.
		addHeight := entry.addCommitHeights.GetForParty(
			whoseCommitChain,
		)
		if addHeight == 0 {
			processAddEntry(
				entry, ourBalance, theirBalance, lntypes.Local,
			)
		}

		newView.Updates.Local = append(newView.Updates.Local, entry)
	}

	// Again, we do the same for our peer's updates.
	for _, entry := range view.Updates.Remote {
		isAdd := entry.EntryType == Add
		if skipThem.Contains(entry.HtlcIndex) || !isAdd {
			continue
		}

		// Skip the entries that have already had their add commit
		// height set for this commit chain.
		addHeight := entry.addCommitHeights.GetForParty(
			whoseCommitChain,
		)
		if addHeight == 0 {
			processAddEntry(
				entry, ourBalance, theirBalance, lntypes.Remote,
			)
		}

		newView.Updates.Remote = append(newView.Updates.Remote, entry)
	}

	// Create a function that is capable of identifying whether or not the
	// paymentDescriptor has been committed in the commitment chain
	// corresponding to whoseCommitChain.
	isUncommitted := func(update *paymentDescriptor) bool {
		switch update.EntryType {
		case Add:
			return update.addCommitHeights.GetForParty(
				whoseCommitChain,
			) == 0

		case FeeUpdate:
			return update.addCommitHeights.GetForParty(
				whoseCommitChain,
			) == 0

		case Settle, Fail, MalformedFail:
			return update.removeCommitHeights.GetForParty(
				whoseCommitChain,
			) == 0

		default:
			panic("invalid paymentDescriptor EntryType")
		}
	}

	// Collect all of the updates that haven't had their commit heights set
	// for the commitment chain corresponding to whoseCommitChain.
	uncommittedUpdates := lntypes.MapDual(
		view.Updates,
		func(us []*paymentDescriptor) []*paymentDescriptor {
			return fn.Filter(isUncommitted, us)
		},
	)

	return newView, uncommittedUpdates, nil
}

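// For orientation, a minimal sketch of a possible call when evaluating the
// view against the remote chain (the surrounding variables are assumed to be
// in scope):
//
//	newView, uncommitted, err := lc.evaluateHTLCView(
//		htlcView, &ourBalance, &theirBalance, nextHeight,
//		lntypes.Remote,
//	)
//	if err != nil {
//		// The update log is inconsistent; abort the state transition.
//	}
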
// fetchParent is a helper that looks up update log parent entries in the
// appropriate log.
func (lc *LightningChannel) fetchParent(entry *paymentDescriptor,
	whoseCommitChain, whoseUpdateLog lntypes.ChannelParty,
) (*paymentDescriptor, error) {

	var (
		updateLog *updateLog
		logName   string
	)

	if whoseUpdateLog.IsRemote() {
		updateLog = lc.updateLogs.Remote
		logName = "remote"
	} else {
		updateLog = lc.updateLogs.Local
		logName = "local"
	}

	addEntry := updateLog.lookupHtlc(entry.ParentIndex)

	switch {
	// We check if the parent entry is not found at this point.
	// This could happen for old versions of lnd, and we return an
	// error to gracefully shut down the state machine if such an
	// entry is still in the logs.
	case addEntry == nil:
		return nil, fmt.Errorf("unable to find parent entry "+
			"%d in %v update log: %v\nUpdatelog: %v",
			entry.ParentIndex, logName,
			lnutils.SpewLogClosure(entry),
			lnutils.SpewLogClosure(updateLog))

	// The parent add height should never be zero at this point. If
	// that's the case we probably forgot to send a new commitment.
	case addEntry.addCommitHeights.GetForParty(whoseCommitChain) == 0:
		return nil, fmt.Errorf("parent entry %d for update %d "+
			"had zero %v add height", entry.ParentIndex,
			entry.LogIndex, whoseCommitChain)
	}

	return addEntry, nil
}

// processAddEntry evaluates the effect of an add entry within the HTLC log.
// If the HTLC hasn't yet been committed in either chain, then the height it
// was committed is updated. Keeping track of this inclusion height allows us
// to later compact the log once the change is fully committed in both chains.
func processAddEntry(htlc *paymentDescriptor, ourBalance,
	theirBalance *lnwire.MilliSatoshi, originator lntypes.ChannelParty) {

	if originator == lntypes.Remote {
		// If this is a new incoming (un-committed) HTLC, then we need
		// to update their balance accordingly by subtracting the
		// amount of the HTLC, as those funds are pending.
		*theirBalance -= htlc.Amount
	} else {
		// Similarly, we need to debit our balance if this is an
		// outgoing HTLC to reflect the pending balance.
		*ourBalance -= htlc.Amount
	}
}

// processRemoveEntry processes a log entry which settles or times out a
// previously added HTLC. If the removal entry has already been processed, it
// is skipped.
func processRemoveEntry(htlc *paymentDescriptor, ourBalance,
	theirBalance *lnwire.MilliSatoshi, originator lntypes.ChannelParty) {

	switch {
	// If an incoming HTLC is being settled, then this means that we've
	// received the preimage either from another subsystem, or the
	// upstream peer in the route. Therefore, we increase our balance by
	// the HTLC amount.
	case originator == lntypes.Remote && htlc.EntryType == Settle:
		*ourBalance += htlc.Amount

	// Otherwise, this HTLC is being failed out, therefore the value of the
	// HTLC should return to the remote party.
	case originator == lntypes.Remote &&
		(htlc.EntryType == Fail || htlc.EntryType == MalformedFail):

		*theirBalance += htlc.Amount

	// If an outgoing HTLC is being settled, then this means that the
	// downstream party presented the preimage or learned of it via a
	// downstream peer. In either case, we credit their settled value with
	// the value of the HTLC.
	case originator == lntypes.Local && htlc.EntryType == Settle:
		*theirBalance += htlc.Amount

	// Otherwise, one of our outgoing HTLC's has timed out, so the value of
	// the HTLC should be returned to our settled balance.
	case originator == lntypes.Local &&
		(htlc.EntryType == Fail || htlc.EntryType == MalformedFail):

		*ourBalance += htlc.Amount
	}
}

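// For intuition, a small worked example of the bookkeeping above (amounts are
// hypothetical, in millisatoshi): settling an incoming 1,000 msat HTLC moves
// its value into our balance, while failing it would instead return the value
// to the remote party:
//
//	our, their := lnwire.MilliSatoshi(5_000), lnwire.MilliSatoshi(5_000)
//	htlc := &paymentDescriptor{Amount: 1_000, EntryType: Settle}
//	processRemoveEntry(htlc, &our, &their, lntypes.Remote)
//	// our == 6_000, their == 5_000
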
// generateRemoteHtlcSigJobs generates a series of HTLC signature jobs for the
|
|
|
|
// sig pool, along with a channel that if closed, will cancel any jobs after
|
|
|
|
// they have been submitted to the sigPool. This method is to be used when
|
|
|
|
// generating a new commitment for the remote party. The jobs generated by
|
|
|
|
// this method can be submitted to the sigPool to generate all the signatures
|
|
|
|
// asynchronously and in parallel.
|
2018-01-18 22:45:30 +01:00
|
|
|
func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
|
2024-04-25 19:00:42 +02:00
|
|
|
chanState *channeldb.OpenChannel, leaseExpiry uint32,
|
|
|
|
remoteCommitView *commitment,
|
2024-04-09 04:48:36 +02:00
|
|
|
leafStore fn.Option[AuxLeafStore]) ([]SignJob, []AuxSigJob,
|
|
|
|
chan struct{}, error) {
|
2024-04-25 19:00:42 +02:00
|
|
|
|
|
|
|
var (
|
|
|
|
isRemoteInitiator = !chanState.IsInitiator
|
|
|
|
localChanCfg = chanState.LocalChanCfg
|
|
|
|
remoteChanCfg = chanState.RemoteChanCfg
|
|
|
|
chanType = chanState.ChanType
|
|
|
|
)
|
2017-07-30 22:00:24 +02:00
|
|
|
|
|
|
|
txHash := remoteCommitView.txn.TxHash()
|
2018-03-22 13:04:57 +01:00
|
|
|
dustLimit := remoteChanCfg.DustLimit
|
2017-07-30 22:00:24 +02:00
|
|
|
feePerKw := remoteCommitView.feePerKw
|
2020-03-06 16:11:47 +01:00
|
|
|
sigHashType := HtlcSigHashType(chanType)
|
2017-07-30 22:00:24 +02:00
|
|
|
|
|
|
|
// With the keys generated, we'll make a slice with enough capacity to
|
2018-04-18 04:02:04 +02:00
|
|
|
// hold potentially all the HTLCs. The actual slice may be a bit
|
|
|
|
// smaller (than its total capacity) and some HTLCs may be dust.
|
2024-04-25 19:01:37 +02:00
|
|
|
numSigs := len(remoteCommitView.incomingHTLCs) +
|
|
|
|
len(remoteCommitView.outgoingHTLCs)
|
multi: replace per channel sigPool with global daemon level sigPool
In this commit, we remove the per channel `sigPool` within the
`lnwallet.LightningChannel` struct. With this change, we ensure that as
the number of channels grows, the number of goroutines idling in the
sigPool stays constant. It's the case that currently on the daemon, most
channels are likely inactive, with only a handful actually
consistently carrying out channel updates. As a result, this change
should reduce the amount of idle CPU usage, as we have fewer active
goroutines in select loops.
In order to make this change, the `SigPool` itself has been publicly
exported such that outside callers can make a `SigPool` and pass it into
newly created channels. Since the sig pool now lives outside the
channel, we were also able to do away with the Stop() method on the
channel altogether.
Finally, the server is the sub-system that is currently responsible for
managing the `SigPool` within lnd.
2018-12-15 01:35:07 +01:00
|
|
|
sigBatch := make([]SignJob, 0, numSigs)
|
2024-04-09 04:48:36 +02:00
|
|
|
auxSigBatch := make([]AuxSigJob, 0, numSigs)
|
2017-07-30 22:00:24 +02:00
|
|
|
|
|
|
|
var err error
|
|
|
|
cancelChan := make(chan struct{})
|
|
|
|
|
2024-09-02 11:01:07 +02:00
|
|
|
diskCommit := remoteCommitView.toDiskCommit(lntypes.Remote)
|
2024-04-25 19:01:37 +02:00
|
|
|
auxResult, err := fn.MapOptionZ(
|
|
|
|
leafStore, func(s AuxLeafStore) fn.Result[CommitDiffAuxResult] {
|
|
|
|
return s.FetchLeavesFromCommit(
|
2024-09-02 11:01:07 +02:00
|
|
|
NewAuxChanState(chanState), *diskCommit,
|
2024-04-25 19:01:37 +02:00
|
|
|
*keyRing,
|
|
|
|
)
|
|
|
|
},
|
|
|
|
).Unpack()
|
|
|
|
if err != nil {
|
2024-04-09 04:48:36 +02:00
|
|
|
return nil, nil, nil, fmt.Errorf("unable to fetch aux leaves: "+
|
|
|
|
"%w", err)
|
2024-04-25 19:01:37 +02:00
|
|
|
}
|
|
|
|
|
2018-04-18 04:02:04 +02:00
|
|
|
// For each outgoing and incoming HTLC, if the HTLC isn't considered a
|
2017-07-30 22:00:24 +02:00
|
|
|
// dust output after taking into account second-level HTLC fees, then a
|
|
|
|
// sigJob will be generated and appended to the current batch.
|
|
|
|
for _, htlc := range remoteCommitView.incomingHTLCs {
|
2021-09-28 17:34:10 +02:00
|
|
|
if HtlcIsDust(
|
2024-07-31 01:44:18 +02:00
|
|
|
chanType, true, lntypes.Remote, feePerKw,
|
2020-03-06 16:11:49 +01:00
|
|
|
htlc.Amount.ToSatoshis(), dustLimit,
|
|
|
|
) {
|
2022-02-07 13:58:28 +01:00
|
|
|
|
2017-07-30 22:00:24 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the HTLC isn't dust, then we'll create an empty sign job
|
|
|
|
// to add to the batch momentarily.
|
2023-01-20 04:21:49 +01:00
|
|
|
var sigJob SignJob
|
multi: replace per channel sigPool with global daemon level sigPool
In this commit, we remove the per channel `sigPool` within the
`lnwallet.LightningChannel` struct. With this change, we ensure that as
the number of channels grows, the number of goroutines idling in the
sigPool stays constant. It's the case that currently on the daemon, most
channels are likely inactive, with only a handful actually
consistently carrying out channel updates. As a result, this change
should reduce the amount of idle CPU usage, as we have fewer active
goroutines in select loops.
In order to make this change, the `SigPool` itself has been publicly
exported such that outside callers can make a `SigPool` and pass it into
newly created channels. Since the sig pool now lives outside the
channel, we were also able to do away with the Stop() method on the
channel altogether.
Finally, the server is the sub-system that is currently responsible for
managing the `SigPool` within lnd.
2018-12-15 01:35:07 +01:00
|
|
|
sigJob.Cancel = cancelChan
|
|
|
|
sigJob.Resp = make(chan SignJobResp, 1)
|
2017-07-30 22:00:24 +02:00
|
|
|
|
|
|
|
// As this is an incoming HTLC and we're signing the commitment
|
|
|
|
// transaction of the remote node, we'll need to generate an
|
|
|
|
// HTLC timeout transaction for them. The output of the timeout
|
|
|
|
// transaction needs to account for fees, so we'll compute the
|
|
|
|
// required fee and output now.
|
2020-03-06 16:11:49 +01:00
|
|
|
htlcFee := HtlcTimeoutFee(chanType, feePerKw)
|
2017-08-22 08:20:29 +02:00
|
|
|
outputAmt := htlc.Amount.ToSatoshis() - htlcFee
|
2017-07-30 22:00:24 +02:00
|
|
|
|
2024-04-25 19:01:37 +02:00
|
|
|
auxLeaf := fn.ChainOption(
|
|
|
|
func(l CommitAuxLeaves) input.AuxTapLeaf {
|
|
|
|
leaves := l.IncomingHtlcLeaves
|
|
|
|
return leaves[htlc.HtlcIndex].SecondLevelLeaf
|
|
|
|
},
|
|
|
|
)(auxResult.AuxLeaves)
|
|
|
|
|
2017-07-30 22:00:24 +02:00
|
|
|
// With the fee calculated, we can properly create the HTLC
|
|
|
|
// timeout transaction using the HTLC amount minus the fee.
|
|
|
|
op := wire.OutPoint{
|
|
|
|
Hash: txHash,
|
|
|
|
Index: uint32(htlc.remoteOutputIndex),
|
|
|
|
}
|
2020-11-17 12:50:41 +01:00
|
|
|
sigJob.Tx, err = CreateHtlcTimeoutTx(
|
2021-07-15 02:16:13 +02:00
|
|
|
chanType, isRemoteInitiator, op, outputAmt,
|
|
|
|
htlc.Timeout, uint32(remoteChanCfg.CsvDelay),
|
|
|
|
leaseExpiry, keyRing.RevocationKey, keyRing.ToLocalKey,
|
2024-04-25 19:01:37 +02:00
|
|
|
auxLeaf,
|
2018-04-05 02:41:05 +02:00
|
|
|
)
|
2017-07-30 22:00:24 +02:00
|
|
|
if err != nil {
|
2024-04-09 04:48:36 +02:00
|
|
|
return nil, nil, nil, err
|
2017-07-30 22:00:24 +02:00
|
|
|
}
|
|
|
|
|
2023-01-20 04:21:49 +01:00
|
|
|
// Construct a full hash cache as we may be signing a segwit v1
|
|
|
|
// sighash.
|
|
|
|
txOut := remoteCommitView.txn.TxOut[htlc.remoteOutputIndex]
|
|
|
|
prevFetcher := txscript.NewCannedPrevOutputFetcher(
|
|
|
|
txOut.PkScript, int64(htlc.Amount.ToSatoshis()),
|
|
|
|
)
|
|
|
|
hashCache := txscript.NewTxSigHashes(sigJob.Tx, prevFetcher)
|
|
|
|
|
2017-07-30 22:00:24 +02:00
|
|
|
// Finally, we'll generate a sign descriptor to generate a
|
|
|
|
// signature to give to the remote party for this commitment
|
|
|
|
// transaction. Note we use the raw HTLC amount.
|
2019-01-16 15:47:43 +01:00
|
|
|
sigJob.SignDesc = input.SignDescriptor{
|
2023-01-20 04:21:49 +01:00
|
|
|
KeyDesc: localChanCfg.HtlcBasePoint,
|
|
|
|
SingleTweak: keyRing.LocalHtlcKeyTweak,
|
|
|
|
WitnessScript: htlc.theirWitnessScript,
|
|
|
|
Output: txOut,
|
|
|
|
PrevOutputFetcher: prevFetcher,
|
|
|
|
HashType: sigHashType,
|
|
|
|
SigHashes: hashCache,
|
|
|
|
InputIndex: 0,
|
2017-07-30 22:00:24 +02:00
|
|
|
}
|
multi: replace per channel sigPool with global daemon level sigPool
In this commit, we remove the per channel `sigPool` within the
`lnwallet.LightningChannel` struct. With this change, we ensure that as
the number of channels grows, the number of goroutines idling in the
sigPool stays constant. It's the case that currently on the daemon, most
channels are likely inactive, with only a handful actually
consistently carrying out channel updates. As a result, this change
should reduce the amount of idle CPU usage, as we have fewer active
goroutines in select loops.
In order to make this change, the `SigPool` itself has been publicly
exported such that outside callers can make a `SigPool` and pass it into
newly created channels. Since the sig pool now lives outside the
channel, we were also able to do away with the Stop() method on the
channel altogether.
Finally, the server is the sub-system that is currently responsible for
managing the `SigPool` within lnd.
2018-12-15 01:35:07 +01:00
|
|
|
sigJob.OutputIndex = htlc.remoteOutputIndex
|
2017-07-30 22:00:24 +02:00
|
|
|
|
2023-01-20 04:21:49 +01:00
|
|
|
// If this is a taproot channel, then we'll need to set the
|
|
|
|
// method type to ensure we generate a valid signature.
|
|
|
|
if chanType.IsTaproot() {
|
2024-04-25 19:01:37 +02:00
|
|
|
//nolint:lll
|
|
|
|
sigJob.SignDesc.SignMethod = input.TaprootScriptSpendSignMethod
|
2023-01-20 04:21:49 +01:00
|
|
|
}
|
|
|
|
|
2017-07-30 22:00:24 +02:00
|
|
|
sigBatch = append(sigBatch, sigJob)
|
2024-04-09 04:48:36 +02:00
|
|
|
|
|
|
|
auxSigBatch = append(auxSigBatch, NewAuxSigJob(
|
|
|
|
sigJob, *keyRing, true, newAuxHtlcDescriptor(&htlc),
|
|
|
|
remoteCommitView.customBlob, auxLeaf, cancelChan,
|
|
|
|
))
|
2017-07-30 22:00:24 +02:00
|
|
|
}
|
|
|
|
for _, htlc := range remoteCommitView.outgoingHTLCs {
|
2021-09-28 17:34:10 +02:00
|
|
|
if HtlcIsDust(
|
2024-07-31 01:44:18 +02:00
|
|
|
chanType, false, lntypes.Remote, feePerKw,
|
2020-03-06 16:11:49 +01:00
|
|
|
htlc.Amount.ToSatoshis(), dustLimit,
|
|
|
|
) {
|
2022-02-07 13:58:28 +01:00
|
|
|
|
2017-07-30 22:00:24 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
multi: replace per channel sigPool with global daemon level sigPool
In this commit, we remove the per channel `sigPool` within the
`lnwallet.LightningChannel` struct. With this change, we ensure that as
the number of channels grows, the number of goroutines idling in the
sigPool stays constant. It's the case that currently on the daemon, most
channels are likely inactive, with only a handful actually
consistently carrying out channel updates. As a result, this change
should reduce the amount of idle CPU usage, as we have fewer active
goroutines in select loops.
In order to make this change, the `SigPool` itself has been publicly
exported such that outside callers can make a `SigPool` and pass it into
newly created channels. Since the sig pool now lives outside the
channel, we were also able to do away with the Stop() method on the
channel altogether.
Finally, the server is the sub-system that is currently responsible for
managing the `SigPool` within lnd.
2018-12-15 01:35:07 +01:00
|
|
|
sigJob := SignJob{}
|
|
|
|
sigJob.Cancel = cancelChan
|
|
|
|
sigJob.Resp = make(chan SignJobResp, 1)
|
2017-07-30 22:00:24 +02:00
|
|
|
|
|
|
|
// As this is an outgoing HTLC and we're signing the commitment
|
|
|
|
// transaction of the remote node, we'll need to generate an
|
|
|
|
// HTLC success transaction for them. The output of the success
|
|
|
|
// transaction needs to account for fees, so we'll compute the
|
|
|
|
// required fee and output now.
|
2020-03-06 16:11:49 +01:00
|
|
|
htlcFee := HtlcSuccessFee(chanType, feePerKw)
|
2017-08-22 08:20:29 +02:00
|
|
|
outputAmt := htlc.Amount.ToSatoshis() - htlcFee
|
2017-07-30 22:00:24 +02:00
|
|
|
|
2024-04-25 19:01:37 +02:00
|
|
|
auxLeaf := fn.ChainOption(
|
|
|
|
func(l CommitAuxLeaves) input.AuxTapLeaf {
|
|
|
|
leaves := l.OutgoingHtlcLeaves
|
|
|
|
return leaves[htlc.HtlcIndex].SecondLevelLeaf
|
|
|
|
},
|
|
|
|
)(auxResult.AuxLeaves)
|
|
|
|
|
2017-07-30 22:00:24 +02:00
|
|
|
// With the proper output amount calculated, we can now
|
|
|
|
// generate the success transaction using the remote party's
|
|
|
|
// CSV delay.
|
|
|
|
op := wire.OutPoint{
|
|
|
|
Hash: txHash,
|
|
|
|
Index: uint32(htlc.remoteOutputIndex),
|
|
|
|
}
|
2020-11-17 12:50:41 +01:00
|
|
|
sigJob.Tx, err = CreateHtlcSuccessTx(
|
2021-07-15 02:16:13 +02:00
|
|
|
chanType, isRemoteInitiator, op, outputAmt,
|
|
|
|
uint32(remoteChanCfg.CsvDelay), leaseExpiry,
|
2020-01-06 11:42:04 +01:00
|
|
|
keyRing.RevocationKey, keyRing.ToLocalKey,
|
2024-04-25 19:01:37 +02:00
|
|
|
auxLeaf,
|
2018-04-05 02:41:05 +02:00
|
|
|
)
|
2017-07-30 22:00:24 +02:00
|
|
|
if err != nil {
|
2024-04-09 04:48:36 +02:00
|
|
|
return nil, nil, nil, err
|
2017-07-30 22:00:24 +02:00
|
|
|
}
|
|
|
|
|
2023-01-20 04:21:49 +01:00
|
|
|
// Construct a full hash cache as we may be signing a segwit v1
|
|
|
|
// sighash.
|
|
|
|
txOut := remoteCommitView.txn.TxOut[htlc.remoteOutputIndex]
|
|
|
|
prevFetcher := txscript.NewCannedPrevOutputFetcher(
|
|
|
|
txOut.PkScript, int64(htlc.Amount.ToSatoshis()),
|
|
|
|
)
|
|
|
|
hashCache := txscript.NewTxSigHashes(sigJob.Tx, prevFetcher)
|
|
|
|
|
2017-07-30 22:00:24 +02:00
|
|
|
// Finally, we'll generate a sign descriptor to generate a
|
|
|
|
// signature to give to the remote party for this commitment
|
|
|
|
// transaction. Note we use the raw HTLC amount.
|
2019-01-16 15:47:43 +01:00
|
|
|
sigJob.SignDesc = input.SignDescriptor{
|
2023-01-20 04:21:49 +01:00
|
|
|
KeyDesc: localChanCfg.HtlcBasePoint,
|
|
|
|
SingleTweak: keyRing.LocalHtlcKeyTweak,
|
|
|
|
WitnessScript: htlc.theirWitnessScript,
|
|
|
|
Output: txOut,
|
|
|
|
PrevOutputFetcher: prevFetcher,
|
|
|
|
HashType: sigHashType,
|
|
|
|
SigHashes: hashCache,
|
|
|
|
InputIndex: 0,
|
2017-07-30 22:00:24 +02:00
|
|
|
}
|
multi: replace per channel sigPool with global daemon level sigPool
In this commit, we remove the per channel `sigPool` within the
`lnwallet.LightningChannel` struct. With this change, we ensure that as
the number of channels grows, the number of goroutines idling in the
sigPool stays constant. It's the case that currently on the daemon, most
channels are likely inactive, with only a handful actually
consistently carrying out channel updates. As a result, this change
should reduce the amount of idle CPU usage, as we have fewer active
goroutines in select loops.
In order to make this change, the `SigPool` itself has been publicly
exported such that outside callers can make a `SigPool` and pass it into
newly created channels. Since the sig pool now lives outside the
channel, we were also able to do away with the Stop() method on the
channel altogether.
Finally, the server is the sub-system that is currently responsible for
managing the `SigPool` within lnd.
2018-12-15 01:35:07 +01:00
|
|
|
sigJob.OutputIndex = htlc.remoteOutputIndex
|
2017-07-30 22:00:24 +02:00
|
|
|
|
2023-01-20 04:21:49 +01:00
|
|
|
// If this is a taproot channel, then we'll need to set the
|
|
|
|
// method type to ensure we generate a valid signature.
|
|
|
|
if chanType.IsTaproot() {
|
2024-04-09 04:48:36 +02:00
|
|
|
//nolint:lll
|
|
|
|
sigJob.SignDesc.SignMethod = input.TaprootScriptSpendSignMethod
|
2023-01-20 04:21:49 +01:00
|
|
|
}
|
|
|
|
|
2017-07-30 22:00:24 +02:00
|
|
|
sigBatch = append(sigBatch, sigJob)
|
2024-04-09 04:48:36 +02:00
|
|
|
|
|
|
|
auxSigBatch = append(auxSigBatch, NewAuxSigJob(
|
|
|
|
sigJob, *keyRing, false, newAuxHtlcDescriptor(&htlc),
|
|
|
|
remoteCommitView.customBlob, auxLeaf, cancelChan,
|
|
|
|
))
|
2017-07-30 22:00:24 +02:00
|
|
|
}
|
|
|
|
|
2024-04-09 04:48:36 +02:00
|
|
|
return sigBatch, auxSigBatch, cancelChan, nil
|
2017-07-30 22:00:24 +02:00
|
|
|
}
|
|
|
|
|
2017-11-10 07:55:38 +01:00
|
|
|
// createCommitDiff will create a commit diff given a new pending commitment
|
|
|
|
// for the remote party and the necessary signatures for the remote party to
|
|
|
|
// validate this new state. This function is called right before sending the
|
|
|
|
// new commitment to the remote party. The commit diff returned contains all
|
|
|
|
// information necessary for retransmission.
|
2024-04-25 19:00:42 +02:00
|
|
|
func (lc *LightningChannel) createCommitDiff(newCommit *commitment,
|
2024-04-09 04:48:36 +02:00
|
|
|
commitSig lnwire.Sig, htlcSigs []lnwire.Sig,
|
|
|
|
auxSigs []fn.Option[tlv.Blob]) (*channeldb.CommitDiff, error) {
|
2017-11-10 07:55:38 +01:00
|
|
|
|
2018-02-28 05:01:41 +01:00
|
|
|
var (
|
2019-01-10 12:23:57 +01:00
|
|
|
logUpdates []channeldb.LogUpdate
|
2018-02-28 05:01:41 +01:00
|
|
|
ackAddRefs []channeldb.AddRef
|
|
|
|
settleFailRefs []channeldb.SettleFailRef
|
2022-11-18 12:15:22 +01:00
|
|
|
openCircuitKeys []models.CircuitKey
|
|
|
|
closedCircuitKeys []models.CircuitKey
|
2018-02-28 05:01:41 +01:00
|
|
|
)
|
|
|
|
|
2017-11-10 07:55:38 +01:00
|
|
|
// We'll now run through our local update log to locate the items which
|
|
|
|
// were only just committed within this pending state. This will be the
|
|
|
|
// set of items we need to retransmit if we reconnect and find that
|
|
|
|
// they didn't process this new state fully.
|
2024-08-09 22:00:59 +02:00
|
|
|
for e := lc.updateLogs.Local.Front(); e != nil; e = e.Next() {
|
2024-05-01 00:23:50 +02:00
|
|
|
pd := e.Value
|
2017-08-14 11:14:04 +02:00
|
|
|
|
2017-11-10 07:55:38 +01:00
|
|
|
// If this entry wasn't committed at the exact height of this
|
|
|
|
// remote commitment, then we'll skip it as it was already
|
|
|
|
// lingering in the log.
|
2024-07-17 00:35:58 +02:00
|
|
|
if pd.addCommitHeights.Remote != newCommit.height &&
|
|
|
|
pd.removeCommitHeights.Remote != newCommit.height {
|
2017-11-10 07:55:38 +01:00
|
|
|
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
// We'll map the type of the paymentDescriptor to one of the
|
2017-11-10 07:55:38 +01:00
|
|
|
// four messages that it corresponds to. With this set of
|
|
|
|
// messages obtained, we can simply read from disk and re-send
|
|
|
|
// them in the case of a needed channel sync.
|
|
|
|
switch pd.EntryType {
|
|
|
|
case Add:
|
2018-02-28 05:01:41 +01:00
|
|
|
// Gather any references for circuits opened by this Add
|
|
|
|
// HTLC.
|
2018-02-28 05:04:41 +01:00
|
|
|
if pd.OpenCircuitKey != nil {
|
2024-06-15 02:38:52 +02:00
|
|
|
openCircuitKeys = append(
|
|
|
|
openCircuitKeys, *pd.OpenCircuitKey,
|
|
|
|
)
|
2018-02-28 05:01:41 +01:00
|
|
|
}
|
|
|
|
|
2024-06-15 02:38:52 +02:00
|
|
|
case Settle, Fail, MalformedFail:
|
|
|
|
// Gather the fwd pkg references from any settle or fail
|
|
|
|
// packets, if they exist.
|
|
|
|
if pd.SourceRef != nil {
|
|
|
|
ackAddRefs = append(ackAddRefs, *pd.SourceRef)
|
2017-11-10 07:55:38 +01:00
|
|
|
}
|
2024-06-15 02:38:52 +02:00
|
|
|
if pd.DestRef != nil {
|
|
|
|
settleFailRefs = append(
|
|
|
|
settleFailRefs, *pd.DestRef,
|
|
|
|
)
|
2017-08-14 11:14:04 +02:00
|
|
|
}
|
2024-06-15 02:38:52 +02:00
|
|
|
if pd.ClosedCircuitKey != nil {
|
|
|
|
closedCircuitKeys = append(
|
|
|
|
closedCircuitKeys, *pd.ClosedCircuitKey,
|
|
|
|
)
|
2017-11-10 07:55:38 +01:00
|
|
|
}
|
2019-01-10 12:23:56 +01:00
|
|
|
|
|
|
|
case FeeUpdate:
|
2024-06-15 02:38:52 +02:00
|
|
|
// Nothing special to do.
|
2017-08-14 11:14:04 +02:00
|
|
|
}
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
logUpdates = append(logUpdates, pd.toLogUpdate())
|
2017-08-14 11:14:04 +02:00
|
|
|
}
|
|
|
|
|
2017-11-10 07:55:38 +01:00
|
|
|
// With the set of log updates mapped into wire messages, we'll now
|
|
|
|
// convert the in-memory commit into a format suitable for writing to
|
|
|
|
// disk.
|
2024-07-31 01:44:18 +02:00
|
|
|
diskCommit := newCommit.toDiskCommit(lntypes.Remote)
|
2017-11-10 07:55:38 +01:00
|
|
|
|
2024-04-09 04:48:36 +02:00
|
|
|
// We prepare the commit sig message to be sent to the remote party.
|
|
|
|
commitSigMsg := &lnwire.CommitSig{
|
|
|
|
ChanID: lnwire.NewChanIDFromOutPoint(
|
|
|
|
lc.channelState.FundingOutpoint,
|
|
|
|
),
|
|
|
|
CommitSig: commitSig,
|
|
|
|
HtlcSigs: htlcSigs,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Encode and check the size of the custom records now.
|
|
|
|
auxCustomRecords, err := fn.MapOptionZ(
|
|
|
|
lc.auxSigner,
|
|
|
|
func(s AuxSigner) fn.Result[lnwire.CustomRecords] {
|
|
|
|
blobOption, err := s.PackSigs(auxSigs).Unpack()
|
|
|
|
if err != nil {
|
|
|
|
return fn.Err[lnwire.CustomRecords](err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We now serialize the commit sig message without the
|
|
|
|
// custom records to make sure we have space for them.
|
|
|
|
var buf bytes.Buffer
|
|
|
|
err = commitSigMsg.Encode(&buf, 0)
|
|
|
|
if err != nil {
|
|
|
|
return fn.Err[lnwire.CustomRecords](err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// The number of available bytes is the max message size
|
|
|
|
// minus the size of the message without the custom
|
|
|
|
// records. We also subtract 8 bytes for encoding
|
|
|
|
// overhead of the custom records (just some safety
|
|
|
|
// padding).
|
|
|
|
available := lnwire.MaxMsgBody - buf.Len() - 8
|
|
|
|
|
|
|
|
blob := blobOption.UnwrapOr(nil)
|
|
|
|
if len(blob) > available {
|
|
|
|
err = fmt.Errorf("aux sigs size %d exceeds "+
|
|
|
|
"max allowed size of %d", len(blob),
|
|
|
|
available)
|
|
|
|
|
|
|
|
return fn.Err[lnwire.CustomRecords](err)
|
|
|
|
}
|
|
|
|
|
|
|
|
records, err := lnwire.ParseCustomRecords(blob)
|
|
|
|
if err != nil {
|
|
|
|
return fn.Err[lnwire.CustomRecords](err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return fn.Ok(records)
|
2017-11-10 07:55:38 +01:00
|
|
|
},
|
2024-04-09 04:48:36 +02:00
|
|
|
).Unpack()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("error packing aux sigs: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
commitSigMsg.CustomRecords = auxCustomRecords
|
|
|
|
|
|
|
|
return &channeldb.CommitDiff{
|
|
|
|
Commitment: *diskCommit,
|
|
|
|
CommitSig: commitSigMsg,
|
2018-02-28 05:01:41 +01:00
|
|
|
LogUpdates: logUpdates,
|
|
|
|
OpenedCircuitKeys: openCircuitKeys,
|
|
|
|
ClosedCircuitKeys: closedCircuitKeys,
|
|
|
|
AddAcks: ackAddRefs,
|
|
|
|
SettleFailAcks: settleFailRefs,
|
2024-04-09 04:48:36 +02:00
|
|
|
}, nil
|
2017-08-14 11:14:04 +02:00
|
|
|
}
|
|
|
|
|
2020-01-03 15:53:51 +01:00
|
|
|
// getUnsignedAckedUpdates returns all remote log updates that we haven't
|
|
|
|
// signed for yet ourselves.
|
|
|
|
func (lc *LightningChannel) getUnsignedAckedUpdates() []channeldb.LogUpdate {
|
|
|
|
// Fetch the last remote update that we have signed for.
|
2024-08-09 23:52:21 +02:00
|
|
|
lastRemoteCommitted :=
|
|
|
|
lc.commitChains.Remote.tail().messageIndices.Remote
|
2020-01-03 15:53:51 +01:00
|
|
|
|
|
|
|
// Fetch the last remote update that we have acked.
|
2024-08-09 23:52:21 +02:00
|
|
|
lastLocalCommitted :=
|
|
|
|
lc.commitChains.Local.tail().messageIndices.Remote
|
2020-01-03 15:53:51 +01:00
|
|
|
|
|
|
|
// We'll now run through the remote update log to locate the items that
|
|
|
|
// we haven't signed for yet. This will be the set of items we need to
|
|
|
|
// restore if we reconnect in order to produce the signature that the
|
|
|
|
// remote party expects.
|
|
|
|
var logUpdates []channeldb.LogUpdate
|
2024-08-09 22:00:59 +02:00
|
|
|
for e := lc.updateLogs.Remote.Front(); e != nil; e = e.Next() {
|
2024-05-01 00:23:50 +02:00
|
|
|
pd := e.Value
|
2020-01-03 15:53:51 +01:00
|
|
|
|
|
|
|
// Skip all remote updates that we have already included in our
|
|
|
|
// commit chain.
|
|
|
|
if pd.LogIndex < lastRemoteCommitted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Skip all remote updates that we haven't acked yet. At the
|
|
|
|
// moment this function is called, there shouldn't be any, but
|
|
|
|
// we check it anyway to make this function more generally
|
|
|
|
// usable.
|
|
|
|
if pd.LogIndex >= lastLocalCommitted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
logUpdates = append(logUpdates, pd.toLogUpdate())
|
2020-01-03 15:53:51 +01:00
|
|
|
}
|
2024-06-15 02:38:52 +02:00
|
|
|
|
2020-01-03 15:53:51 +01:00
|
|
|
return logUpdates
|
|
|
|
}
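// An illustrative sketch of the window selected above (the index values are
// assumptions for the example only): if the remote chain tail has signed for
// remote updates up to (but not including) log index 10, and the local chain
// tail has acked remote updates up to (but not including) index 14, then only
// the updates with LogIndex in [10, 14) are returned, i.e. the remote updates
// that we have acked but not yet signed for.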
|
|
|
|
|
2023-11-05 11:29:34 +01:00
|
|
|
// CalcFeeBuffer calculates a FeeBuffer in accordance with the recommended
|
|
|
|
// amount specified in BOLT 02. It accounts for two times the current fee rate
|
|
|
|
// plus an additional htlc at this higher fee rate which allows our peer to add
|
|
|
|
// an htlc even if our channel is drained locally.
|
|
|
|
// See: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
|
|
|
|
func CalcFeeBuffer(feePerKw chainfee.SatPerKWeight,
|
2024-05-24 15:56:30 +02:00
|
|
|
commitWeight lntypes.WeightUnit) lnwire.MilliSatoshi {
|
2023-11-05 11:29:34 +01:00
|
|
|
|
|
|
|
// Account for a 100% in fee rate increase.
|
|
|
|
bufferFeePerKw := 2 * feePerKw
|
|
|
|
|
|
|
|
feeBuffer := lnwire.NewMSatFromSatoshis(
|
|
|
|
// Account for an additional htlc at the higher fee level.
|
|
|
|
bufferFeePerKw.FeeForWeight(commitWeight + input.HTLCWeight),
|
|
|
|
)
|
|
|
|
|
|
|
|
return feeBuffer
|
|
|
|
}
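// exampleFeeBuffer is a minimal illustrative sketch of the calculation above;
// the function name and the input values (a commitment weight of 724 WU and a
// fee rate of 1,000 sat/kw) are assumptions for this example only. The fee
// rate is doubled to 2,000 sat/kw and one extra HTLC of input.HTLCWeight is
// priced in, so the buffer is 2,000 * (724 + input.HTLCWeight) / 1,000
// satoshis, expressed in millisatoshis.
func exampleFeeBuffer() lnwire.MilliSatoshi {
	return CalcFeeBuffer(
		chainfee.SatPerKWeight(1_000), lntypes.WeightUnit(724),
	)
}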
|
|
|
|
|
|
|
|
// BufferType is used to determine what kind of additional buffer should be left
|
|
|
|
// when evaluating the usable balance of a channel.
|
|
|
|
type BufferType uint8
|
|
|
|
|
|
|
|
const (
|
|
|
|
// NoBuffer means no additional buffer is accounted for. This is
|
|
|
|
// important when verifying an already locked-in commitment state.
|
|
|
|
NoBuffer BufferType = iota
|
|
|
|
|
|
|
|
// FeeBuffer accounts for several edge cases. One of them is where
|
|
|
|
// a locally drained channel might become unusable due to the non-opener
|
|
|
|
// of the channel not being able to add a non-dust htlc to the channel
|
|
|
|
// state because we as a channel opener cannot pay the additional fees
|
|
|
|
// an htlc would require on the commitment tx.
|
|
|
|
// See: https://github.com/lightningnetwork/lightning-rfc/issues/728
|
|
|
|
//
|
|
|
|
// Moreover it mitigates the situation where htlcs are added
|
|
|
|
// simultaneously to the commitment transaction. This cannot be avoided
|
|
|
|
// until the feature __option_simplified_update__ is available in the
|
|
|
|
// protocol and deployed widely in the network.
|
|
|
|
// More information about the issue and the simplified commitment flow
|
|
|
|
// can be found here:
|
|
|
|
// https://github.com/lightningnetwork/lnd/issues/7657
|
|
|
|
// https://github.com/lightning/bolts/pull/867
|
|
|
|
//
|
|
|
|
// The last advantage is that we can react to fee spikes (up or down)
|
|
|
|
// by accounting for at least twice the size of the current fee rate
|
|
|
|
// (BOLT02). It also accounts for decreases in the fee rate because
|
|
|
|
// former dust htlcs might now become normal outputs so the overall
|
|
|
|
// fee might increase although the fee rate decreases (this is only true
|
|
|
|
// for non-anchor channels because htlcs have to account for their
|
|
|
|
// fee of the second-level covenant transactions).
|
|
|
|
FeeBuffer
|
|
|
|
|
|
|
|
// AdditionalHtlc just accounts for an additional htlc which is helpful
|
|
|
|
// when deciding about a fee update of the commitment transaction.
|
|
|
|
// Leaving always room for an additional htlc makes sure that even
|
|
|
|
// though we are the opener of a channel a new fee update will always
|
|
|
|
// allow an htlc from our peer to be added to the commitment tx.
|
|
|
|
AdditionalHtlc
|
|
|
|
)
|
|
|
|
|
|
|
|
// String returns a human readable name for the buffer type.
|
|
|
|
func (b BufferType) String() string {
|
|
|
|
switch b {
|
|
|
|
case NoBuffer:
|
|
|
|
return "nobuffer"
|
|
|
|
case FeeBuffer:
|
|
|
|
return "feebuffer"
|
|
|
|
case AdditionalHtlc:
|
|
|
|
return "additionalhtlc"
|
|
|
|
default:
|
|
|
|
return "unknown"
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// applyCommitFee applies the commitFee including a buffer to the balance amount
|
|
|
|
// and verifies that it does not become negative. This function returns the new
|
|
|
|
// balance and the exact buffer amount (excluding the commitment fee).
|
|
|
|
func (lc *LightningChannel) applyCommitFee(
|
2024-05-24 15:56:30 +02:00
|
|
|
balance lnwire.MilliSatoshi, commitWeight lntypes.WeightUnit,
|
2023-11-05 11:29:34 +01:00
|
|
|
feePerKw chainfee.SatPerKWeight,
|
|
|
|
buffer BufferType) (lnwire.MilliSatoshi, lnwire.MilliSatoshi, error) {
|
|
|
|
|
|
|
|
commitFee := feePerKw.FeeForWeight(commitWeight)
|
|
|
|
commitFeeMsat := lnwire.NewMSatFromSatoshis(commitFee)
|
|
|
|
|
|
|
|
var bufferAmt lnwire.MilliSatoshi
|
|
|
|
switch buffer {
|
|
|
|
// The FeeBuffer is subtracted from the balance. It is of predefined
|
|
|
|
// size and keeps room for an up to 2x increase in fees of the
|
|
|
|
// commitment tx and an additional htlc at this fee level reserved for
|
|
|
|
// the peer.
|
|
|
|
case FeeBuffer:
|
|
|
|
// Make sure that we are the initiator of the channel before we
|
|
|
|
// apply the FeeBuffer.
|
|
|
|
if !lc.channelState.IsInitiator {
|
|
|
|
return 0, 0, ErrFeeBufferNotInitiator
|
|
|
|
}
|
|
|
|
|
|
|
|
// The FeeBuffer already includes the commitFee.
|
|
|
|
bufferAmt = CalcFeeBuffer(feePerKw, commitWeight)
|
|
|
|
if bufferAmt < balance {
|
|
|
|
newBalance := balance - bufferAmt
|
|
|
|
return newBalance, bufferAmt - commitFeeMsat, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// The AdditionalHtlc buffer type does NOT keep a FeeBuffer but solely
|
|
|
|
// keeps space for an additional htlc on the commitment tx which our
|
|
|
|
// peer can add.
|
|
|
|
case AdditionalHtlc:
|
|
|
|
additionalHtlcFee := lnwire.NewMSatFromSatoshis(
|
|
|
|
feePerKw.FeeForWeight(input.HTLCWeight),
|
|
|
|
)
|
|
|
|
|
|
|
|
bufferAmt = commitFeeMsat + additionalHtlcFee
|
|
|
|
newBalance := balance - bufferAmt
|
|
|
|
if bufferAmt < balance {
|
|
|
|
return newBalance, additionalHtlcFee, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// The default case does not account for any buffer on the local balance
|
|
|
|
// but just subtracts the commit fee.
|
|
|
|
default:
|
|
|
|
if commitFeeMsat < balance {
|
|
|
|
newBalance := balance - commitFeeMsat
|
|
|
|
return newBalance, 0, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// We still return the amount and bufferAmt here to log them at a later
|
|
|
|
// stage.
|
|
|
|
return balance, bufferAmt, ErrBelowChanReserve
|
|
|
|
}
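// A rough worked example for applyCommitFee (all numbers are assumed purely
// for illustration): with a balance of 100,000,000 msat, a commitment fee of
// 1,000,000 msat and an additional-HTLC fee of 172,000 msat, the
// AdditionalHtlc case deducts 1,172,000 msat and returns a new balance of
// 98,828,000 msat together with a buffer amount of 172,000 msat, while the
// default (NoBuffer) case only deducts the 1,000,000 msat commitment fee.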
|
|
|
|
|
2019-08-01 05:10:45 +02:00
|
|
|
// validateCommitmentSanity is used to validate the current state of the
|
|
|
|
// commitment transaction in terms of the ChannelConstraints that we and our
|
2020-02-19 12:27:41 +01:00
|
|
|
// remote peer agreed upon during the funding workflow. The
|
|
|
|
// predict[Our|Their]Add parameters should be set to a valid
|
2024-06-15 01:30:28 +02:00
|
|
|
// paymentDescriptor if we are validating in the state when adding a new HTLC,
|
2020-02-19 12:27:41 +01:00
|
|
|
// or nil otherwise.
|
2019-08-01 05:10:45 +02:00
|
|
|
func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
|
2024-07-31 01:44:18 +02:00
|
|
|
ourLogCounter uint64, whoseCommitChain lntypes.ChannelParty,
|
2024-06-15 01:30:28 +02:00
|
|
|
buffer BufferType, predictOurAdd, predictTheirAdd *paymentDescriptor,
|
2024-07-31 01:44:18 +02:00
|
|
|
) error {
|
2019-08-01 05:10:45 +02:00
|
|
|
|
2023-11-05 11:29:34 +01:00
|
|
|
// First fetch the initial balance before applying any updates.
|
2024-08-09 21:47:58 +02:00
|
|
|
commitChain := lc.commitChains.Local
|
2024-07-31 01:44:18 +02:00
|
|
|
if whoseCommitChain.IsRemote() {
|
2024-08-09 21:47:58 +02:00
|
|
|
commitChain = lc.commitChains.Remote
|
2023-11-05 11:29:34 +01:00
|
|
|
}
|
|
|
|
ourInitialBalance := commitChain.tip().ourBalance
|
|
|
|
theirInitialBalance := commitChain.tip().theirBalance
|
|
|
|
|
2019-08-01 05:10:45 +02:00
|
|
|
// Fetch all updates not committed.
|
|
|
|
view := lc.fetchHTLCView(theirLogCounter, ourLogCounter)
|
|
|
|
|
|
|
|
// If we are checking if we can add a new HTLC, we add this to the
|
2020-02-15 15:45:25 +01:00
|
|
|
// appropriate update log, in order to validate the sanity of the
|
|
|
|
// commitment resulting from _actually adding_ this HTLC to the state.
|
2020-02-19 12:27:41 +01:00
|
|
|
if predictOurAdd != nil {
|
2024-07-24 23:57:32 +02:00
|
|
|
view.Updates.Local = append(view.Updates.Local, predictOurAdd)
|
2020-02-19 12:27:41 +01:00
|
|
|
}
|
|
|
|
if predictTheirAdd != nil {
|
2024-07-24 23:57:32 +02:00
|
|
|
view.Updates.Remote = append(
|
|
|
|
view.Updates.Remote, predictTheirAdd,
|
|
|
|
)
|
2019-08-01 05:10:45 +02:00
|
|
|
}
|
|
|
|
|
2020-02-12 11:10:19 +01:00
|
|
|
ourBalance, theirBalance, commitWeight, filteredView, err := lc.computeView(
|
2024-07-31 01:44:18 +02:00
|
|
|
view, whoseCommitChain, false,
|
|
|
|
fn.None[chainfee.SatPerKWeight](),
|
2019-08-01 05:10:45 +02:00
|
|
|
)
|
2020-02-12 11:10:19 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2022-10-14 09:32:58 +02:00
|
|
|
|
2024-04-02 05:00:29 +02:00
|
|
|
feePerKw := filteredView.FeePerKw
|
2019-08-01 05:10:45 +02:00
|
|
|
|
|
|
|
// Ensure that the fee being applied is enough to be relayed across the
|
|
|
|
// network in a reasonable time frame.
|
2019-10-31 03:43:05 +01:00
|
|
|
if feePerKw < chainfee.FeePerKwFloor {
|
2019-08-01 05:10:45 +02:00
|
|
|
return fmt.Errorf("commitment fee per kw %v below fee floor %v",
|
2019-10-31 03:43:05 +01:00
|
|
|
feePerKw, chainfee.FeePerKwFloor)
|
2019-08-01 05:10:45 +02:00
|
|
|
}
|
|
|
|
|
2023-11-05 11:29:34 +01:00
|
|
|
// The channel opener has to account for the commitment fee. This
|
|
|
|
// also includes a buffer type. Only if we are the opener of the
|
|
|
|
// channel do we enforce the buffer on our local amount; otherwise
|
|
|
|
// just the plain commitment fee is applied to the remote balance.
|
|
|
|
var bufferAmt lnwire.MilliSatoshi
|
|
|
|
if lc.channelState.IsInitiator {
|
|
|
|
ourBalance, bufferAmt, err = lc.applyCommitFee(
|
|
|
|
ourBalance, commitWeight, feePerKw, buffer)
|
|
|
|
if err != nil {
|
|
|
|
commitFee := feePerKw.FeeForWeight(commitWeight)
|
|
|
|
lc.log.Errorf("Cannot pay for the CommitmentFee of "+
|
|
|
|
"the ChannelState: ourBalance is negative "+
|
|
|
|
"after applying the fee: ourBalance=%v, "+
|
|
|
|
"commitFee=%v, feeBuffer=%v (type=%v) "+
|
|
|
|
"local_chan_initiator", int64(ourBalance),
|
|
|
|
commitFee, bufferAmt, buffer)
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// No FeeBuffer is enforced when we are not the initiator of
|
|
|
|
// the channel. We cannot do this, because if our peer does not
|
|
|
|
// enforce the FeeBuffer (older LND software) the peer might
|
|
|
|
// bring his balance below the FeeBuffer making the channel
|
|
|
|
// stuck because locally we will never put another outgoing HTLC
|
|
|
|
// on the channel state. The FeeBuffer should ONLY be enforced
|
|
|
|
// if we locally pay for the commitment transaction.
|
|
|
|
theirBalance, bufferAmt, err = lc.applyCommitFee(
|
|
|
|
theirBalance, commitWeight, feePerKw, NoBuffer)
|
|
|
|
if err != nil {
|
|
|
|
commitFee := feePerKw.FeeForWeight(commitWeight)
|
|
|
|
lc.log.Errorf("Cannot pay for the CommitmentFee "+
|
|
|
|
"of the ChannelState: theirBalance is "+
|
|
|
|
"negative after applying the fee: "+
|
|
|
|
"theirBalance=%v, commitFee=%v, feeBuffer=%v "+
|
|
|
|
"(type=%v) remote_chan_initiator",
|
|
|
|
int64(theirBalance), commitFee, bufferAmt,
|
|
|
|
buffer)
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// The commitment fee was accounted for successfully. Now make sure we
|
|
|
|
// still have enough left to account for the channel reserve.
|
2019-08-01 05:10:45 +02:00
|
|
|
// If the added HTLCs will decrease the balance, make sure they won't
|
|
|
|
// dip the local and remote balances below the channel reserves.
|
2022-10-14 09:32:58 +02:00
|
|
|
ourReserve := lnwire.NewMSatFromSatoshis(
|
|
|
|
lc.channelState.LocalChanCfg.ChanReserve,
|
|
|
|
)
|
|
|
|
theirReserve := lnwire.NewMSatFromSatoshis(
|
|
|
|
lc.channelState.RemoteChanCfg.ChanReserve,
|
|
|
|
)
|
2019-08-01 05:10:45 +02:00
|
|
|
|
2023-11-05 11:29:34 +01:00
|
|
|
// Calculate the commitment fee to log the information if needed.
|
|
|
|
commitFee := feePerKw.FeeForWeight(commitWeight)
|
|
|
|
commitFeeMsat := lnwire.NewMSatFromSatoshis(commitFee)
|
|
|
|
|
2022-10-14 09:32:58 +02:00
|
|
|
switch {
|
2023-11-05 11:29:34 +01:00
|
|
|
// TODO(ziggie): Allow the peer to dip us below the channel reserve when
|
|
|
|
// our local balance would increase during this commitment dance or
|
|
|
|
// allow us to dip the peer below its reserve when their balance would
|
|
|
|
// increase during this commitment dance. This is needed for splicing
|
|
|
|
// when e.g. a new channel (bigger capacity) has a higher required
|
|
|
|
// reserve and the peer would need to add an additional htlc to push the
|
|
|
|
// missing amount to our side and vice versa.
|
|
|
|
// See: https://github.com/lightningnetwork/lnd/issues/8249
|
2022-10-14 09:32:58 +02:00
|
|
|
case ourBalance < ourInitialBalance && ourBalance < ourReserve:
|
|
|
|
lc.log.Debugf("Funds below chan reserve: ourBalance=%v, "+
|
2023-11-05 11:29:34 +01:00
|
|
|
"ourReserve=%v, commitFee=%v, feeBuffer=%v "+
|
|
|
|
"chan_initiator=%v", ourBalance, ourReserve,
|
|
|
|
commitFeeMsat, bufferAmt, lc.channelState.IsInitiator)
|
|
|
|
|
2022-10-14 09:32:58 +02:00
|
|
|
return fmt.Errorf("%w: our balance below chan reserve",
|
|
|
|
ErrBelowChanReserve)
|
|
|
|
|
|
|
|
case theirBalance < theirInitialBalance && theirBalance < theirReserve:
|
|
|
|
lc.log.Debugf("Funds below chan reserve: theirBalance=%v, "+
|
|
|
|
"theirReserve=%v", theirBalance, theirReserve)
|
2023-11-05 11:29:34 +01:00
|
|
|
|
2022-10-14 09:32:58 +02:00
|
|
|
return fmt.Errorf("%w: their balance below chan reserve",
|
|
|
|
ErrBelowChanReserve)
|
2019-08-01 05:10:45 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// validateUpdates take a set of updates, and validates them against
|
|
|
|
// the passed channel constraints.
|
2024-06-15 01:30:28 +02:00
|
|
|
validateUpdates := func(updates []*paymentDescriptor,
|
2019-08-01 05:10:45 +02:00
|
|
|
constraints *channeldb.ChannelConfig) error {
|
|
|
|
|
|
|
|
// We keep track of the number of HTLCs in flight for the
|
|
|
|
// commitment, and the amount in flight.
|
|
|
|
var numInFlight uint16
|
|
|
|
var amtInFlight lnwire.MilliSatoshi
|
|
|
|
|
|
|
|
// Go through all updates, checking that they don't violate the
|
|
|
|
// channel constraints.
|
|
|
|
for _, entry := range updates {
|
|
|
|
if entry.EntryType == Add {
|
|
|
|
// An HTLC is being added, this will add to the
|
|
|
|
// number and amount in flight.
|
|
|
|
amtInFlight += entry.Amount
|
|
|
|
numInFlight++
|
|
|
|
|
2020-04-02 18:31:51 +02:00
|
|
|
// Check that the HTLC amount is positive.
|
|
|
|
if entry.Amount == 0 {
|
|
|
|
return ErrInvalidHTLCAmt
|
|
|
|
}
|
|
|
|
|
2019-08-01 05:10:45 +02:00
|
|
|
// Check that the value of the HTLC they added
|
|
|
|
// is above our minimum.
|
|
|
|
if entry.Amount < constraints.MinHTLC {
|
|
|
|
return ErrBelowMinHTLC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now that we know the total value of added HTLCs, we check
|
2022-01-13 17:29:43 +01:00
|
|
|
// that this satisfies the MaxPendingAmount constraint.
|
2019-08-01 05:10:45 +02:00
|
|
|
if amtInFlight > constraints.MaxPendingAmount {
|
|
|
|
return ErrMaxPendingAmount
|
|
|
|
}
|
|
|
|
|
|
|
|
// In this step, we verify that the total number of active
|
|
|
|
// HTLCs does not exceed the constraint of the maximum number
|
|
|
|
// of HTLCs in flight.
|
|
|
|
if numInFlight > constraints.MaxAcceptedHtlcs {
|
|
|
|
return ErrMaxHTLCNumber
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// First check that the remote updates won't violate its channel
|
|
|
|
// constraints.
|
2020-02-12 11:10:19 +01:00
|
|
|
err = validateUpdates(
|
2024-07-24 23:57:32 +02:00
|
|
|
filteredView.Updates.Remote, &lc.channelState.RemoteChanCfg,
|
2019-08-01 05:10:45 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Secondly check that our updates won't violate our channel
|
|
|
|
// constraints.
|
|
|
|
err = validateUpdates(
|
2024-07-24 23:57:32 +02:00
|
|
|
filteredView.Updates.Local, &lc.channelState.LocalChanCfg,
|
2019-08-01 05:10:45 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-01-20 04:24:48 +01:00
|
|
|
// CommitSigs holds the set of related signatures for a new commitment
|
2023-01-20 03:27:07 +01:00
|
|
|
// transaction state.
|
|
|
|
type CommitSigs struct {
|
|
|
|
// CommitSig is the normal commitment signature. This will only be a
|
2023-01-20 04:24:48 +01:00
|
|
|
// non-zero commitment signature for non-taproot channels.
|
2023-01-20 03:27:07 +01:00
|
|
|
CommitSig lnwire.Sig
|
|
|
|
|
|
|
|
// HtlcSigs is the set of signatures for all HTLCs in the commitment
|
|
|
|
// transaction. Depending on the channel type, these will either be
|
|
|
|
// ECDSA or Schnorr signatures.
|
|
|
|
HtlcSigs []lnwire.Sig
|
2023-01-20 04:24:48 +01:00
|
|
|
|
|
|
|
// PartialSig is the musig2 partial signature for taproot commitment
|
|
|
|
// transactions.
|
multi: upgrade new taproot TLVs to use tlv.OptionalRecordT
In this commit, we update new Taproot related TLVs (nonces, partial sig,
sig with nonce, etc). Along the way we were able to get rid of some
boilerplate, but most importantly, we're able to better protect against
API misuse (using a nonce that isn't initialized, etc) with the new
options API. In some areas this introduces a bit of extra boilerplate,
and where applicable I used some new helper functions to help cut down
on the noise.
Note to reviewers: this is done as a single commit, as changing the API
breaks all callers, so if we want things to compile it needs to be in a
wumbo commit.
2024-02-24 03:04:51 +01:00
|
|
|
PartialSig lnwire.OptPartialSigWithNonceTLV
|
2024-04-09 04:48:36 +02:00
|
|
|
|
|
|
|
// AuxSigBlob is the blob containing all the auxiliary signatures for
|
|
|
|
// this new commitment state.
|
|
|
|
AuxSigBlob tlv.Blob
|
2023-01-20 03:27:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// NewCommitState wraps the various signatures needed to properly
|
|
|
|
// propose/accept a new commitment state. This includes the signer's nonce for
|
|
|
|
// musig2 channels.
|
|
|
|
type NewCommitState struct {
|
|
|
|
*CommitSigs
|
|
|
|
|
|
|
|
// PendingHTLCs is the set of new/pending HTLCs produced by this
|
|
|
|
// commitment state.
|
|
|
|
PendingHTLCs []channeldb.HTLC
|
|
|
|
}
|
|
|
|
|
2016-07-06 02:01:55 +02:00
|
|
|
// SignNextCommitment signs a new commitment which includes any previous
|
|
|
|
// unsettled HTLCs, any new HTLCs, and any modifications to prior HTLCs
|
|
|
|
// committed in previous commitment updates. Signing a new commitment
|
|
|
|
// decrements the available revocation window by 1. After a successful method
|
|
|
|
// call, the remote party's commitment chain is extended by a new commitment
|
|
|
|
// which includes all updates to the HTLC log prior to this method invocation.
|
2017-11-10 07:53:18 +01:00
|
|
|
// The first return parameter is the signature for the commitment transaction
|
|
|
|
// itself, while the second parameter is a slice of all HTLC signatures (if
|
|
|
|
// any). The HTLC signatures are sorted according to the BIP 69 order of the
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TODO statement in the channel arb.
Before this commit, we would simply replace our local HTLC set with
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb the information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch package, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just sending off the set of HTLCs after we
revoke, we'll also send off the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 02:23:26 +02:00
|
|
|
// HTLC's on the commitment transaction. Finally, the new set of pending HTLCs
|
|
|
|
// for the remote party's commitment are also returned.
|
2024-04-09 04:48:36 +02:00
|
|
|
//
|
|
|
|
//nolint:funlen
|
2023-01-20 03:27:07 +01:00
|
|
|
func (lc *LightningChannel) SignNextCommitment() (*NewCommitState, error) {
|
2016-11-21 04:18:30 +01:00
|
|
|
lc.Lock()
|
|
|
|
defer lc.Unlock()
|
|
|
|
|
2019-09-24 14:33:59 +02:00
|
|
|
// Check for empty commit sig. This should never happen, but we don't
|
|
|
|
// dare to fail hard here. We assume peers can deal with the empty sig
|
|
|
|
// and continue channel operation. We log an error so that the bug
|
|
|
|
// causing this can be tracked down.
|
2024-07-31 01:44:18 +02:00
|
|
|
if !lc.oweCommitment(lntypes.Local) {
|
2019-09-24 14:33:59 +02:00
|
|
|
lc.log.Errorf("sending empty commit sig")
|
|
|
|
}
|
|
|
|
|
2018-01-31 04:55:39 +01:00
|
|
|
var (
|
2023-01-20 04:24:48 +01:00
|
|
|
sig lnwire.Sig
|
|
|
|
partialSig *lnwire.PartialSigWithNonce
|
|
|
|
htlcSigs []lnwire.Sig
|
2018-01-31 04:55:39 +01:00
|
|
|
)
|
|
|
|
|
2017-12-11 01:10:44 +01:00
|
|
|
// If we're awaiting an ACK to a commitment signature, or if we
|
|
|
|
// don't yet have the initial next revocation point of the remote
|
|
|
|
// party, then we're unable to create new states. Each time we create a
|
|
|
|
// new state, we consume a prior revocation point.
|
2017-12-12 00:32:24 +01:00
|
|
|
commitPoint := lc.channelState.RemoteNextRevocation
|
2024-08-09 21:47:58 +02:00
|
|
|
unacked := lc.commitChains.Remote.hasUnackedCommitment()
|
2022-01-21 00:35:56 +01:00
|
|
|
if unacked || commitPoint == nil {
|
|
|
|
lc.log.Tracef("waiting for remote ack=%v, nil "+
|
|
|
|
"RemoteNextRevocation: %v", unacked, commitPoint == nil)
|
2023-01-20 03:27:07 +01:00
|
|
|
return nil, ErrNoWindow
|
lnwallet: update state machine to the version within the spec
This commit updates the internal channel state machine to the one as
described within the spec and currently implemented within the rest of
the other Lightning implementations.
At a high level the following modifications have been made:
* When signing we no longer include the index of the remote party’s
log
that our signature covers. Instead we include ALL of our current
updates, but only the updates of the remote party that we’ve
ACK’d.
* A pending change is considered ACK’d once a revocation message
has been received, locking in the changes in the remote party’s
commitment transaction.
* When sending a new commitment, we remember the index of our
log at that point so we can mark that portion of the log as ACK’d
once we receive a revocation message from the remote party.
* When receiving a new commitment signature, we include ALL of
the remote party’s changes that we’ve received but only our set
of changes that’ve been ACK’d by the remote party.
* A revocation message now also implicitly serves to ACK
all the changes that were included in the CommitSig message
received before it.
The resulting change is a rather minor diff. However, with this state
machine it’s important to note that the order of sig/revoke messages
has been swapped. A proper exchange now looks like the following:
* Alice -> Add, Add, Add
* Alice -> Sig
* Revoke <- Bob
* Sig <- Bob
* Alice -> Revoke
One other thing that’s worth noting is that with this state machine,
since what’s included in an update is implicit, both sides may need to
at times send a new commitment update in the case of a concurrent state
transition initiated by both sides.
Finally, all counters/indexes have been made 64-bit integers in order
to properly match the spec.
2017-02-21 02:55:33 +01:00
|
|
|
}
|
|
|
|
|
2017-10-19 02:36:28 +02:00
|
|
|
// Determine the last update on the remote log that has been locked in.
|
2024-08-09 23:52:21 +02:00
|
|
|
remoteACKedIndex := lc.commitChains.Local.tail().messageIndices.Remote
|
2024-08-09 21:47:58 +02:00
|
|
|
remoteHtlcIndex := lc.commitChains.Local.tail().theirHtlcIndex
|
2017-10-19 02:36:28 +02:00
|
|
|
|
lnwallet: update state machine to the version within the spec
This commit updates the internal channel state machine to the one as
described within the spec and currently implemented within the rest of
the other Lightning implementations.
At a high level the following modifications have been made:
* When signing we no longer include the index of the remote party’s
log
that our signature covers. Instead we include ALL of our current
updates, but only the updates of the remote party that we’ve
ACK’d.
* A pending change is considered ACK’d once a revocation message
has been received, locking in the changes in the remote party’s
commitment transaction.
* When sending a new commitment, we remember the index of our
log at that point so we can mark that portion of the log as ACK’d
once we receive a revocation message from the remote party.
* When receiving a new commitment signature, we include ALL of
the remote party’s changes that we’ve received but only our set
of changes that’ve been ACK’d by the remote party.
* A revocation message now also implicitly serves to ACK
all the changes that were included in the CommitSig message
received before it.
The resulting change is a rather minor diff. However, with this state
machine it’s important to note that the order of sig/revoke messages
has been swapped. A proper exchange now looks like the following:
* Alice -> Add, Add, Add
* Alice -> Sig
* Revoke <- Bob
* Sig <- Bob
* Alice -> Revoke
One other thing that’s worth noting is that with this state machine,
since what’s included in an update is implicit, both sides may need to
at times send a new commitment update in the case of a concurrent state
transition initiated by both sides.
Finally, all counters/indexes have been made 64-bit integers in order
to properly match the spec.
2017-02-21 02:55:33 +01:00
|
|
|
// Before we extend this new commitment to the remote commitment chain,
|
|
|
|
// ensure that we aren't violating any of the constraints the remote
|
|
|
|
// party set up when we initially set up the channel. If we are, then
|
|
|
|
// we'll abort this state transition.
|
2023-11-05 11:29:34 +01:00
|
|
|
// We do not enforce the FeeBuffer here because when we reach this
|
|
|
|
// point all updates will have to get locked-in so we enforce the
|
|
|
|
// minimum requirement.
|
2019-03-09 01:05:28 +01:00
|
|
|
err := lc.validateCommitmentSanity(
|
2024-08-09 22:00:59 +02:00
|
|
|
remoteACKedIndex, lc.updateLogs.Local.logIndex, lntypes.Remote,
|
2024-07-31 01:44:18 +02:00
|
|
|
NoBuffer, nil, nil,
|
2019-03-09 01:05:28 +01:00
|
|
|
)
|
lnwallet: update state machine to the version within the spec
This commit updates the internal channel state machine to the one as
described within the spec and currently implemented within the rest of
the other Lightning implementations.
At a high level the following modifications have been made:
* When signing we no longer include the index of the remote party’s
log
that our signature covers. Instead we include ALL of our current
updates, but only the updates of the remote party that we’ve
ACK’d.
* A pending change is considered ACK’d once a revocation message
has been received, locking in the changes in the remote party’s
commitment transaction.
* When sending a new commitment, we remember the index of our
log at that point so we can mark that portion of the log as ACK’d
once we receive a revocation message from the remote party.
* When receiving a new commitment signature, we include ALL of
the remote party’s changes that we’ve received but only our set
of changes that’ve been ACK’d by the remote party.
* A revocation message now also implicitly serves to ACK
all the changes that were included in the CommitSig message
received before it.
The resulting change is a rather minor diff. However, with this state
machine it’s important to note that the order of sig/revoke messages
has been swapped. A proper exchange now looks like the following:
* Alice -> Add, Add, Add
* Alice -> Sig
* Revoke <- Bob
* Sig <- Bob
* Alice -> Revoke
One other thing that’s worth noting is that with this state machine,
since what’s included in an update is implicit, both sides may need to
at times send a new commitment update in the case of a concurrent state
transition initiated by both sides.
Finally, all counters/indexes have been made 64-bit integers in order
to properly match the spec.
2017-02-21 02:55:33 +01:00
|
|
|
if err != nil {
|
2023-01-20 03:27:07 +01:00
|
|
|
return nil, err
|
2016-07-06 02:01:55 +02:00
|
|
|
}
|
2016-01-05 22:01:42 +01:00
|
|
|
|
2017-09-04 00:38:36 +02:00
|
|
|
// Grab the next commitment point for the remote party. This will be
|
2017-07-30 21:32:24 +02:00
|
|
|
// used within fetchCommitmentView to derive all the keys necessary to
|
|
|
|
// construct the commitment state.
|
2019-09-17 04:06:19 +02:00
|
|
|
keyRing := DeriveCommitmentKeys(
|
2024-07-31 01:44:18 +02:00
|
|
|
commitPoint, lntypes.Remote, lc.channelState.ChanType,
|
2020-01-06 11:42:02 +01:00
|
|
|
&lc.channelState.LocalChanCfg, &lc.channelState.RemoteChanCfg,
|
2019-03-09 01:05:28 +01:00
|
|
|
)
|
2016-07-06 02:01:55 +02:00
|
|
|
|
|
|
|
// Create a new commitment view which will calculate the evaluated
|
|
|
|
// state of the remote node's new commitment including our latest added
|
2017-01-13 06:01:50 +01:00
|
|
|
// HTLCs. The view includes the latest balances for both sides on the
|
2016-07-06 02:01:55 +02:00
|
|
|
// remote node's chain, and also updates the addition height of any new
|
lnwallet: update state machine to the version within the spec
This commit updates the internal channel state machine to the one as
described within the spec and currently implemented within the rest of
the other Lightning implementations.
At a high level the following modifications have been made:
* When signing we no longer include the index of the remote party’s
log
that our signature covers. Instead we include ALL of our current
updates, but only the updates of the remote party that we’ve
ACK’d.
* A pending change is considered ACK’d once a revocation message
has been received, locking in the changes in the remote party’s
commitment transaction.
* When sending a new commitment, we remember the index of our
log at that point so we can mark that portion of the log as ACK’d
once we receive a revocation message from the remote party.
* When receiving a new commitment signature, we include ALL of
the remote party’s changes that we’ve received but only our set
of changes that’ve been ACK’d by the remote party.
* A revocation message now also implicitly serves to ACK
all the changes that were included in the CommitSig message
received before it.
The resulting change is a rather minor diff. However, with this state
machine it’s important to note that the order of sig/revoke messages
has been swapped. A proper exchange now looks like the following:
* Alice -> Add, Add, Add
* Alice -> Sig
* Revoke <- Bob
* Sig <- Bob
* Alice -> Revoke
One other thing that’s worth noting is that with this state machine,
since what’s included in an update is implicit, both sides may need to
at times send a new commitment update in the case of a concurrent state
transition initiated by both sides.
Finally, all counters/indexes have been made 64-bit integers in order
to properly match the spec.
2017-02-21 02:55:33 +01:00
|
|
|
// HTLC log entries. When creating a new remote view, we include
|
|
|
|
// _all_ of our changes (pending or committed) but only the remote
|
|
|
|
// node's changes up to the last change we've ACK'd.
|
2017-11-10 07:53:18 +01:00
|
|
|
newCommitView, err := lc.fetchCommitmentView(
|
2024-08-09 22:00:59 +02:00
|
|
|
lntypes.Remote, lc.updateLogs.Local.logIndex,
|
|
|
|
lc.updateLogs.Local.htlcCounter, remoteACKedIndex,
|
2024-07-31 01:44:18 +02:00
|
|
|
remoteHtlcIndex, keyRing,
|
2017-11-10 07:53:18 +01:00
|
|
|
)
|
2016-01-05 22:01:42 +01:00
|
|
|
if err != nil {
|
2023-01-20 03:27:07 +01:00
|
|
|
return nil, err
|
2016-01-05 22:01:42 +01:00
|
|
|
}
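
	// Note that the view above is intentionally asymmetric: we pass our
	// full log index and HTLC counter, but only the remote indices up to
	// remoteACKedIndex/remoteHtlcIndex, mirroring the "all of our
	// changes, only their ACK'd changes" rule described in the comment
	// above.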

	lc.log.Tracef("extending remote chain to height %v, "+
		"local_log=%v, remote_log=%v",
		newCommitView.height,
		lc.updateLogs.Local.logIndex, remoteACKedIndex)

	lc.log.Tracef("remote chain: our_balance=%v, "+
		"their_balance=%v, commit_tx: %v",
		newCommitView.ourBalance,
		newCommitView.theirBalance,
		lnutils.SpewLogClosure(newCommitView.txn))

	// With the commitment view constructed, if there are any HTLC's, we'll
	// need to generate signatures of each of them for the remote party's
	// commitment state. We do so in two phases: first we generate and
	// submit the set of signature jobs to the worker pool.
	var leaseExpiry uint32
	if lc.channelState.ChanType.HasLeaseExpiration() {
		leaseExpiry = lc.channelState.ThawHeight
	}
	sigBatch, auxSigBatch, cancelChan, err := genRemoteHtlcSigJobs(
		keyRing, lc.channelState, leaseExpiry, newCommitView,
		lc.leafStore,
	)
	if err != nil {
		return nil, err
	}

	// We'll need to send over the signatures to the remote party in the
	// order as they appear on the commitment transaction after BIP 69
	// sorting.
	slices.SortFunc(sigBatch, func(i, j SignJob) int {
		return int(i.OutputIndex - j.OutputIndex)
	})
	slices.SortFunc(auxSigBatch, func(i, j AuxSigJob) int {
		return int(i.OutputIndex - j.OutputIndex)
	})
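
	// Sorting by OutputIndex means the HTLC signatures are sent in the
	// same order as the (BIP 69 sorted) commitment outputs, so the remote
	// party can match each signature to its output positionally. A
	// minimal illustration of the same idea (hypothetical values, only
	// the OutputIndex field matters here):
	//
	//	jobs := []SignJob{{OutputIndex: 2}, {OutputIndex: 0}}
	//	slices.SortFunc(jobs, func(i, j SignJob) int {
	//		return int(i.OutputIndex - j.OutputIndex)
	//	})
	//	// jobs is now ordered by output index: 0, then 2.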

	lc.sigPool.SubmitSignBatch(sigBatch)

	err = fn.MapOptionZ(lc.auxSigner, func(a AuxSigner) error {
		return a.SubmitSecondLevelSigBatch(
			NewAuxChanState(lc.channelState), newCommitView.txn,
			auxSigBatch,
		)
	})
	if err != nil {
		return nil, fmt.Errorf("error submitting second level sig "+
			"batch: %w", err)
	}
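
	// fn.MapOptionZ only invokes the closure when an aux signer is
	// actually set; for channels without one it yields the zero error
	// value, so the second-level batch submission is effectively a no-op.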

	// While the jobs are being carried out, we'll Sign their version of
	// the new commitment transaction while we're waiting for the rest of
	// the HTLC signatures to be processed.
	//
	// TODO(roasbeef): abstract into CommitSigner interface?
	if lc.channelState.ChanType.IsTaproot() {
		// In this case, we'll send out a partial signature as this is
		// a musig2 channel. The encoded normal ECDSA signature will be
		// just blank.
		remoteSession := lc.musigSessions.RemoteSession
		musig, err := remoteSession.SignCommit(
			newCommitView.txn,
		)
		if err != nil {
			close(cancelChan)
			return nil, err
		}

		partialSig = musig.ToWireSig()
	} else {
		lc.signDesc.SigHashes = input.NewTxSigHashesV0Only(
			newCommitView.txn,
		)
		rawSig, err := lc.Signer.SignOutputRaw(
			newCommitView.txn, lc.signDesc,
		)
		if err != nil {
			close(cancelChan)
			return nil, err
		}
		sig, err = lnwire.NewSigFromSignature(rawSig)
		if err != nil {
			close(cancelChan)
			return nil, err
		}
	}
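
	// At this point exactly one of partialSig (taproot/musig2 channels)
	// or sig (ECDSA channels) has been populated; the other keeps its
	// zero value and is encoded as blank on the wire.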

	// Iterate through all the responses to gather each of the signatures
	// in the order they were submitted.
	htlcSigs = make([]lnwire.Sig, 0, len(sigBatch))
	auxSigs := make([]fn.Option[tlv.Blob], 0, len(auxSigBatch))
	for i := range sigBatch {
		htlcSigJob := sigBatch[i]
		jobResp := <-htlcSigJob.Resp

		// If an error occurred, then we'll cancel any other active
		// jobs.
		if jobResp.Err != nil {
			close(cancelChan)
			return nil, jobResp.Err
		}

		htlcSigs = append(htlcSigs, jobResp.Sig)

		if lc.auxSigner.IsNone() {
			continue
		}

		auxHtlcSigJob := auxSigBatch[i]
		auxJobResp := <-auxHtlcSigJob.Resp

		// If an error occurred, then we'll cancel any other active
		// jobs.
		if auxJobResp.Err != nil {
			close(cancelChan)
			return nil, auxJobResp.Err
		}

		auxSigs = append(auxSigs, auxJobResp.SigBlob)
	}
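
	// The aux batch is assumed to be index-aligned with sigBatch above:
	// auxSigBatch[i] refers to the same HTLC as sigBatch[i], which is why
	// a single loop can drain both response channels.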

	// As we're about to propose a new commitment state for the remote
	// party, we'll write this pending state to disk before we exit, so we
	// can retransmit it if necessary.
	commitDiff, err := lc.createCommitDiff(
		newCommitView, sig, htlcSigs, auxSigs,
	)
	if err != nil {
		return nil, err
	}

	err = lc.channelState.AppendRemoteCommitChain(commitDiff)
	if err != nil {
		return nil, err
	}

	// TODO(roasbeef): check that one eclair bug
	// * need to retransmit on first state still?
	// * after initial reconnect

	// Extend the remote commitment chain by one with the addition of our
	// latest commitment update.
	lc.commitChains.Remote.addCommitment(newCommitView)

	auxSigBlob, err := commitDiff.CommitSig.CustomRecords.Serialize()
	if err != nil {
		return nil, fmt.Errorf("unable to serialize aux sig blob: %w",
			err)
	}

	return &NewCommitState{
		CommitSigs: &CommitSigs{
			CommitSig:  sig,
			HtlcSigs:   htlcSigs,
			PartialSig: lnwire.MaybePartialSigWithNonce(partialSig),
			AuxSigBlob: auxSigBlob,
		},
		PendingHTLCs: commitDiff.Commitment.Htlcs,
	}, nil
}
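
// A caller that needs to forward the result to the peer typically packages
// the returned NewCommitState into an lnwire.CommitSig, as the
// retransmission path in ProcessChanSyncMsg below does. Roughly (sketch
// only, the exact fields depend on the caller):
//
//	commitSig := &lnwire.CommitSig{
//		ChanID:     chanID,
//		CommitSig:  newCommit.CommitSig,
//		HtlcSigs:   newCommit.HtlcSigs,
//		PartialSig: newCommit.PartialSig,
//	}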

// resignMusigCommit is used to resign a commitment transaction for taproot
// channels when we need to retransmit a signature after a channel reestablish
// message. Taproot channels use musig2, which means we must use fresh nonces
// each time. After we receive the channel reestablish message, we learn the
// nonce we need to use for the remote party. As a result, we need to generate
// the partial signature again with the new nonce.
func (lc *LightningChannel) resignMusigCommit(
	commitTx *wire.MsgTx) (lnwire.OptPartialSigWithNonceTLV, error) {

	remoteSession := lc.musigSessions.RemoteSession
	musig, err := remoteSession.SignCommit(commitTx)
	if err != nil {
		var none lnwire.OptPartialSigWithNonceTLV
		return none, err
	}

	partialSig := lnwire.MaybePartialSigWithNonce(musig.ToWireSig())

	return partialSig, nil
}
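
// Re-signing is required because a musig2 partial signature is bound to the
// verification nonce it was created with; once the peer advertises a fresh
// nonce in channel_reestablish, a previously cached partial signature can no
// longer be verified against it.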

// ProcessChanSyncMsg processes a ChannelReestablish message sent by the remote
// connection upon re-establishment of our connection with them. This method
// will return a single message if we are currently out of sync, otherwise a
// nil lnwire.Message will be returned. If it is decided that our level of
// de-synchronization is irreconcilable, then an error indicating the issue
// will be returned. In the case that an error is returned, the channel should
// be force closed, as we cannot continue updates.
//
// One of two message sets will be returned:
//
// - CommitSig+Updates: if we have a pending remote commit which they claim to
//   have not received
// - RevokeAndAck: if we sent a revocation message that they claim to have
//   not received
//
// If we detect a scenario where we need to send a CommitSig+Updates, this
// method also returns two sets of models.CircuitKeys identifying the circuits
// that were opened and closed, respectively, as a result of signing the
// previous commitment txn. This allows the link to clear its mailbox of those
// circuits in case they are still in memory, and ensure the switch's circuit
// map has been updated by deleting the closed circuits.
func (lc *LightningChannel) ProcessChanSyncMsg(
	msg *lnwire.ChannelReestablish) ([]lnwire.Message, []models.CircuitKey,
	[]models.CircuitKey, error) {

	// Now we'll examine the state we have, vs what was contained in the
	// chain sync message. If we're de-synchronized, then we'll send a
	// batch of messages which when applied will kick start the chain
	// resync.
	var (
		updates        []lnwire.Message
		openedCircuits []models.CircuitKey
		closedCircuits []models.CircuitKey
	)

	// If the remote party included the optional fields, then we'll verify
	// their correctness first, as it will influence our decisions below.
	hasRecoveryOptions := msg.LocalUnrevokedCommitPoint != nil
	if hasRecoveryOptions && msg.RemoteCommitTailHeight != 0 {
		// We'll check that they've really sent a valid commit
		// secret from our shachain for our prior height, but only if
		// this isn't the first state.
		heightSecret, err := lc.channelState.RevocationProducer.AtIndex(
			msg.RemoteCommitTailHeight - 1,
		)
		if err != nil {
			return nil, nil, nil, err
		}
		commitSecretCorrect := bytes.Equal(
			heightSecret[:], msg.LastRemoteCommitSecret[:],
		)

		// If the commit secret they sent is incorrect then we'll fail
		// the channel as the remote node has an inconsistent state.
		if !commitSecretCorrect {
			// In this case, we'll return an error to indicate the
			// remote node sent us the wrong values. This will let
			// the caller act accordingly.
			lc.log.Errorf("sync failed: remote provided invalid " +
				"commit secret!")
			return nil, nil, nil, ErrInvalidLastCommitSecret
		}
	}
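
	// AtIndex(h-1) re-derives, from our own shachain producer, the
	// per-commitment secret we revealed when revoking state h-1. If the
	// remote truly observed that revocation, LastRemoteCommitSecret must
	// match it byte for byte.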

	// If this is a taproot channel, then we expect that the remote party
	// has sent the next verification nonce. If they haven't, then we'll
	// bail out, otherwise we'll init our local session then continue as
	// normal.
	switch {
	case lc.channelState.ChanType.IsTaproot() && msg.LocalNonce.IsNone():
		return nil, nil, nil, fmt.Errorf("remote verification nonce " +
			"not sent")

	case lc.channelState.ChanType.IsTaproot() && msg.LocalNonce.IsSome():
		if lc.opts.skipNonceInit {
			// Don't call InitRemoteMusigNonces if we have already
			// done so.
			break
		}

		nextNonce, err := msg.LocalNonce.UnwrapOrErrV(errNoNonce)
		if err != nil {
			return nil, nil, nil, err
		}

		err = lc.InitRemoteMusigNonces(&musig2.Nonces{
			PubNonce: nextNonce,
		})
		if err != nil {
			return nil, nil, nil, fmt.Errorf("unable to init "+
				"remote nonce: %w", err)
		}
	}

	// If we detect that this is a restored channel, then we can skip a
	// portion of the verification, as we already know that we're unable to
	// proceed with any updates.
	isRestoredChan := lc.channelState.HasChanStatus(
		channeldb.ChanStatusRestored,
	)

	// Take note of our current commit chain heights before we begin adding
	// more to them.
	var (
		localTailHeight  = lc.commitChains.Local.tail().height
		remoteTailHeight = lc.commitChains.Remote.tail().height
		remoteTipHeight  = lc.commitChains.Remote.tip().height
	)
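
	// The two switch statements below compare msg.RemoteCommitTailHeight
	// against localTailHeight, and msg.NextLocalCommitHeight against
	// remoteTailHeight/remoteTipHeight, to decide whether we owe the peer
	// a revocation, a commitment, both, or nothing at all.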

	// We'll now check that their view of our local chain is up-to-date.
	// This means checking that their view of our local chain tail height
	// matches what we have stored locally. Note that the tail and tip
	// height will always be the same for the local chain at this stage,
	// as we won't store any received commitment to disk before it is
	// ACKed.
	switch {

	// If their reported height for our local chain tail is ahead of our
	// view, then we're behind!
	case msg.RemoteCommitTailHeight > localTailHeight || isRestoredChan:
		lc.log.Errorf("sync failed with local data loss: remote "+
			"believes our tail height is %v, while we have %v!",
			msg.RemoteCommitTailHeight, localTailHeight)

		if isRestoredChan {
			lc.log.Warnf("detected restored triggering DLP")
		}

		// We must check that we had recovery options to ensure the
		// commitment secret matched up, and the remote is just not
		// lying about its height.
		if !hasRecoveryOptions {
			// At this point the remote is either lying about
			// its height, or we are actually behind but the remote
			// doesn't support data loss protection. In either case
			// it is not safe for us to keep using the channel, so
			// we mark it borked and fail the channel.
			lc.log.Errorf("sync failed: local data loss, but no " +
				"recovery option.")

			return nil, nil, nil, ErrCannotSyncCommitChains
		}

		// In this case, we've likely lost data and shouldn't proceed
		// with channel updates.
		return nil, nil, nil, &ErrCommitSyncLocalDataLoss{
			ChannelPoint: lc.channelState.FundingOutpoint,
			CommitPoint:  msg.LocalUnrevokedCommitPoint,
		}

	// If the height of our commitment chain reported by the remote party
	// is behind our view of the chain, then they probably lost some state,
	// and we'll force close the channel.
	case msg.RemoteCommitTailHeight+1 < localTailHeight:
		lc.log.Errorf("sync failed: remote believes our tail height is "+
			"%v, while we have %v!",
			msg.RemoteCommitTailHeight, localTailHeight)

		return nil, nil, nil, ErrCommitSyncRemoteDataLoss

	// Their view of our commit chain is consistent with our view.
	case msg.RemoteCommitTailHeight == localTailHeight:
		// In sync, don't have to do anything.

	// We owe them a revocation if the tail of our current commitment chain
	// is one greater than what they _think_ our commitment tail is. In
	// this case we'll re-send the last revocation message that we sent.
	// This will be the revocation message for our prior chain tail.
	case msg.RemoteCommitTailHeight+1 == localTailHeight:
		lc.log.Debugf("sync: remote believes our tail height is %v, "+
			"while we have %v, we owe them a revocation",
			msg.RemoteCommitTailHeight, localTailHeight)

		heightToRetransmit := localTailHeight - 1
		revocationMsg, err := lc.generateRevocation(heightToRetransmit)
		if err != nil {
			return nil, nil, nil, err
		}

		updates = append(updates, revocationMsg)

		// Next, as a precaution, we'll check a special edge case. If
		// they initiated a state transition, we sent the revocation,
		// but died before the signature was sent. We re-transmit our
		// revocation, but also initiate a state transition to re-sync
		// them.
		if lc.OweCommitment() {
			newCommit, err := lc.SignNextCommitment()
			switch {

			// If we signed this state, then we'll accumulate
			// another update to send over.
			case err == nil:
				customRecords, err := lnwire.ParseCustomRecords(
					newCommit.AuxSigBlob,
				)
				if err != nil {
					sErr := fmt.Errorf("error parsing aux "+
						"sigs: %w", err)
					return nil, nil, nil, sErr
				}

				commitSig := &lnwire.CommitSig{
					ChanID: lnwire.NewChanIDFromOutPoint(
						lc.channelState.FundingOutpoint,
					),
					CommitSig:     newCommit.CommitSig,
					HtlcSigs:      newCommit.HtlcSigs,
					PartialSig:    newCommit.PartialSig,
					CustomRecords: customRecords,
				}

				updates = append(updates, commitSig)

			// If we get a failure due to not knowing their next
			// point, then this is fine as they'll either send
			// ChannelReady, or revoke their next state to allow
			// us to continue forwards.
			case err == ErrNoWindow:

			// Otherwise, this is an error and we'll treat it as
			// such.
			default:
				return nil, nil, nil, err
			}
		}

	// There should be no other possible states.
	default:
		lc.log.Errorf("sync failed: remote believes our tail height is "+
			"%v, while we have %v!",
			msg.RemoteCommitTailHeight, localTailHeight)
		return nil, nil, nil, ErrCannotSyncCommitChains
	}

	// Now check if our view of the remote chain is consistent with what
	// they tell us.
	switch {

	// The remote's view of what their next commit height is 2+ states
	// ahead of us, we most likely lost data, or the remote is trying to
	// trick us. Since we have no way of verifying whether they are lying
	// or not, we will fail the channel, but should not force close it
	// automatically.
	case msg.NextLocalCommitHeight > remoteTipHeight+1:
		lc.log.Errorf("sync failed: remote's next commit height is %v, "+
			"while we believe it is %v!",
			msg.NextLocalCommitHeight, remoteTipHeight+1)

		return nil, nil, nil, ErrCannotSyncCommitChains

	// They are waiting for a state they have already ACKed.
	case msg.NextLocalCommitHeight <= remoteTailHeight:
		lc.log.Errorf("sync failed: remote's next commit height is %v, "+
			"while we believe it is %v!",
			msg.NextLocalCommitHeight, remoteTipHeight+1)

		// They previously ACKed our current tail, and now they are
		// waiting for it. They probably lost state.
		return nil, nil, nil, ErrCommitSyncRemoteDataLoss

	// They have received our latest commitment, life is good.
	case msg.NextLocalCommitHeight == remoteTipHeight+1:

	// We owe them a commitment if the tip of their chain (from our Pov) is
	// equal to what they think their next commit height should be. We'll
	// re-send all the updates necessary to recreate this state, along
	// with the commit sig.
	case msg.NextLocalCommitHeight == remoteTipHeight:
		lc.log.Debugf("sync: remote's next commit height is %v, while "+
			"we believe it is %v, we owe them a commitment",
			msg.NextLocalCommitHeight, remoteTipHeight+1)

		// Grab the current remote chain tip from the database. This
		// commit diff contains all the information required to re-sync
		// our states.
		commitDiff, err := lc.channelState.RemoteCommitChainTip()
		if err != nil {
			return nil, nil, nil, err
		}

		var commitUpdates []lnwire.Message

		// Next, we'll need to send over any updates we sent as part of
		// this new proposed commitment state.
		for _, logUpdate := range commitDiff.LogUpdates {
			commitUpdates = append(commitUpdates, logUpdate.UpdateMsg)
		}

		// If this is a taproot channel, then we need to regenerate the
		// musig2 signature for the remote party, using their fresh
		// nonce.
		if lc.channelState.ChanType.IsTaproot() {
			partialSig, err := lc.resignMusigCommit(
				commitDiff.Commitment.CommitTx,
			)
			if err != nil {
				return nil, nil, nil, err
			}

			commitDiff.CommitSig.PartialSig = partialSig
		}

		// With the batch of updates accumulated, we'll now re-send the
		// original CommitSig message required to re-sync their remote
		// commitment chain with our local version of their chain.
		commitUpdates = append(commitUpdates, commitDiff.CommitSig)

		// NOTE: If a revocation is not owed, then updates is empty.
		if lc.channelState.LastWasRevoke {
			// If lastWasRevoke is set to true, a revocation was last and we
			// need to reorder the updates so that the revocation stored in
			// updates comes after the LogUpdates+CommitSig.
			//
			// ---logupdates--->
			// ---commitsig---->
			// ---revocation--->
			updates = append(commitUpdates, updates...)
		} else {
			// Otherwise, the revocation should come before LogUpdates
			// + CommitSig.
			//
			// ---revocation--->
			// ---logupdates--->
			// ---commitsig---->
			updates = append(updates, commitUpdates...)
		}

		openedCircuits = commitDiff.OpenedCircuitKeys
		closedCircuits = commitDiff.ClosedCircuitKeys

	// There should be no other possible states as long as the commit chain
	// can have at most two elements. If that's the case, something is
	// wrong.
	default:
		lc.log.Errorf("sync failed: remote's next commit height is %v, "+
			"while we believe it is %v!",
			msg.NextLocalCommitHeight, remoteTipHeight)
		return nil, nil, nil, ErrCannotSyncCommitChains
	}

	// If we didn't have recovery options, then the final check cannot be
	// performed, and we'll return early.
	if !hasRecoveryOptions {
		return updates, openedCircuits, closedCircuits, nil
	}

	// At this point we have determined that either the commit heights are
	// in sync, or that we are in a state we can recover from. As a final
	// check, we ensure that the commitment point sent to us by the remote
	// is valid.
	var commitPoint *btcec.PublicKey
	switch {
	// If their height is one beyond what we know their current height to
	// be, then we need to compare their current unrevoked commitment point
	// as that's what they should send.
	case msg.NextLocalCommitHeight == remoteTailHeight+1:
		commitPoint = lc.channelState.RemoteCurrentRevocation

	// Alternatively, if their height is two beyond what we know their best
	// height to be, then they're holding onto two commitments, and the
	// highest unrevoked point is their next revocation.
	//
	// TODO(roasbeef): verify this in the spec...
	case msg.NextLocalCommitHeight == remoteTailHeight+2:
		commitPoint = lc.channelState.RemoteNextRevocation
	}

	// We only attempt to verify the commitment point if this is not a
	// tweakless channel, as otherwise the point has no validity
	// requirements.
	tweakless := lc.channelState.ChanType.IsTweakless()
	if !tweakless && commitPoint != nil &&
		!commitPoint.IsEqual(msg.LocalUnrevokedCommitPoint) {

		lc.log.Errorf("sync failed: remote sent invalid commit point "+
			"for height %v!",
			msg.NextLocalCommitHeight)
		return nil, nil, nil, ErrInvalidLocalUnrevokedCommitPoint
	}

	return updates, openedCircuits, closedCircuits, nil
}

// computeView takes the given HtlcView, and calculates the balances, filtered
// view (settling unsettled HTLCs), commitment weight and feePerKw, after
// applying the HTLCs to the latest commitment. The returned balances are the
// balances *before* subtracting the commitment fee from the initiator's
// balance. It accepts a "dry run" feerate argument to calculate a potential
// commitment transaction fee.
//
// If the updateState boolean is set true, the add and remove heights of the
// HTLCs will be set to the next commitment height.
func (lc *LightningChannel) computeView(view *HtlcView,
	whoseCommitChain lntypes.ChannelParty, updateState bool,
	dryRunFee fn.Option[chainfee.SatPerKWeight]) (lnwire.MilliSatoshi,
	lnwire.MilliSatoshi, lntypes.WeightUnit, *HtlcView, error) {

	commitChain := lc.commitChains.Local
	dustLimit := lc.channelState.LocalChanCfg.DustLimit
	if whoseCommitChain.IsRemote() {
		commitChain = lc.commitChains.Remote
		dustLimit = lc.channelState.RemoteChanCfg.DustLimit
	}

	// Since the fetched htlc view will include all updates added after the
	// last committed state, we start with the balances reflecting that
	// state.
	ourBalance := commitChain.tip().ourBalance
	theirBalance := commitChain.tip().theirBalance

	// Add the fee from the previous commitment state back to the
	// initiator's balance, so that the fee can be recalculated and
	// re-applied in case fee estimation parameters have changed or the
	// number of outstanding HTLCs has changed.
	if lc.channelState.IsInitiator {
		ourBalance += lnwire.NewMSatFromSatoshis(
			commitChain.tip().fee)
	} else if !lc.channelState.IsInitiator {
		theirBalance += lnwire.NewMSatFromSatoshis(
			commitChain.tip().fee)
	}
	nextHeight := commitChain.tip().height + 1

	// Initialize feePerKw to the last committed fee for this chain as
	// we'll need this to determine which HTLCs are dust, and also the
	// final fee rate.
	view.FeePerKw = commitChain.tip().feePerKw
	view.NextHeight = nextHeight

	// We evaluate the view at this stage, meaning settled and failed HTLCs
	// will remove their corresponding added HTLCs. The resulting filtered
	// view will only have Add entries left, making it easy to compare the
	// channel constraints to the final commitment state. If any fee
	// updates are found in the logs, the commitment fee rate should be
	// changed, so we'll also set the feePerKw to this new value.
	filteredHTLCView, uncommitted, err := lc.evaluateHTLCView(
		view, &ourBalance, &theirBalance, nextHeight, whoseCommitChain,
	)
	if err != nil {
		return 0, 0, 0, nil, err
	}

	if updateState {
		for _, party := range lntypes.BothParties {
			for _, u := range uncommitted.GetForParty(party) {
				u.setCommitHeight(whoseCommitChain, nextHeight)

				if whoseCommitChain == lntypes.Local &&
					u.EntryType == Settle {

					lc.recordSettlement(party, u.Amount)
				}
			}
		}
	}
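
	// Settlements are only tallied while updating the local chain,
	// presumably so each settled HTLC bumps the lifetime sent/received
	// totals exactly once rather than once per commitment chain.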

	feePerKw := filteredHTLCView.FeePerKw

	// Here we override the view's fee-rate if a dry-run fee-rate was
	// passed in.
	if !updateState {
		feePerKw = dryRunFee.UnwrapOr(feePerKw)
	}

	// We need to first check whether ourBalance and theirBalance have gone
	// negative, because MilliSatoshi is an unsigned type and can underflow
	// in `evaluateHTLCView`. This should never happen for views which do
	// not include new updates (remote or local).
	if int64(ourBalance) < 0 {
		err := fmt.Errorf("%w: our balance", ErrBelowChanReserve)
		return 0, 0, 0, nil, err
	}
	if int64(theirBalance) < 0 {
		err := fmt.Errorf("%w: their balance", ErrBelowChanReserve)
		return 0, 0, 0, nil, err
	}

	// Now go through all HTLCs at this stage, to calculate the total
	// weight, needed to calculate the transaction fee.
	var totalHtlcWeight lntypes.WeightUnit
	for _, htlc := range filteredHTLCView.Updates.Local {
		if HtlcIsDust(
			lc.channelState.ChanType, false, whoseCommitChain,
			feePerKw, htlc.Amount.ToSatoshis(), dustLimit,
		) {

			continue
		}

		totalHtlcWeight += input.HTLCWeight
	}
	for _, htlc := range filteredHTLCView.Updates.Remote {
		if HtlcIsDust(
			lc.channelState.ChanType, true, whoseCommitChain,
			feePerKw, htlc.Amount.ToSatoshis(), dustLimit,
		) {

			continue
		}

		totalHtlcWeight += input.HTLCWeight
	}

	totalCommitWeight := CommitWeight(lc.channelState.ChanType) +
		totalHtlcWeight
	return ourBalance, theirBalance, totalCommitWeight, filteredHTLCView, nil
}
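
// For intuition (illustrative numbers only): a commitment carrying three
// non-dust HTLCs yields a weight of CommitWeight(chanType) +
// 3*input.HTLCWeight, and the commitment fee is then derived elsewhere from
// that weight together with the chosen feePerKw.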

// recordSettlement updates the lifetime payment flow values in persistent state
// of the LightningChannel, adding amt to the total received by the redeemer.
func (lc *LightningChannel) recordSettlement(
	redeemer lntypes.ChannelParty, amt lnwire.MilliSatoshi) {

	if redeemer == lntypes.Local {
		lc.channelState.TotalMSatReceived += amt
	} else {
		lc.channelState.TotalMSatSent += amt
	}
}

// genHtlcSigValidationJobs generates a series of signature verification jobs
// meant to verify all the signatures for HTLC's attached to a newly created
// commitment state. The jobs generated are fully populated, and can be sent
// directly into the pool of workers.
//
//nolint:funlen
func genHtlcSigValidationJobs(chanState *channeldb.OpenChannel,
	localCommitmentView *commitment, keyRing *CommitmentKeyRing,
	htlcSigs []lnwire.Sig, leaseExpiry uint32,
	leafStore fn.Option[AuxLeafStore], auxSigner fn.Option[AuxSigner],
	sigBlob fn.Option[tlv.Blob]) ([]VerifyJob, []AuxVerifyJob, error) {

	var (
		isLocalInitiator = chanState.IsInitiator
		localChanCfg     = chanState.LocalChanCfg
		chanType         = chanState.ChanType
	)

	txHash := localCommitmentView.txn.TxHash()
	feePerKw := localCommitmentView.feePerKw
	sigHashType := HtlcSigHashType(chanType)

	// With the required state generated, we'll create a slice with large
	// enough capacity to hold verification jobs for all HTLC's in this
	// view. In the case that we have some dust outputs, then the actual
	// length will be smaller than the total capacity.
	numHtlcs := len(localCommitmentView.incomingHTLCs) +
		len(localCommitmentView.outgoingHTLCs)
	verifyJobs := make([]VerifyJob, 0, numHtlcs)
	auxVerifyJobs := make([]AuxVerifyJob, 0, numHtlcs)

	diskCommit := localCommitmentView.toDiskCommit(lntypes.Local)
	auxResult, err := fn.MapOptionZ(
		leafStore, func(s AuxLeafStore) fn.Result[CommitDiffAuxResult] {
			return s.FetchLeavesFromCommit(
				NewAuxChanState(chanState), *diskCommit,
				*keyRing,
			)
		},
	).Unpack()
	if err != nil {
		return nil, nil, fmt.Errorf("unable to fetch aux leaves: %w",
			err)
	}

	// If we have a sig blob, then we'll attempt to map that to individual
	// blobs for each HTLC we might need a signature for.
	auxHtlcSigs, err := fn.MapOptionZ(
		auxSigner, func(a AuxSigner) fn.Result[[]fn.Option[tlv.Blob]] {
			return a.UnpackSigs(sigBlob)
		},
	).Unpack()
	if err != nil {
		return nil, nil, fmt.Errorf("error unpacking aux sigs: %w",
			err)
	}
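
	// Like the signing path, the unpacked aux signatures are assumed to
	// be positionally aligned with htlcSigs: auxHtlcSigs[i] (if present)
	// belongs to the same HTLC as htlcSigs[i].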

	// We'll iterate through each output in the commitment transaction,
	// populating the sigHash closure function if it's detected to be an
	// HTLC output. Given the sighash, and the signing key, we'll be able
	// to validate each signature within the worker pool.
	i := 0
	for index := range localCommitmentView.txn.TxOut {
		var (
			htlcIndex uint64
			sigHash   func() ([]byte, error)
			sig       input.Signature
			htlc      *paymentDescriptor
			incoming  bool
			auxLeaf   input.AuxTapLeaf
			err       error
		)

		outputIndex := int32(index)
		switch {

		// If this output index is found within the incoming HTLC
		// index, then this means that we need to generate an HTLC
		// success transaction in order to validate the signature.
		//nolint:lll
		case localCommitmentView.incomingHTLCIndex[outputIndex] != nil:
			htlc = localCommitmentView.incomingHTLCIndex[outputIndex]

			htlcIndex = htlc.HtlcIndex
			incoming = true

			sigHash = func() ([]byte, error) {
				op := wire.OutPoint{
					Hash:  txHash,
					Index: uint32(htlc.localOutputIndex),
				}

				htlcFee := HtlcSuccessFee(chanType, feePerKw)
				outputAmt := htlc.Amount.ToSatoshis() - htlcFee

				auxLeaf := fn.ChainOption(func(
					l CommitAuxLeaves) input.AuxTapLeaf {

					leaves := l.IncomingHtlcLeaves
					idx := htlc.HtlcIndex
					return leaves[idx].SecondLevelLeaf
				})(auxResult.AuxLeaves)

				successTx, err := CreateHtlcSuccessTx(
					chanType, isLocalInitiator, op,
					outputAmt, uint32(localChanCfg.CsvDelay),
					leaseExpiry, keyRing.RevocationKey,
					keyRing.ToLocalKey, auxLeaf,
				)
				if err != nil {
					return nil, err
				}

				htlcAmt := int64(htlc.Amount.ToSatoshis())

				if chanType.IsTaproot() {
					// TODO(roasbeef): add abstraction in
					// front
					prevFetcher := txscript.NewCannedPrevOutputFetcher( //nolint:lll
						htlc.ourPkScript, htlcAmt,
					)
					hashCache := txscript.NewTxSigHashes(
						successTx, prevFetcher,
					)
					tapLeaf := txscript.NewBaseTapLeaf(
						htlc.ourWitnessScript,
					)

					return txscript.CalcTapscriptSignaturehash( //nolint:lll
						hashCache, sigHashType,
						successTx, 0, prevFetcher,
						tapLeaf,
					)
				}

				hashCache := input.NewTxSigHashesV0Only(successTx)
				sigHash, err := txscript.CalcWitnessSigHash(
					htlc.ourWitnessScript, hashCache,
					sigHashType, successTx, 0,
					htlcAmt,
				)
				if err != nil {
					return nil, err
				}

				return sigHash, nil
			}

			// Make sure there are more signatures left.
			if i >= len(htlcSigs) {
				return nil, nil, fmt.Errorf("not enough HTLC " +
					"signatures")
			}

			// If this is a taproot channel, then we'll convert it
			// to a schnorr signature, so we can get correct type
			// from ToSignature below.
			if chanType.IsTaproot() {
				htlcSigs[i].ForceSchnorr()
			}

			// With the sighash generated, we'll also store the
			// signature so it can be written to disk if this state
			// is valid.
			sig, err = htlcSigs[i].ToSignature()
			if err != nil {
				return nil, nil, err
			}
			htlc.sig = sig

		// Otherwise, if this is an outgoing HTLC, then we'll need to
		// generate a timeout transaction so we can verify the
		// signature presented.
		//nolint:lll
		case localCommitmentView.outgoingHTLCIndex[outputIndex] != nil:
			htlc = localCommitmentView.outgoingHTLCIndex[outputIndex]

			htlcIndex = htlc.HtlcIndex

			sigHash = func() ([]byte, error) {
				op := wire.OutPoint{
					Hash:  txHash,
					Index: uint32(htlc.localOutputIndex),
				}

				htlcFee := HtlcTimeoutFee(chanType, feePerKw)
				outputAmt := htlc.Amount.ToSatoshis() - htlcFee

				auxLeaf := fn.ChainOption(func(
					l CommitAuxLeaves) input.AuxTapLeaf {

					leaves := l.OutgoingHtlcLeaves
					idx := htlc.HtlcIndex
					return leaves[idx].SecondLevelLeaf
				})(auxResult.AuxLeaves)

				timeoutTx, err := CreateHtlcTimeoutTx(
					chanType, isLocalInitiator, op,
					outputAmt, htlc.Timeout,
					uint32(localChanCfg.CsvDelay),
					leaseExpiry, keyRing.RevocationKey,
					keyRing.ToLocalKey, auxLeaf,
				)
				if err != nil {
					return nil, err
				}

				htlcAmt := int64(htlc.Amount.ToSatoshis())

				if chanType.IsTaproot() {
					// TODO(roasbeef): add abstraction in
					// front
					prevFetcher := txscript.NewCannedPrevOutputFetcher( //nolint:lll
						htlc.ourPkScript, htlcAmt,
					)
					hashCache := txscript.NewTxSigHashes(
						timeoutTx, prevFetcher,
					)
					tapLeaf := txscript.NewBaseTapLeaf(
						htlc.ourWitnessScript,
					)

					return txscript.CalcTapscriptSignaturehash( //nolint:lll
						hashCache, sigHashType,
						timeoutTx, 0, prevFetcher,
						tapLeaf,
					)
				}

				hashCache := input.NewTxSigHashesV0Only(
					timeoutTx,
				)
				sigHash, err := txscript.CalcWitnessSigHash(
					htlc.ourWitnessScript, hashCache,
					sigHashType, timeoutTx, 0,
					htlcAmt,
				)
				if err != nil {
					return nil, err
				}

				return sigHash, nil
			}

			// Make sure there are more signatures left.
			if i >= len(htlcSigs) {
				return nil, nil, fmt.Errorf("not enough HTLC " +
					"signatures")
			}

			// If this is a taproot channel, then we'll convert it
			// to a schnorr signature, so we can get correct type
			// from ToSignature below.
			if chanType.IsTaproot() {
				htlcSigs[i].ForceSchnorr()
			}

			// With the sighash generated, we'll also store the
			// signature so it can be written to disk if this state
			// is valid.
			sig, err = htlcSigs[i].ToSignature()
			if err != nil {
				return nil, nil, err
			}

			htlc.sig = sig

		default:
			continue
		}

		verifyJobs = append(verifyJobs, VerifyJob{
			HtlcIndex: htlcIndex,
			PubKey:    keyRing.RemoteHtlcKey,
			Sig:       sig,
			SigHash:   sigHash,
		})

		if len(auxHtlcSigs) > i {
			auxSig := auxHtlcSigs[i]
			auxVerifyJob := NewAuxVerifyJob(
				auxSig, *keyRing, incoming,
				newAuxHtlcDescriptor(htlc),
				localCommitmentView.customBlob, auxLeaf,
			)

			if htlc.CustomRecords == nil {
				htlc.CustomRecords = make(lnwire.CustomRecords)
			}

			// As this HTLC has a custom signature associated with
			// it, store it in the custom records map so we can
			// write to disk later.
			sigType := htlcCustomSigType.TypeVal()
			htlc.CustomRecords[uint64(sigType)] = auxSig.UnwrapOr(
				nil,
			)

			auxVerifyJobs = append(auxVerifyJobs, auxVerifyJob)
		}

		i++
	}

	// If we received a number of HTLC signatures that doesn't match our
	// commitment, we'll return an error now.
	if len(htlcSigs) != i {
		return nil, nil, fmt.Errorf("number of htlc sig mismatch. "+
			"Expected %v sigs, got %v", i, len(htlcSigs))
	}

	return verifyJobs, auxVerifyJobs, nil
}
|
|
|
|
|
2018-01-09 03:50:24 +01:00
|
|
|
// InvalidCommitSigError is a struct that implements the error interface to
|
2018-04-05 02:41:05 +02:00
|
|
|
// report a failure to validate a commitment signature from a remote peer.
|
2018-01-09 03:50:24 +01:00
|
|
|
// We'll use the items in this struct to generate a rich error message for the
|
|
|
|
// remote peer when we receive an invalid signature from it. Doing so can
|
|
|
|
// greatly aid in debugging cross-implementation issues.
|
|
|
|
type InvalidCommitSigError struct {
|
|
|
|
commitHeight uint64
|
|
|
|
|
|
|
|
commitSig []byte
|
|
|
|
|
|
|
|
sigHash []byte
|
|
|
|
|
|
|
|
commitTx []byte
|
|
|
|
}
|
|
|
|
|
|
|
|
// Error returns a detailed error string including the exact transaction that
|
|
|
|
// caused an invalid commitment signature.
|
|
|
|
func (i *InvalidCommitSigError) Error() string {
|
|
|
|
return fmt.Sprintf("rejected commitment: commit_height=%v, "+
|
2018-04-05 02:41:05 +02:00
|
|
|
"invalid_commit_sig=%x, commit_tx=%x, sig_hash=%x", i.commitHeight,
|
2018-01-09 03:50:24 +01:00
|
|
|
i.commitSig[:], i.commitTx, i.sigHash[:])
|
|
|
|
}
|
|
|
|
|
|
|
|
// A compile time flag to ensure that InvalidCommitSigError implements the
|
|
|
|
// error interface.
|
|
|
|
var _ error = (*InvalidCommitSigError)(nil)
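// As a purely illustrative sketch (nothing below is part of this file), a
// caller of ReceiveNewCommitment might unpack this error with errors.As; the
// lnChannel, commitSigs and log identifiers here are hypothetical:
//
//	err := lnChannel.ReceiveNewCommitment(commitSigs)
//
//	var sigErr *InvalidCommitSigError
//	if errors.As(err, &sigErr) {
//		// The error string carries the sighash and the serialized
//		// commitment transaction, which can be logged or relayed to
//		// the peer to aid cross-implementation debugging.
//		log.Errorf("peer sent invalid commit sig: %v", sigErr)
//	}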
|
|
|
|
|
2023-01-20 04:24:48 +01:00
|
|
|
// InvalidPartialCommitSigError is used when we encounter an invalid musig2
|
|
|
|
// partial signature.
|
|
|
|
type InvalidPartialCommitSigError struct {
|
|
|
|
InvalidCommitSigError
|
|
|
|
|
|
|
|
*invalidPartialSigError
|
|
|
|
}
|
|
|
|
|
|
|
|
// Error returns a detailed error string including the exact transaction that
|
|
|
|
// caused an invalid partial commitment signature.
|
|
|
|
func (i *InvalidPartialCommitSigError) Error() string {
|
|
|
|
return fmt.Sprintf("rejected commitment: commit_height=%v, "+
|
|
|
|
"commit_tx=%x -- %v", i.commitHeight, i.commitTx,
|
|
|
|
i.invalidPartialSigError)
|
|
|
|
}
|
|
|
|
|
2018-07-31 10:29:12 +02:00
|
|
|
// InvalidHtlcSigError is a struct that implements the error interface to
|
2018-04-05 02:41:05 +02:00
|
|
|
// report a failure to validate an htlc signature from a remote peer. We'll use
|
|
|
|
// the items in this struct to generate a rich error message for the remote
|
|
|
|
// peer when we receive an invalid signature from it. Doing so can greatly aid
|
|
|
|
// in debugging cross-implementation issues.
|
|
|
|
type InvalidHtlcSigError struct {
|
|
|
|
commitHeight uint64
|
|
|
|
|
|
|
|
htlcSig []byte
|
|
|
|
|
|
|
|
htlcIndex uint64
|
|
|
|
|
|
|
|
sigHash []byte
|
|
|
|
|
|
|
|
commitTx []byte
|
|
|
|
}
|
|
|
|
|
|
|
|
// Error returns a detailed error string including the exact transaction that
|
|
|
|
// caused an invalid htlc signature.
|
|
|
|
func (i *InvalidHtlcSigError) Error() string {
|
|
|
|
return fmt.Sprintf("rejected commitment: commit_height=%v, "+
|
|
|
|
"invalid_htlc_sig=%x, commit_tx=%x, sig_hash=%x", i.commitHeight,
|
|
|
|
i.htlcSig, i.commitTx, i.sigHash[:])
|
|
|
|
}
|
|
|
|
|
|
|
|
// A compile time flag to ensure that InvalidHtlcSigError implements the
|
|
|
|
// error interface.
|
|
|
|
var _ error = (*InvalidHtlcSigError)(nil)
|
|
|
|
|
2016-11-23 09:36:55 +01:00
|
|
|
// ReceiveNewCommitment processes a signature for a new commitment state sent by
|
2017-10-19 02:36:28 +02:00
|
|
|
// the remote party. This method should be called in response to the
|
2016-07-06 02:01:55 +02:00
|
|
|
// remote party initiating a new change, or when the remote party sends a
|
|
|
|
// signature fully accepting a new state we've initiated. If we are able to
|
2016-10-26 14:25:42 +02:00
|
|
|
// successfully validate the signature, then the generated commitment is added
|
2016-07-06 02:01:55 +02:00
|
|
|
// to our local commitment chain. Once we send a revocation for our prior
|
|
|
|
// state, then this newly added commitment becomes our current accepted channel
|
|
|
|
// state.
|
2023-07-23 17:35:20 +02:00
|
|
|
//
|
|
|
|
//nolint:funlen
|
2023-01-20 03:27:07 +01:00
|
|
|
func (lc *LightningChannel) ReceiveNewCommitment(commitSigs *CommitSigs) error {
|
2016-11-21 04:18:30 +01:00
|
|
|
lc.Lock()
|
|
|
|
defer lc.Unlock()
|
|
|
|
|
2019-09-24 14:33:59 +02:00
|
|
|
// Check for empty commit sig. Because of a previously existing bug, it
|
|
|
|
// is possible that we receive an empty commit sig from nodes running an
|
|
|
|
// older version. This is a relaxation of the spec, but it is still
|
|
|
|
// possible to handle it. To not break any channels with those older
|
|
|
|
// nodes, we just log the event. This check is also not totally
|
|
|
|
// reliable, because it could be that we've sent out a new sig, but the
|
|
|
|
// remote hasn't received it yet. We could then falsely assume that they
|
|
|
|
// should add our updates to their remote commitment tx.
|
2024-07-31 01:44:18 +02:00
|
|
|
if !lc.oweCommitment(lntypes.Remote) {
|
2019-09-24 14:33:59 +02:00
|
|
|
lc.log.Warnf("empty commit sig message received")
|
|
|
|
}
|
|
|
|
|
2017-10-19 02:36:28 +02:00
|
|
|
// Determine the last update on the local log that has been locked in.
|
2024-08-09 23:52:21 +02:00
|
|
|
localACKedIndex := lc.commitChains.Remote.tail().messageIndices.Local
|
2024-08-09 21:47:58 +02:00
|
|
|
localHtlcIndex := lc.commitChains.Remote.tail().ourHtlcIndex
|
2017-10-19 02:36:28 +02:00
|
|
|
|
2017-02-21 02:55:33 +01:00
|
|
|
// Ensure that this new local update from the remote node respects all
|
|
|
|
// the constraints we specified during initial channel setup. If not,
|
|
|
|
// then we'll abort the channel as they've violated our constraints.
|
2023-11-05 11:29:34 +01:00
|
|
|
//
|
|
|
|
// We do not enforce the FeeBuffer here because when we reach this
|
|
|
|
// point all updates will have to get locked-in (we already received
|
|
|
|
// the UpdateAddHTLC msg from our peer prior to receiving the
|
|
|
|
// commit-sig).
|
2019-03-09 01:05:28 +01:00
|
|
|
err := lc.validateCommitmentSanity(
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Remote.logIndex, localACKedIndex, lntypes.Local,
|
2024-07-31 01:44:18 +02:00
|
|
|
NoBuffer, nil, nil,
|
2019-03-09 01:05:28 +01:00
|
|
|
)
|
2016-11-23 09:36:55 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-07-22 01:50:20 +02:00
|
|
|
// We're receiving a new commitment which attempts to extend our local
|
2017-07-30 21:55:27 +02:00
|
|
|
// commitment chain height by one, so fetch the proper commitment point
|
2017-10-19 02:36:28 +02:00
|
|
|
// as this will be needed to derive the keys required to construct the
|
2017-07-30 21:55:27 +02:00
|
|
|
// commitment.
|
2016-07-06 02:01:55 +02:00
|
|
|
nextHeight := lc.currentHeight + 1
|
2017-07-30 21:55:27 +02:00
|
|
|
commitSecret, err := lc.channelState.RevocationProducer.AtIndex(nextHeight)
|
2016-07-06 02:01:55 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-01-16 15:47:43 +01:00
|
|
|
commitPoint := input.ComputeCommitmentPoint(commitSecret[:])
|
2019-09-17 04:06:19 +02:00
|
|
|
keyRing := DeriveCommitmentKeys(
|
2024-07-31 01:44:18 +02:00
|
|
|
commitPoint, lntypes.Local, lc.channelState.ChanType,
|
2020-01-06 11:42:02 +01:00
|
|
|
&lc.channelState.LocalChanCfg, &lc.channelState.RemoteChanCfg,
|
2019-03-09 01:05:28 +01:00
|
|
|
)
|
2016-07-06 02:01:55 +02:00
|
|
|
|
2017-07-30 21:55:27 +02:00
|
|
|
// With the current commitment point re-calculated, construct the new
|
2017-11-29 14:20:02 +01:00
|
|
|
// commitment view which includes all the entries (pending or committed)
|
|
|
|
// we know of in the remote node's HTLC log, but only our local changes
|
|
|
|
// up to the last change the remote node has ACK'd.
|
2017-11-10 07:53:18 +01:00
|
|
|
localCommitmentView, err := lc.fetchCommitmentView(
|
2024-07-31 01:44:18 +02:00
|
|
|
lntypes.Local, localACKedIndex, localHtlcIndex,
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Remote.logIndex, lc.updateLogs.Remote.htlcCounter,
|
2017-11-10 07:53:18 +01:00
|
|
|
keyRing,
|
|
|
|
)
|
2016-07-06 02:01:55 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2016-01-05 22:01:42 +01:00
|
|
|
}
|
|
|
|
|
2019-09-24 13:12:53 +02:00
|
|
|
lc.log.Tracef("extending local chain to height %v, "+
|
2017-11-10 07:53:18 +01:00
|
|
|
"local_log=%v, remote_log=%v",
|
2019-09-24 13:12:53 +02:00
|
|
|
localCommitmentView.height,
|
2024-08-09 22:00:59 +02:00
|
|
|
localACKedIndex, lc.updateLogs.Remote.logIndex)
|
2017-02-21 02:55:33 +01:00
|
|
|
|
2019-09-24 13:12:53 +02:00
|
|
|
lc.log.Tracef("local chain: our_balance=%v, "+
|
|
|
|
"their_balance=%v, commit_tx: %v",
|
2016-07-13 02:35:51 +02:00
|
|
|
localCommitmentView.ourBalance, localCommitmentView.theirBalance,
|
2024-07-25 16:18:00 +02:00
|
|
|
lnutils.SpewLogClosure(localCommitmentView.txn))
|
2016-07-13 02:35:51 +02:00
|
|
|
|
2024-04-09 04:48:36 +02:00
|
|
|
var auxSigBlob fn.Option[tlv.Blob]
|
|
|
|
if commitSigs.AuxSigBlob != nil {
|
|
|
|
auxSigBlob = fn.Some(commitSigs.AuxSigBlob)
|
|
|
|
}
|
|
|
|
|
2017-07-30 21:55:27 +02:00
|
|
|
// As an optimization, we'll generate a series of jobs for the worker
|
2023-01-20 04:24:48 +01:00
|
|
|
// pool to verify each of the HTLC signatures presented. Once
|
2017-07-30 21:55:27 +02:00
|
|
|
// generated, we'll submit these jobs to the worker pool.
|
2021-07-15 02:16:13 +02:00
|
|
|
var leaseExpiry uint32
|
|
|
|
if lc.channelState.ChanType.HasLeaseExpiration() {
|
|
|
|
leaseExpiry = lc.channelState.ThawHeight
|
|
|
|
}
|
2024-04-09 04:48:36 +02:00
|
|
|
verifyJobs, auxVerifyJobs, err := genHtlcSigValidationJobs(
|
2024-04-25 19:00:42 +02:00
|
|
|
lc.channelState, localCommitmentView, keyRing,
|
2024-04-09 04:48:36 +02:00
|
|
|
commitSigs.HtlcSigs, leaseExpiry, lc.leafStore, lc.auxSigner,
|
|
|
|
auxSigBlob,
|
2018-01-31 04:55:39 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-07-30 21:55:27 +02:00
|
|
|
cancelChan := make(chan struct{})
|
|
|
|
verifyResps := lc.sigPool.SubmitVerifyBatch(verifyJobs, cancelChan)
|
|
|
|
|
2023-01-20 04:24:48 +01:00
|
|
|
localCommitTx := localCommitmentView.txn
|
|
|
|
|
2017-07-30 21:55:27 +02:00
|
|
|
// While the HTLC verification jobs are proceeding asynchronously,
|
|
|
|
// we'll ensure that the newly constructed commitment state has a valid
|
2016-07-06 02:01:55 +02:00
|
|
|
// signature.
|
2023-01-20 04:24:48 +01:00
|
|
|
//
|
|
|
|
// To do that, we'll construct the sighash of the commitment
|
|
|
|
// transaction corresponding to this newly proposed state update. If
|
|
|
|
// this is a taproot channel, then in order to validate the sighash,
|
|
|
|
// we'll need to call into the relevant tapscript methods.
|
|
|
|
if lc.channelState.ChanType.IsTaproot() {
|
|
|
|
localSession := lc.musigSessions.LocalSession
|
|
|
|
|
2024-02-24 03:04:51 +01:00
|
|
|
partialSig, err := commitSigs.PartialSig.UnwrapOrErrV(
|
|
|
|
errNoPartialSig,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-01-20 04:24:48 +01:00
|
|
|
// As we want to ensure we never write nonces to disk, we'll
|
|
|
|
// use the shachain state to generate a nonce for our next
|
|
|
|
// local state. Similar to generateRevocation, we do height + 2
|
|
|
|
// (next height + 1) here, as this is for the _next_ local
|
|
|
|
// state, and we're about to accept height + 1.
|
|
|
|
localCtrNonce := WithLocalCounterNonce(
|
|
|
|
nextHeight+1, lc.taprootNonceProducer,
|
|
|
|
)
|
|
|
|
nextVerificationNonce, err := localSession.VerifyCommitSig(
|
2024-02-24 03:04:51 +01:00
|
|
|
localCommitTx, &partialSig, localCtrNonce,
|
2023-01-20 04:24:48 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
close(cancelChan)
|
2022-02-23 14:48:00 +01:00
|
|
|
|
2023-01-20 04:24:48 +01:00
|
|
|
var sigErr invalidPartialSigError
|
|
|
|
if errors.As(err, &sigErr) {
|
|
|
|
// If we fail to validate their commitment
|
|
|
|
// signature, we'll generate a special error to
|
|
|
|
// send over the protocol. We'll include the
|
|
|
|
// exact signature and commitment we failed to
|
|
|
|
// verify against in order to aid debugging.
|
|
|
|
var txBytes bytes.Buffer
|
2023-07-23 17:35:20 +02:00
|
|
|
_ = localCommitTx.Serialize(&txBytes)
|
2023-01-20 04:24:48 +01:00
|
|
|
return &InvalidPartialCommitSigError{
|
|
|
|
invalidPartialSigError: &sigErr,
|
|
|
|
InvalidCommitSigError: InvalidCommitSigError{ //nolint:lll
|
|
|
|
commitHeight: nextHeight,
|
|
|
|
commitTx: txBytes.Bytes(),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
2018-01-09 03:50:24 +01:00
|
|
|
|
2023-01-20 04:24:48 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now that we have the next verification nonce for our local
|
|
|
|
// session, we'll refresh it to yield a new session we'll use
|
|
|
|
// for the next incoming signature.
|
|
|
|
newLocalSession, err := lc.musigSessions.LocalSession.Refresh(
|
|
|
|
nextVerificationNonce,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
lc.musigSessions.LocalSession = newLocalSession
|
|
|
|
} else {
|
|
|
|
multiSigScript := lc.signDesc.WitnessScript
|
|
|
|
prevFetcher := txscript.NewCannedPrevOutputFetcher(
|
|
|
|
multiSigScript, int64(lc.channelState.Capacity),
|
|
|
|
)
|
|
|
|
hashCache := txscript.NewTxSigHashes(localCommitTx, prevFetcher)
|
|
|
|
|
|
|
|
sigHash, err := txscript.CalcWitnessSigHash(
|
|
|
|
multiSigScript, hashCache, txscript.SigHashAll,
|
|
|
|
localCommitTx, 0, int64(lc.channelState.Capacity),
|
|
|
|
)
|
|
|
|
if err != nil {
|
2023-07-23 17:35:20 +02:00
|
|
|
// TODO(roasbeef): fetchview has already mutated the
|
|
|
|
// HTLCs... * need to either roll-back, or make pure
|
2023-01-20 04:24:48 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
verifyKey := lc.channelState.RemoteChanCfg.MultiSigKey.PubKey
|
|
|
|
|
|
|
|
cSig, err := commitSigs.CommitSig.ToSignature()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !cSig.Verify(sigHash, verifyKey) {
|
|
|
|
close(cancelChan)
|
|
|
|
|
|
|
|
// If we fail to validate their commitment signature,
|
|
|
|
// we'll generate a special error to send over the
|
|
|
|
// protocol. We'll include the exact signature and
|
|
|
|
// commitment we failed to verify against in order to
|
|
|
|
// aid debugging.
|
|
|
|
var txBytes bytes.Buffer
|
2023-07-23 17:35:20 +02:00
|
|
|
_ = localCommitTx.Serialize(&txBytes)
|
|
|
|
|
2023-01-20 04:24:48 +01:00
|
|
|
return &InvalidCommitSigError{
|
|
|
|
commitHeight: nextHeight,
|
2023-07-23 17:35:20 +02:00
|
|
|
commitSig: commitSigs.CommitSig.ToSignatureBytes(), //nolint:lll
|
2023-01-20 04:24:48 +01:00
|
|
|
sigHash: sigHash,
|
|
|
|
commitTx: txBytes.Bytes(),
|
|
|
|
}
|
2018-01-09 03:50:24 +01:00
|
|
|
}
|
2016-07-06 02:01:55 +02:00
|
|
|
}
|
2016-01-05 22:01:42 +01:00
|
|
|
|
2017-07-30 21:55:27 +02:00
|
|
|
// With the primary commitment transaction validated, we'll check each
|
|
|
|
// of the HTLC validation jobs.
|
|
|
|
for i := 0; i < len(verifyJobs); i++ {
|
|
|
|
// In the case that a single signature is invalid, we'll exit
|
|
|
|
// early and cancel all the outstanding verification jobs.
|
2018-12-15 01:35:07 +01:00
|
|
|
htlcErr := <-verifyResps
|
|
|
|
if htlcErr != nil {
|
|
|
|
close(cancelChan)
|
2018-04-05 02:41:05 +02:00
|
|
|
|
2018-12-15 01:35:07 +01:00
|
|
|
sig, err := lnwire.NewSigFromSignature(
|
|
|
|
htlcErr.Sig,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
sigHash, err := htlcErr.SigHash()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-04-05 02:41:05 +02:00
|
|
|
|
2018-12-15 01:35:07 +01:00
|
|
|
var txBytes bytes.Buffer
|
2024-08-09 22:00:59 +02:00
|
|
|
err = localCommitTx.Serialize(&txBytes)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-12-15 01:35:07 +01:00
|
|
|
return &InvalidHtlcSigError{
|
|
|
|
commitHeight: nextHeight,
|
|
|
|
htlcSig: sig.ToSignatureBytes(),
|
|
|
|
htlcIndex: htlcErr.HtlcIndex,
|
|
|
|
sigHash: sigHash,
|
|
|
|
commitTx: txBytes.Bytes(),
|
2017-11-12 00:00:45 +01:00
|
|
|
}
|
2017-07-30 21:55:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-04-09 04:48:36 +02:00
|
|
|
// Now that we know all the normal sigs are valid, we'll also verify
|
|
|
|
// the aux jobs, if any exist.
|
|
|
|
err = fn.MapOptionZ(lc.auxSigner, func(a AuxSigner) error {
|
|
|
|
return a.VerifySecondLevelSigs(
|
|
|
|
NewAuxChanState(lc.channelState), localCommitTx,
|
|
|
|
auxVerifyJobs,
|
|
|
|
)
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("unable to validate aux sigs: %w", err)
|
|
|
|
}
|
|
|
|
|
2016-07-06 02:01:55 +02:00
|
|
|
// The signature checks out, so we can now add the new commitment to
|
2023-01-20 04:24:48 +01:00
|
|
|
// our local commitment chain. For regular channels, we can just
|
|
|
|
// serialize the ECDSA sig. For taproot channels, we'll serialize the
|
|
|
|
// partial sig that includes the nonce that was used for signing.
|
|
|
|
if lc.channelState.ChanType.IsTaproot() {
|
2024-02-24 03:04:51 +01:00
|
|
|
partialSig, err := commitSigs.PartialSig.UnwrapOrErrV(
|
|
|
|
errNoPartialSig,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-01-20 04:24:48 +01:00
|
|
|
var sigBytes [lnwire.PartialSigWithNonceLen]byte
|
|
|
|
b := bytes.NewBuffer(sigBytes[0:0])
|
2024-02-24 03:04:51 +01:00
|
|
|
if err := partialSig.Encode(b); err != nil {
|
2023-01-20 04:24:48 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
localCommitmentView.sig = sigBytes[:]
|
|
|
|
} else {
|
2023-07-23 17:35:20 +02:00
|
|
|
localCommitmentView.sig = commitSigs.CommitSig.ToSignatureBytes() //nolint:lll
|
2023-01-20 04:24:48 +01:00
|
|
|
}
|
|
|
|
|
2024-08-09 21:47:58 +02:00
|
|
|
lc.commitChains.Local.addCommitment(localCommitmentView)
|
2016-01-05 22:01:42 +01:00
|
|
|
|
|
|
|
return nil
|
2015-12-17 05:58:01 +01:00
|
|
|
}
|
|
|
|
|
2021-08-10 22:56:45 +02:00
|
|
|
// IsChannelClean returns true if neither side has pending commitments, neither
|
|
|
|
// side has HTLC's, and all updates are locked in irrevocably. Internally, it
|
|
|
|
// utilizes the oweCommitment function by calling it for local and remote
|
|
|
|
// evaluation. We check if we have a pending commitment for our local state
|
|
|
|
// since this function may be called by sub-systems that are not the link (e.g.
|
|
|
|
// the rpcserver), and the ReceiveNewCommitment & RevokeCurrentCommitment calls
|
|
|
|
// are not atomic, even though link processing ensures no updates can happen in
|
|
|
|
// between.
|
|
|
|
func (lc *LightningChannel) IsChannelClean() bool {
|
|
|
|
lc.RLock()
|
|
|
|
defer lc.RUnlock()
|
|
|
|
|
|
|
|
// Check whether we have a pending commitment for our local state.
|
2024-08-09 21:47:58 +02:00
|
|
|
if lc.commitChains.Local.hasUnackedCommitment() {
|
2021-08-10 22:56:45 +02:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check whether our counterparty has a pending commitment for their
|
|
|
|
// state.
|
2024-08-09 21:47:58 +02:00
|
|
|
if lc.commitChains.Remote.hasUnackedCommitment() {
|
2021-08-10 22:56:45 +02:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// We call ActiveHtlcs to ensure there are no HTLCs on either
|
|
|
|
// commitment.
|
|
|
|
if len(lc.channelState.ActiveHtlcs()) != 0 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now check that both local and remote commitments are signing the
|
|
|
|
// same updates.
|
2024-07-31 01:44:18 +02:00
|
|
|
if lc.oweCommitment(lntypes.Local) {
|
2021-08-10 22:56:45 +02:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2024-07-31 01:44:18 +02:00
|
|
|
if lc.oweCommitment(lntypes.Remote) {
|
2021-08-10 22:56:45 +02:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we reached this point, the channel has no HTLCs and both
|
|
|
|
// commitments sign the same updates.
|
|
|
|
return true
|
|
|
|
}
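// A minimal usage sketch, assuming a hypothetical caller holding an active
// *LightningChannel (lnChannel) and a hypothetical initiateCoopClose helper:
// sub-systems such as the rpcserver can gate actions that require a fully
// settled channel on IsChannelClean.
//
//	if !lnChannel.IsChannelClean() {
//		return fmt.Errorf("channel still has pending updates or HTLCs")
//	}
//	return initiateCoopClose(lnChannel)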
|
|
|
|
|
2019-09-24 14:33:59 +02:00
|
|
|
// OweCommitment returns a boolean value reflecting whether we need to send
|
|
|
|
// out a commitment signature because there are outstanding local updates and/or
|
|
|
|
// updates in the local commit tx that aren't reflected in the remote commit tx
|
|
|
|
// yet.
|
2022-03-19 17:59:19 +01:00
|
|
|
func (lc *LightningChannel) OweCommitment() bool {
|
2019-09-24 14:33:59 +02:00
|
|
|
lc.RLock()
|
|
|
|
defer lc.RUnlock()
|
|
|
|
|
2024-07-31 01:44:18 +02:00
|
|
|
return lc.oweCommitment(lntypes.Local)
|
2019-09-24 14:33:59 +02:00
|
|
|
}
|
|
|
|
|
2023-11-28 05:26:21 +01:00
|
|
|
// NeedCommitment returns a boolean value reflecting whether we are waiting on
|
|
|
|
// a commitment signature because there are outstanding remote updates and/or
|
|
|
|
// updates in the remote commit tx that aren't reflected in the local commit tx
|
|
|
|
// yet.
|
|
|
|
func (lc *LightningChannel) NeedCommitment() bool {
|
|
|
|
lc.RLock()
|
|
|
|
defer lc.RUnlock()
|
|
|
|
|
2024-07-31 01:44:18 +02:00
|
|
|
return lc.oweCommitment(lntypes.Remote)
|
2023-11-28 05:26:21 +01:00
|
|
|
}
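// As a rough sketch of how these predicates might drive a caller's state
// machine (the signAndSendCommitment helper and lnChannel value are
// hypothetical, not part of this package):
//
//	// Only sign if we actually owe the remote party a new commitment.
//	if lnChannel.OweCommitment() {
//		if err := signAndSendCommitment(lnChannel); err != nil {
//			return err
//		}
//	}
//
//	// NeedCommitment tells us whether we're still waiting on the remote
//	// party to send a signature covering their outstanding updates.
//	waitingOnRemoteSig := lnChannel.NeedCommitment()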
|
|
|
|
|
2019-09-24 14:33:59 +02:00
|
|
|
// oweCommitment is the internal version of OweCommitment. This function expects
|
|
|
|
// to be executed with a lock held.
|
2024-07-31 01:44:18 +02:00
|
|
|
func (lc *LightningChannel) oweCommitment(issuer lntypes.ChannelParty) bool {
|
2019-09-24 14:33:59 +02:00
|
|
|
var (
|
|
|
|
remoteUpdatesPending, localUpdatesPending bool
|
|
|
|
|
2024-08-09 21:47:58 +02:00
|
|
|
lastLocalCommit = lc.commitChains.Local.tip()
|
|
|
|
lastRemoteCommit = lc.commitChains.Remote.tip()
|
2019-09-24 14:33:59 +02:00
|
|
|
|
|
|
|
perspective string
|
|
|
|
)
|
|
|
|
|
2024-07-31 01:44:18 +02:00
|
|
|
if issuer.IsLocal() {
|
2019-09-24 14:33:59 +02:00
|
|
|
perspective = "local"
|
|
|
|
|
|
|
|
// There are local updates pending if our local update log is
|
|
|
|
// not in sync with our remote commitment tx.
|
2024-08-09 22:00:59 +02:00
|
|
|
localUpdatesPending = lc.updateLogs.Local.logIndex !=
|
2024-08-09 23:52:21 +02:00
|
|
|
lastRemoteCommit.messageIndices.Local
|
2019-09-24 14:33:59 +02:00
|
|
|
|
|
|
|
// There are remote updates pending if their remote commitment
|
|
|
|
// tx (our local commitment tx) contains updates that we haven't
|
|
|
|
// added to our remote commitment tx yet.
|
2024-08-09 23:52:21 +02:00
|
|
|
remoteUpdatesPending = lastLocalCommit.messageIndices.Remote !=
|
|
|
|
lastRemoteCommit.messageIndices.Remote
|
2019-09-24 14:33:59 +02:00
|
|
|
} else {
|
|
|
|
perspective = "remote"
|
|
|
|
|
|
|
|
// There are local updates pending (local updates from the
|
|
|
|
// perspective of the remote party) if the remote party has
|
|
|
|
// updates to their remote tx pending for which they haven't
|
|
|
|
// signed yet.
|
2024-08-09 22:00:59 +02:00
|
|
|
localUpdatesPending = lc.updateLogs.Remote.logIndex !=
|
2024-08-09 23:52:21 +02:00
|
|
|
lastLocalCommit.messageIndices.Remote
|
2019-09-24 14:33:59 +02:00
|
|
|
|
|
|
|
// There are remote updates pending (remote updates from the
|
|
|
|
// perspective of the remote party) if we have updates on our
|
|
|
|
// remote commitment tx that they haven't added to theirs yet.
|
2024-08-09 23:52:21 +02:00
|
|
|
remoteUpdatesPending = lastRemoteCommit.messageIndices.Local !=
|
|
|
|
lastLocalCommit.messageIndices.Local
|
2019-09-24 14:33:59 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// If any of the conditions above is true, we owe a commitment
|
|
|
|
// signature.
|
|
|
|
oweCommitment := localUpdatesPending || remoteUpdatesPending
|
|
|
|
|
|
|
|
lc.log.Tracef("%v owes commit: %v (local updates: %v, "+
|
|
|
|
"remote updates %v)", perspective, oweCommitment,
|
|
|
|
localUpdatesPending, remoteUpdatesPending)
|
|
|
|
|
|
|
|
return oweCommitment
|
|
|
|
}
|
|
|
|
|
2023-12-12 04:18:57 +01:00
|
|
|
// NumPendingUpdates returns the number of updates originated by whoseUpdates
|
|
|
|
// that have not been committed to the *tip* of whoseCommit's commitment chain.
|
|
|
|
func (lc *LightningChannel) NumPendingUpdates(whoseUpdates lntypes.ChannelParty,
|
|
|
|
whoseCommit lntypes.ChannelParty) uint64 {
|
|
|
|
|
2019-04-10 13:10:25 +02:00
|
|
|
lc.RLock()
|
|
|
|
defer lc.RUnlock()
|
|
|
|
|
2023-12-12 04:18:57 +01:00
|
|
|
lastCommit := lc.commitChains.GetForParty(whoseCommit).tip()
|
|
|
|
updateIndex := lc.updateLogs.GetForParty(whoseUpdates).logIndex
|
2019-04-10 13:10:25 +02:00
|
|
|
|
2023-12-12 04:18:57 +01:00
|
|
|
return updateIndex - lastCommit.messageIndices.GetForParty(whoseUpdates)
|
2019-04-10 13:10:25 +02:00
|
|
|
}
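// For illustration only (lnChannel is a hypothetical *LightningChannel): the
// number of our own updates not yet committed at the tip of the remote
// party's chain could be queried as follows.
//
//	numOurPending := lnChannel.NumPendingUpdates(
//		lntypes.Local, lntypes.Remote,
//	)
//	if numOurPending == 0 {
//		// Nothing of ours is waiting to be signed for on the remote
//		// commitment.
//	}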
|
|
|
|
|
2016-07-06 02:01:55 +02:00
|
|
|
// RevokeCurrentCommitment revokes the next lowest unrevoked commitment
|
|
|
|
// transaction in the local commitment chain. As a result the edge of our
|
|
|
|
// revocation window is extended by one, and the tail of our local commitment
|
|
|
|
// chain is advanced by a single commitment. This now-lowest unrevoked
|
2018-01-17 03:09:32 +01:00
|
|
|
// commitment becomes our currently accepted state within the channel. This
|
|
|
|
// method also returns the set of HTLC's currently active within the commitment
|
2022-05-10 12:33:44 +02:00
|
|
|
// transaction and the htlcs that were resolved. This return value allows callers
|
|
|
|
// to act once an HTLC has been locked into our commitment transaction.
|
|
|
|
func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck,
|
|
|
|
[]channeldb.HTLC, map[uint64]bool, error) {
|
|
|
|
|
2016-11-21 04:18:30 +01:00
|
|
|
lc.Lock()
|
|
|
|
defer lc.Unlock()
|
|
|
|
|
2017-07-09 01:30:20 +02:00
|
|
|
revocationMsg, err := lc.generateRevocation(lc.currentHeight)
|
2016-07-06 02:01:55 +02:00
|
|
|
if err != nil {
|
2022-05-10 12:33:44 +02:00
|
|
|
return nil, nil, nil, err
|
2015-12-31 07:36:01 +01:00
|
|
|
}
|
|
|
|
|
2019-09-24 13:12:53 +02:00
|
|
|
lc.log.Tracef("revoking height=%v, now at height=%v",
|
2024-08-09 21:47:58 +02:00
|
|
|
lc.commitChains.Local.tail().height,
|
2017-11-10 08:01:00 +01:00
|
|
|
lc.currentHeight+1)
|
2016-07-13 02:35:51 +02:00
|
|
|
|
2016-07-06 02:01:55 +02:00
|
|
|
// Advance our tail, as we've revoked our previous state.
|
2024-08-09 21:47:58 +02:00
|
|
|
lc.commitChains.Local.advanceTail()
|
2016-07-06 02:01:55 +02:00
|
|
|
lc.currentHeight++
|
|
|
|
|
2016-09-07 19:45:27 +02:00
|
|
|
// Additionally, generate a channel delta for this state transition for
|
|
|
|
// persistent storage.
|
2024-08-09 21:47:58 +02:00
|
|
|
chainTail := lc.commitChains.Local.tail()
|
2024-07-31 01:44:18 +02:00
|
|
|
newCommitment := chainTail.toDiskCommit(lntypes.Local)
|
2020-01-03 15:53:51 +01:00
|
|
|
|
|
|
|
// Get the unsigned acked remotes updates that are currently in memory.
|
|
|
|
// We need them after a restart to sync our remote commitment with what
|
|
|
|
// is committed locally.
|
|
|
|
unsignedAckedUpdates := lc.getUnsignedAckedUpdates()
|
|
|
|
|
2022-05-10 12:33:44 +02:00
|
|
|
finalHtlcs, err := lc.channelState.UpdateCommitment(
|
2020-01-03 15:53:51 +01:00
|
|
|
newCommitment, unsignedAckedUpdates,
|
|
|
|
)
|
2016-09-07 19:45:27 +02:00
|
|
|
if err != nil {
|
2022-05-10 12:33:44 +02:00
|
|
|
return nil, nil, nil, err
|
2016-09-07 19:45:27 +02:00
|
|
|
}
|
2016-07-06 02:01:55 +02:00
|
|
|
|
2019-09-24 13:12:53 +02:00
|
|
|
lc.log.Tracef("state transition accepted: "+
|
2020-01-03 15:53:51 +01:00
|
|
|
"our_balance=%v, their_balance=%v, unsigned_acked_updates=%v",
|
2019-09-24 13:12:53 +02:00
|
|
|
chainTail.ourBalance,
|
2020-01-03 15:53:51 +01:00
|
|
|
chainTail.theirBalance,
|
|
|
|
len(unsignedAckedUpdates))
|
2016-07-13 02:35:51 +02:00
|
|
|
|
2017-07-30 21:25:41 +02:00
|
|
|
revocationMsg.ChanID = lnwire.NewChanIDFromOutPoint(
|
2024-01-29 22:19:15 +01:00
|
|
|
lc.channelState.FundingOutpoint,
|
2017-07-30 21:25:41 +02:00
|
|
|
)
|
|
|
|
|
2022-05-10 12:33:44 +02:00
|
|
|
return revocationMsg, newCommitment.Htlcs, finalHtlcs, nil
|
2016-07-06 02:01:55 +02:00
|
|
|
}
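// A hedged usage sketch (the sendToPeer helper, log value and lnChannel value
// are hypothetical): after revoking, a caller forwards the revocation message
// and can inspect both the HTLCs locked into the newly accepted state and the
// set of HTLCs that were resolved.
//
//	revMsg, htlcs, finalHtlcs, err := lnChannel.RevokeCurrentCommitment()
//	if err != nil {
//		return err
//	}
//	if err := sendToPeer(revMsg); err != nil {
//		return err
//	}
//	for htlcID, settled := range finalHtlcs {
//		log.Debugf("htlc %v resolved, settled=%v", htlcID, settled)
//	}
//	_ = htlcs // HTLCs active on the commitment we just accepted.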
|
|
|
|
|
|
|
|
// ReceiveRevocation processes a revocation sent by the remote party for the
|
|
|
|
// lowest unrevoked commitment within their commitment chain. We receive a
|
|
|
|
// revocation either during the initial session negotiation wherein revocation
|
|
|
|
// windows are extended, or in response to a state update that we initiate. If
|
|
|
|
// successful, then the remote commitment chain is advanced by a single
|
2018-02-28 05:04:41 +01:00
|
|
|
// commitment, and a log compaction is attempted.
|
|
|
|
//
|
|
|
|
// The returned values correspond to:
|
2022-08-22 20:58:42 +02:00
|
|
|
// 1. The forwarding package corresponding to the remote commitment height
|
|
|
|
// that was revoked.
|
2024-08-16 23:35:49 +02:00
|
|
|
// 2. The set of HTLCs present on the current valid commitment transaction
|
2022-08-22 20:58:42 +02:00
|
|
|
// for the remote party.
|
2018-02-28 05:01:41 +01:00
|
|
|
func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) (
|
2024-08-16 23:35:49 +02:00
|
|
|
*channeldb.FwdPkg, []channeldb.HTLC, error) {
|
2018-02-28 05:01:41 +01:00
|
|
|
|
2016-11-21 04:18:30 +01:00
|
|
|
lc.Lock()
|
|
|
|
defer lc.Unlock()
|
|
|
|
|
2016-12-14 15:01:48 +01:00
|
|
|
// Ensure that the new pre-image can be placed in the preimage store.
|
|
|
|
store := lc.channelState.RevocationStore
|
2017-07-30 21:32:24 +02:00
|
|
|
revocation, err := chainhash.NewHash(revMsg.Revocation[:])
|
|
|
|
if err != nil {
|
2024-08-16 23:35:49 +02:00
|
|
|
return nil, nil, err
|
2015-12-31 07:36:01 +01:00
|
|
|
}
|
2017-07-30 21:32:24 +02:00
|
|
|
if err := store.AddNextEntry(revocation); err != nil {
|
2024-08-16 23:35:49 +02:00
|
|
|
return nil, nil, err
|
2016-07-06 02:01:55 +02:00
|
|
|
}
|
2015-12-31 07:36:01 +01:00
|
|
|
|
2017-07-30 21:32:24 +02:00
|
|
|
// Verify that if we use the commitment point computed based off of the
|
|
|
|
// revealed secret to derive a revocation key with our revocation base
|
|
|
|
// point, then it matches the current revocation of the remote party.
|
|
|
|
currentCommitPoint := lc.channelState.RemoteCurrentRevocation
|
2019-01-16 15:47:43 +01:00
|
|
|
derivedCommitPoint := input.ComputeCommitmentPoint(revMsg.Revocation[:])
|
2017-07-30 21:32:24 +02:00
|
|
|
if !derivedCommitPoint.IsEqual(currentCommitPoint) {
|
2024-08-16 23:35:49 +02:00
|
|
|
return nil, nil, fmt.Errorf("revocation key mismatch")
|
2016-07-06 02:01:55 +02:00
|
|
|
}
|
2016-01-05 22:01:42 +01:00
|
|
|
|
2017-07-30 21:32:24 +02:00
|
|
|
// Now that we've verified that the prior commitment has been properly
|
|
|
|
// revoked, we'll advance the revocation state we track for the remote
|
|
|
|
// party: the new current revocation is what was previously the next
|
|
|
|
// revocation, and the new next revocation is set to the key included
|
|
|
|
// in the message.
|
|
|
|
lc.channelState.RemoteCurrentRevocation = lc.channelState.RemoteNextRevocation
|
|
|
|
lc.channelState.RemoteNextRevocation = revMsg.NextRevocationKey
|
2016-07-06 02:01:55 +02:00
|
|
|
|
2019-09-24 13:12:53 +02:00
|
|
|
lc.log.Tracef("remote party accepted state transition, revoked height "+
|
|
|
|
"%v, now at %v",
|
2024-08-09 21:47:58 +02:00
|
|
|
lc.commitChains.Remote.tail().height,
|
|
|
|
lc.commitChains.Remote.tail().height+1)
|
2016-07-13 02:32:32 +02:00
|
|
|
|
2018-02-28 05:01:41 +01:00
|
|
|
// Add one to the remote tail since this will be height *after* we write
|
|
|
|
// the revocation to disk; the local height will remain unchanged.
|
2024-08-09 21:47:58 +02:00
|
|
|
remoteChainTail := lc.commitChains.Remote.tail().height + 1
|
|
|
|
localChainTail := lc.commitChains.Local.tail().height
|
2016-07-13 02:32:32 +02:00
|
|
|
|
2018-05-28 12:59:41 +02:00
|
|
|
source := lc.ShortChanID()
|
2016-07-06 02:01:55 +02:00
|
|
|
|
2018-02-28 05:01:41 +01:00
|
|
|
// Determine the set of htlcs that can be forwarded as a result of
|
|
|
|
// having received the revocation. We will simultaneously construct the
|
|
|
|
// log updates and payment descriptors, allowing us to persist the log
|
|
|
|
// updates to disk and optimistically buffer the forwarding package in
|
|
|
|
// memory.
|
|
|
|
var (
|
2024-08-16 23:35:49 +02:00
|
|
|
addUpdatesToForward []channeldb.LogUpdate
|
|
|
|
settleFailUpdatesToForward []channeldb.LogUpdate
|
2018-02-28 05:01:41 +01:00
|
|
|
)
|
2016-07-06 02:01:55 +02:00
|
|
|
|
2018-02-28 05:01:41 +01:00
|
|
|
var addIndex, settleFailIndex uint16
|
2024-08-09 22:00:59 +02:00
|
|
|
for e := lc.updateLogs.Remote.Front(); e != nil; e = e.Next() {
|
2024-05-01 00:23:50 +02:00
|
|
|
pd := e.Value
|
2016-07-06 02:01:55 +02:00
|
|
|
|
2019-01-10 12:23:56 +01:00
|
|
|
// Fee updates are local to this particular channel, and should
|
|
|
|
// never be forwarded.
|
|
|
|
if pd.EntryType == FeeUpdate {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-02-28 05:01:41 +01:00
|
|
|
if pd.isForwarded {
|
2016-07-17 03:12:36 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-05-28 12:59:41 +02:00
|
|
|
// For each type of HTLC, we will only consider forwarding it if
|
|
|
|
// both of the remote and local heights are non-zero. If either
|
|
|
|
// of these values is zero, it has yet to be committed in both
|
|
|
|
// the local and remote chains.
|
2024-07-17 00:35:58 +02:00
|
|
|
committedAdd := pd.addCommitHeights.Remote > 0 &&
|
|
|
|
pd.addCommitHeights.Local > 0
|
|
|
|
committedRmv := pd.removeCommitHeights.Remote > 0 &&
|
|
|
|
pd.removeCommitHeights.Local > 0
|
2016-07-22 01:50:20 +02:00
|
|
|
|
2018-02-28 05:01:41 +01:00
|
|
|
// Using the height of the remote and local commitments,
|
|
|
|
// preemptively compute whether or not to forward this HTLC for
|
|
|
|
// the case in which this is an Add HTLC, or if this is a
|
|
|
|
// Settle, Fail, or MalformedFail.
|
2024-07-17 00:35:58 +02:00
|
|
|
shouldFwdAdd := remoteChainTail == pd.addCommitHeights.Remote &&
|
|
|
|
localChainTail >= pd.addCommitHeights.Local
|
|
|
|
shouldFwdRmv := remoteChainTail ==
|
|
|
|
pd.removeCommitHeights.Remote &&
|
|
|
|
localChainTail >= pd.removeCommitHeights.Local
|
2018-02-28 05:01:41 +01:00
|
|
|
|
2018-01-09 04:45:36 +01:00
|
|
|
// We'll only forward any new HTLC additions iff it's "freshly
|
|
|
|
// locked in". Meaning that the HTLC was only *just* considered
|
|
|
|
// locked-in at this new state. By doing this we ensure that we
|
|
|
|
// don't re-forward any already processed HTLC's after a
|
|
|
|
// restart.
|
2018-02-28 05:01:41 +01:00
|
|
|
switch {
|
2018-05-28 12:59:41 +02:00
|
|
|
case pd.EntryType == Add && committedAdd && shouldFwdAdd:
|
2018-02-28 05:01:41 +01:00
|
|
|
// Construct a reference specifying the location that
|
|
|
|
// this forwarded Add will be written in the forwarding
|
|
|
|
// package constructed at this remote height.
|
|
|
|
pd.SourceRef = &channeldb.AddRef{
|
|
|
|
Height: remoteChainTail,
|
|
|
|
Index: addIndex,
|
|
|
|
}
|
|
|
|
addIndex++
|
|
|
|
|
|
|
|
pd.isForwarded = true
|
2024-08-16 23:35:49 +02:00
|
|
|
|
|
|
|
// At this point we put the update into our list of
|
|
|
|
// updates that we will eventually put into the
|
|
|
|
// FwdPkg at this height.
|
|
|
|
addUpdatesToForward = append(
|
2024-06-15 01:30:28 +02:00
|
|
|
addUpdatesToForward, pd.toLogUpdate(),
|
2024-08-16 23:35:49 +02:00
|
|
|
)
|
2017-02-21 02:55:33 +01:00
|
|
|
|
2018-05-28 12:59:41 +02:00
|
|
|
case pd.EntryType != Add && committedRmv && shouldFwdRmv:
|
2018-02-28 05:01:41 +01:00
|
|
|
// Construct a reference specifying the location that
|
|
|
|
// this forwarded Settle/Fail will be written in the
|
|
|
|
// forwarding package constructed at this remote height.
|
|
|
|
pd.DestRef = &channeldb.SettleFailRef{
|
2018-05-28 12:59:41 +02:00
|
|
|
Source: source,
|
2018-02-28 05:01:41 +01:00
|
|
|
Height: remoteChainTail,
|
|
|
|
Index: settleFailIndex,
|
|
|
|
}
|
|
|
|
settleFailIndex++
|
|
|
|
|
|
|
|
pd.isForwarded = true
|
2024-08-16 23:35:49 +02:00
|
|
|
|
|
|
|
// At this point we put the update into our list of
|
|
|
|
// updates that we will eventually put into the
|
|
|
|
// FwdPkg at this height.
|
|
|
|
settleFailUpdatesToForward = append(
|
2024-06-15 01:30:28 +02:00
|
|
|
settleFailUpdatesToForward, pd.toLogUpdate(),
|
2024-08-16 23:35:49 +02:00
|
|
|
)
|
2018-02-28 05:01:41 +01:00
|
|
|
|
|
|
|
default:
|
2024-06-15 01:30:28 +02:00
|
|
|
// The update was not "freshly locked in" so we will
|
|
|
|
// ignore it as we construct the forwarding package.
|
2018-01-09 04:45:36 +01:00
|
|
|
continue
|
|
|
|
}
|
2016-07-22 01:50:20 +02:00
|
|
|
}
|
|
|
|
|
2020-07-02 08:16:04 +02:00
|
|
|
// We use the remote commitment chain's tip as it will soon become the tail
|
|
|
|
// once advanceTail is called.
|
2024-08-09 23:52:21 +02:00
|
|
|
remoteMessageIndex := lc.commitChains.Remote.tip().messageIndices.Local
|
|
|
|
localMessageIndex := lc.commitChains.Local.tail().messageIndices.Local
|
2020-07-02 08:16:04 +02:00
|
|
|
|
|
|
|
localPeerUpdates := lc.unsignedLocalUpdates(
|
2024-06-15 02:45:36 +02:00
|
|
|
remoteMessageIndex, localMessageIndex,
|
2020-07-02 08:16:04 +02:00
|
|
|
)
|
|
|
|
|
2018-02-28 05:01:41 +01:00
|
|
|
// Now that we have gathered the set of HTLCs to forward, separated by
|
|
|
|
// type, construct a forwarding package using the height that the remote
|
|
|
|
// commitment chain will be extended to after persisting the revocation.
|
|
|
|
fwdPkg := channeldb.NewFwdPkg(
|
2024-08-16 23:35:49 +02:00
|
|
|
source, remoteChainTail, addUpdatesToForward,
|
|
|
|
settleFailUpdatesToForward,
|
2018-02-28 05:01:41 +01:00
|
|
|
)
|
|
|
|
|
2022-04-08 01:36:26 +02:00
|
|
|
// We will soon be saving the current remote commitment to the revocation
|
|
|
|
// log bucket, which is `lc.channelState.RemoteCommitment`. After that,
|
|
|
|
// the `RemoteCommitment` will be replaced with a newer version found
|
|
|
|
// in `CommitDiff`. Thus we need to compute the output indexes here
|
|
|
|
// before the change since the indexes are meant for the current,
|
|
|
|
// revoked remote commitment.
|
|
|
|
ourOutputIndex, theirOutputIndex, err := findOutputIndexesFromRemote(
|
2024-03-17 21:53:38 +01:00
|
|
|
revocation, lc.channelState, lc.leafStore,
|
2022-04-08 01:36:26 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
2024-08-16 23:35:49 +02:00
|
|
|
return nil, nil, err
|
2022-04-08 01:36:26 +02:00
|
|
|
}
|
|
|
|
|
2023-01-20 04:24:48 +01:00
|
|
|
// Now that we have a new verification nonce from them, we can refresh
|
|
|
|
// our remote musig2 session which allows us to create another state.
|
|
|
|
if lc.channelState.ChanType.IsTaproot() {
|
2024-02-24 03:04:51 +01:00
|
|
|
localNonce, err := revMsg.LocalNonce.UnwrapOrErrV(errNoNonce)
|
|
|
|
if err != nil {
|
2024-08-16 23:35:49 +02:00
|
|
|
return nil, nil, err
|
2023-01-20 04:24:48 +01:00
|
|
|
}
|
2024-02-24 03:04:51 +01:00
|
|
|
|
|
|
|
session, err := lc.musigSessions.RemoteSession.Refresh(
|
2023-01-20 04:24:48 +01:00
|
|
|
&musig2.Nonces{
|
2024-02-24 03:04:51 +01:00
|
|
|
PubNonce: localNonce,
|
2023-01-20 04:24:48 +01:00
|
|
|
},
|
|
|
|
)
|
|
|
|
if err != nil {
|
2024-08-16 23:35:49 +02:00
|
|
|
return nil, nil, err
|
2023-01-20 04:24:48 +01:00
|
|
|
}
|
2024-02-24 03:04:51 +01:00
|
|
|
|
|
|
|
lc.musigSessions.RemoteSession = session
|
2023-01-20 04:24:48 +01:00
|
|
|
}
|
|
|
|
|
2018-02-28 05:01:41 +01:00
|
|
|
// At this point, the revocation has been accepted, and we've rotated
|
|
|
|
// the current revocation key+hash for the remote party. Therefore we
|
|
|
|
// sync now to ensure the revocation producer state is consistent with
|
|
|
|
// the current commitment height and also to advance the on-disk
|
|
|
|
// commitment chain.
|
2022-04-08 01:36:26 +02:00
|
|
|
err = lc.channelState.AdvanceCommitChainTail(
|
|
|
|
fwdPkg, localPeerUpdates,
|
|
|
|
ourOutputIndex, theirOutputIndex,
|
|
|
|
)
|
2018-02-28 05:01:41 +01:00
|
|
|
if err != nil {
|
2024-08-16 23:35:49 +02:00
|
|
|
return nil, nil, err
|
2018-02-28 05:01:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Since they revoked the current lowest height in their commitment
|
|
|
|
// chain, we can advance their chain by a single commitment.
|
2024-08-09 21:47:58 +02:00
|
|
|
lc.commitChains.Remote.advanceTail()
|
2018-02-28 05:01:41 +01:00
|
|
|
|
2017-02-25 01:09:21 +01:00
|
|
|
// As we've just completed a new state transition, attempt to see if we
|
|
|
|
// can remove any entries from the update log which have been removed
|
|
|
|
// from the PoV of both commitment chains.
|
2019-03-09 01:05:28 +01:00
|
|
|
compactLogs(
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local, lc.updateLogs.Remote, localChainTail,
|
2019-03-09 01:05:28 +01:00
|
|
|
remoteChainTail,
|
|
|
|
)
|
2016-07-22 01:50:20 +02:00
|
|
|
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 02:23:26 +02:00
|
|
|
remoteHTLCs := lc.channelState.RemoteCommitment.Htlcs
|
|
|
|
|
2024-08-16 23:35:49 +02:00
|
|
|
return fwdPkg, remoteHTLCs, nil
|
2015-12-31 07:36:01 +01:00
|
|
|
}
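
// A rough sketch of how a caller typically drives this part of the state
// machine (hypothetical variables; the exact link wiring lives outside this
// file): after the peer replies to our CommitSig with a revoke_and_ack, the
// revocation is processed and the resulting forwarding package is handed to
// the switch.
//
//	fwdPkg, remoteHTLCs, err := channel.ReceiveRevocation(revokeMsg)
//	if err != nil {
//		// The revocation was invalid; fail the channel.
//	}
//	// fwdPkg now contains the adds/settles/fails to forward, and
//	// remoteHTLCs reflects the remote party's current commitment.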

// LoadFwdPkgs loads any pending log updates from disk and returns the payment
// descriptors to be processed by the link.
func (lc *LightningChannel) LoadFwdPkgs() ([]*channeldb.FwdPkg, error) {
	return lc.channelState.LoadFwdPkgs()
}

// AckAddHtlcs sets a bit in the FwdFilter of a forwarding package belonging to
// this channel, that corresponds to the given AddRef. This method also succeeds
// if no forwarding package is found.
func (lc *LightningChannel) AckAddHtlcs(addRef channeldb.AddRef) error {
	return lc.channelState.AckAddHtlcs(addRef)
}

// AckSettleFails sets a bit in the SettleFailFilter of a forwarding package
// belonging to this channel, that corresponds to the given SettleFailRef. This
// method also succeeds if no forwarding package is found.
func (lc *LightningChannel) AckSettleFails(
	settleFailRefs ...channeldb.SettleFailRef) error {

	return lc.channelState.AckSettleFails(settleFailRefs...)
}

// SetFwdFilter writes the forwarding decision for a given remote commitment
// height.
func (lc *LightningChannel) SetFwdFilter(height uint64,
	fwdFilter *channeldb.PkgFilter) error {

	return lc.channelState.SetFwdFilter(height, fwdFilter)
}

// RemoveFwdPkgs permanently deletes the forwarding package at the given heights.
func (lc *LightningChannel) RemoveFwdPkgs(heights ...uint64) error {
	return lc.channelState.RemoveFwdPkgs(heights...)
}
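
// Illustrative lifecycle of a forwarding package as seen through the wrappers
// above (a sketch with hypothetical values, not code from this file): the
// link reloads packages on restart, acknowledges individual updates as they
// are processed, and deletes a package once every update in it is accounted
// for.
//
//	pkgs, err := channel.LoadFwdPkgs()
//	if err != nil {
//		return err
//	}
//	for _, pkg := range pkgs {
//		// Replay or forward the updates contained in pkg, then ack
//		// them, e.g.:
//		//   channel.AckAddHtlcs(addRef)
//		//   channel.AckSettleFails(settleFailRefs...)
//	}
//	// Once a package at a given height is fully processed:
//	if err := channel.RemoveFwdPkgs(height); err != nil {
//		return err
//	}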

// NextRevocationKey returns the commitment point for the _next_ commitment
// height. The pubkey returned by this function is required by the remote party
// along with their revocation base to extend our commitment chain with a
// new commitment.
func (lc *LightningChannel) NextRevocationKey() (*btcec.PublicKey, error) {
	lc.RLock()
	defer lc.RUnlock()

	nextHeight := lc.currentHeight + 1
	revocation, err := lc.channelState.RevocationProducer.AtIndex(nextHeight)
	if err != nil {
		return nil, err
	}

	return input.ComputeCommitmentPoint(revocation[:]), nil
}

// InitNextRevocation inserts the passed commitment point as the _next_
// revocation to be used when creating a new commitment state for the remote
// party. This function MUST be called before the channel can accept or propose
// any new states.
func (lc *LightningChannel) InitNextRevocation(revKey *btcec.PublicKey) error {
	lc.Lock()
	defer lc.Unlock()

	return lc.channelState.InsertNextRevocation(revKey)
}
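
// These two methods are two halves of the same exchange. A sketch of the
// flow, with hypothetical peers A and B and hypothetical channel handles:
//
//	// A derives the point for its next commitment height and sends it to
//	// B as part of the protocol handshake for the next state.
//	nextPoint, err := chanA.NextRevocationKey()
//
//	// B installs A's point so that it can accept or propose new states.
//	err = chanB.InitNextRevocation(nextPoint)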

// AddHTLC is a wrapper of the `addHTLC` function which always enforces the
// FeeBuffer on the local balance when we are the initiator of the channel.
// This method should be called when preparing to send an outgoing HTLC.
//
// The additional openKey argument corresponds to the incoming CircuitKey of the
// committed circuit for this HTLC. This value should never be nil.
//
// NOTE: It is okay for sourceRef to be nil when unit testing the wallet.
func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC,
	openKey *models.CircuitKey) (uint64, error) {

	return lc.addHTLC(htlc, openKey, FeeBuffer)
}

// addHTLC adds an HTLC to the state machine's local update log. It provides
// the ability to enforce a buffer on the local balance when we are the
// initiator of the channel. This is useful when checking the edge cases of a
// channel state, e.g. the BOLT 03 test vectors.
//
// The additional openKey argument corresponds to the incoming CircuitKey of the
// committed circuit for this HTLC. This value should never be nil.
//
// NOTE: It is okay for sourceRef to be nil when unit testing the wallet.
func (lc *LightningChannel) addHTLC(htlc *lnwire.UpdateAddHTLC,
	openKey *models.CircuitKey, buffer BufferType) (uint64, error) {

	lc.Lock()
	defer lc.Unlock()

	pd := lc.htlcAddDescriptor(htlc, openKey)
	if err := lc.validateAddHtlc(pd, buffer); err != nil {
		return 0, err
	}

	lc.updateLogs.Local.appendHtlc(pd)

	return pd.HtlcIndex, nil
}
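
// A minimal sketch of adding an outgoing HTLC from a caller's perspective
// (hypothetical values for the hash, amount, expiry, onion and circuit key):
//
//	add := &lnwire.UpdateAddHTLC{
//		ChanID:      channel.ChannelID(),
//		PaymentHash: payHash,
//		Amount:      amt,
//		Expiry:      expiry,
//		OnionBlob:   onionBlob,
//	}
//	htlcIndex, err := channel.AddHTLC(add, &openCircuitKey)
//	if err != nil {
//		// Not enough balance/slots once the FeeBuffer is enforced.
//	}
//	// htlcIndex is later used to settle or fail this HTLC.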

// GetDustSum takes in a ChannelParty that determines which commitment to
// evaluate the dust sum on. The return value is the sum of dust on the
// desired commitment tx.
//
// NOTE: This over-estimates the dust exposure.
func (lc *LightningChannel) GetDustSum(whoseCommit lntypes.ChannelParty,
	dryRunFee fn.Option[chainfee.SatPerKWeight]) lnwire.MilliSatoshi {

	lc.RLock()
	defer lc.RUnlock()

	var dustSum lnwire.MilliSatoshi

	dustLimit := lc.channelState.LocalChanCfg.DustLimit
	commit := lc.channelState.LocalCommitment
	if whoseCommit.IsRemote() {
		// Calculate dust sum on the remote's commitment.
		dustLimit = lc.channelState.RemoteChanCfg.DustLimit
		commit = lc.channelState.RemoteCommitment
	}

	chanType := lc.channelState.ChanType
	feeRate := chainfee.SatPerKWeight(commit.FeePerKw)

	// Optionally use the dry-run fee-rate.
	feeRate = dryRunFee.UnwrapOr(feeRate)

	// Grab all of our HTLCs and evaluate against the dust limit.
	for e := lc.updateLogs.Local.Front(); e != nil; e = e.Next() {
		pd := e.Value
		if pd.EntryType != Add {
			continue
		}

		amt := pd.Amount.ToSatoshis()

		// If the satoshi amount is under the dust limit, add the msat
		// amount to the dust sum.
		if HtlcIsDust(
			chanType, false, whoseCommit, feeRate, amt, dustLimit,
		) {

			dustSum += pd.Amount
		}
	}

	// Grab all of their HTLCs and evaluate against the dust limit.
	for e := lc.updateLogs.Remote.Front(); e != nil; e = e.Next() {
		pd := e.Value
		if pd.EntryType != Add {
			continue
		}

		amt := pd.Amount.ToSatoshis()

		// If the satoshi amount is under the dust limit, add the msat
		// amount to the dust sum.
		if HtlcIsDust(
			chanType, true, whoseCommit, feeRate,
			amt, dustLimit,
		) {

			dustSum += pd.Amount
		}
	}

	return dustSum
}
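
// A sketch of how a caller can use GetDustSum to gauge dust exposure, both at
// the current fee rate and at a hypothetical new fee rate (newFeeRate and
// maxDustExposure are assumed values):
//
//	current := channel.GetDustSum(
//		lntypes.Remote, fn.None[chainfee.SatPerKWeight](),
//	)
//	projected := channel.GetDustSum(
//		lntypes.Remote, fn.Some(newFeeRate),
//	)
//	if projected > maxDustExposure {
//		// Reject the fee update or the HTLC that would push us over.
//	}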

// MayAddOutgoingHtlc validates whether we can add an outgoing htlc to this
// channel. We don't have a circuit for this htlc, because we just want to test
// that we have slots for a potential htlc so we use a "mock" htlc to validate
// a potential commitment state with one more outgoing htlc. If a zero htlc
// amount is provided, we'll attempt to add the smallest possible htlc to the
// channel (either the minimum htlc, or 1 sat).
func (lc *LightningChannel) MayAddOutgoingHtlc(amt lnwire.MilliSatoshi) error {
	lc.Lock()
	defer lc.Unlock()

	var mockHtlcAmt lnwire.MilliSatoshi
	switch {
	// If the caller specifically set an amount, we use it.
	case amt != 0:
		mockHtlcAmt = amt

	// In absence of a specific amount, we want to use minimum htlc value
	// for the channel. However certain implementations may set this value
	// to zero, so we only use this value if it is non-zero.
	case lc.channelState.LocalChanCfg.MinHTLC != 0:
		mockHtlcAmt = lc.channelState.LocalChanCfg.MinHTLC

	// As a last resort, we just add a non-zero amount.
	default:
		mockHtlcAmt++
	}

	// Create a "mock" outgoing htlc, using the smallest amount we can add
	// to the commitment so that we validate commitment slots rather than
	// available balance, since our actual htlc amount is unknown at this
	// stage.
	pd := lc.htlcAddDescriptor(
		&lnwire.UpdateAddHTLC{
			Amount: mockHtlcAmt,
		},
		&models.CircuitKey{},
	)

	// Enforce the FeeBuffer because we are evaluating whether we can add
	// another htlc to the channel state.
	if err := lc.validateAddHtlc(pd, FeeBuffer); err != nil {
		lc.log.Debugf("May add outgoing htlc rejected: %v", err)
		return err
	}

	return nil
}
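
// A sketch of the intended use: before committing to a route, a caller can
// cheaply probe whether this channel has a slot for one more outgoing HTLC
// (zero means "smallest possible amount"):
//
//	if err := channel.MayAddOutgoingHtlc(0); err != nil {
//		// Skip this channel when selecting an outgoing link.
//	}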

// htlcAddDescriptor returns a payment descriptor for the htlc and open key
// provided to add to our local update log.
func (lc *LightningChannel) htlcAddDescriptor(htlc *lnwire.UpdateAddHTLC,
	openKey *models.CircuitKey) *paymentDescriptor {

	return &paymentDescriptor{
		ChanID:         htlc.ChanID,
		EntryType:      Add,
		RHash:          PaymentHash(htlc.PaymentHash),
		Timeout:        htlc.Expiry,
		Amount:         htlc.Amount,
		LogIndex:       lc.updateLogs.Local.logIndex,
		HtlcIndex:      lc.updateLogs.Local.htlcCounter,
		OnionBlob:      htlc.OnionBlob,
		OpenCircuitKey: openKey,
		BlindingPoint:  htlc.BlindingPoint,
		CustomRecords:  htlc.CustomRecords.Copy(),
	}
}

// validateAddHtlc validates the addition of an outgoing htlc to our local and
// remote commitments.
func (lc *LightningChannel) validateAddHtlc(pd *paymentDescriptor,
	buffer BufferType) error {

	// Make sure adding this HTLC won't violate any of the constraints we
	// must keep on the commitment transactions.
	remoteACKedIndex := lc.commitChains.Local.tail().messageIndices.Remote

	// First we'll check whether this HTLC can be added to the remote
	// commitment transaction without violating any of the constraints.
	err := lc.validateCommitmentSanity(
		remoteACKedIndex, lc.updateLogs.Local.logIndex, lntypes.Remote,
		buffer, pd, nil,
	)
	if err != nil {
		return err
	}

	// We must also check whether it can be added to our own commitment
	// transaction, or the remote node will refuse to sign. This is not
	// totally bullet proof, as the remote might be adding updates
	// concurrently, but if we fail this check it is for sure not
	// possible for us to add the HTLC.
	err = lc.validateCommitmentSanity(
		lc.updateLogs.Remote.logIndex, lc.updateLogs.Local.logIndex,
		lntypes.Local, buffer, pd, nil,
	)
	if err != nil {
		return err
	}

	return nil
}

// ReceiveHTLC adds an HTLC to the state machine's remote update log. This
// method should be called in response to receiving a new HTLC from the remote
// party.
func (lc *LightningChannel) ReceiveHTLC(htlc *lnwire.UpdateAddHTLC) (uint64,
	error) {

	lc.Lock()
	defer lc.Unlock()

	if htlc.ID != lc.updateLogs.Remote.htlcCounter {
		return 0, fmt.Errorf("ID %d on HTLC add does not match "+
			"expected next ID %d", htlc.ID,
			lc.updateLogs.Remote.htlcCounter)
	}

	pd := &paymentDescriptor{
		ChanID:        htlc.ChanID,
		EntryType:     Add,
		RHash:         PaymentHash(htlc.PaymentHash),
		Timeout:       htlc.Expiry,
		Amount:        htlc.Amount,
		LogIndex:      lc.updateLogs.Remote.logIndex,
		HtlcIndex:     lc.updateLogs.Remote.htlcCounter,
		OnionBlob:     htlc.OnionBlob,
		BlindingPoint: htlc.BlindingPoint,
		CustomRecords: htlc.CustomRecords.Copy(),
	}

	localACKedIndex := lc.commitChains.Remote.tail().messageIndices.Local

	// Clamp down on the number of HTLC's we can receive by checking the
	// commitment sanity.
	// We do not enforce the FeeBuffer here: one of the reasons it was
	// introduced is to protect against the asynchronous sending of htlcs,
	// and the current lightning protocol does not allow us to reject ADDs
	// already sent by the peer.
	err := lc.validateCommitmentSanity(
		lc.updateLogs.Remote.logIndex, localACKedIndex, lntypes.Local,
		NoBuffer, nil, pd,
	)
	if err != nil {
		return 0, err
	}

	lc.updateLogs.Remote.appendHtlc(pd)

	return pd.HtlcIndex, nil
}
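
// A sketch of the receiving side (addMsg is a hypothetical UpdateAddHTLC just
// read off the wire); note that the IDs must arrive strictly in order:
//
//	htlcIndex, err := channel.ReceiveHTLC(addMsg)
//	if err != nil {
//		// Either the ID was out of order or the commitment sanity
//		// check failed; the channel should be failed.
//	}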

// SettleHTLC attempts to settle an existing outstanding received HTLC. The
// remote log index of the HTLC settled is returned in order to facilitate
// creating the corresponding wire message. In the case the supplied preimage
// is invalid, an error is returned.
//
// The additional arguments correspond to:
//
//   - sourceRef: specifies the location of the Add HTLC within a forwarding
//     package that this HTLC is settling. Every Settle settles exactly one
//     Add, so this should never be empty in practice.
//
//   - destRef: specifies the location of the Settle HTLC within another
//     channel's forwarding package. This value can be nil if the corresponding
//     Add HTLC was never locked into an outgoing commitment txn, or this
//     HTLC does not originate as a response from the peer on the outgoing
//     link, e.g. on-chain resolutions.
//
//   - closeKey: identifies the circuit that should be deleted after this Settle
//     HTLC is included in a commitment txn. This value should only be nil if
//     the HTLC was settled locally before committing a circuit to the circuit
//     map.
//
// NOTE: It is okay for sourceRef, destRef, and closeKey to be nil when unit
// testing the wallet.
func (lc *LightningChannel) SettleHTLC(preimage [32]byte,
	htlcIndex uint64, sourceRef *channeldb.AddRef,
	destRef *channeldb.SettleFailRef, closeKey *models.CircuitKey) error {

	lc.Lock()
	defer lc.Unlock()

	htlc := lc.updateLogs.Remote.lookupHtlc(htlcIndex)
	if htlc == nil {
		return ErrUnknownHtlcIndex{lc.ShortChanID(), htlcIndex}
	}

	// Now that we know the HTLC exists, before checking to see if the
	// preimage matches, we'll ensure that we haven't already attempted to
	// modify the HTLC.
	if lc.updateLogs.Remote.htlcHasModification(htlcIndex) {
		return ErrHtlcIndexAlreadySettled(htlcIndex)
	}

	if htlc.RHash != sha256.Sum256(preimage[:]) {
		return ErrInvalidSettlePreimage{preimage[:], htlc.RHash[:]}
	}

	pd := &paymentDescriptor{
		ChanID:           lc.ChannelID(),
		Amount:           htlc.Amount,
		RPreimage:        preimage,
		LogIndex:         lc.updateLogs.Local.logIndex,
		ParentIndex:      htlcIndex,
		EntryType:        Settle,
		SourceRef:        sourceRef,
		DestRef:          destRef,
		ClosedCircuitKey: closeKey,
	}

	lc.updateLogs.Local.appendUpdate(pd)

	// With the settle added to our local log, we'll now mark the HTLC as
	// modified to prevent ourselves from accidentally attempting a
	// duplicate settle.
	lc.updateLogs.Remote.markHtlcModified(htlcIndex)

	return nil
}
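
// A sketch of settling an incoming HTLC once the preimage is known
// (hypothetical references; destRef and closeKey may be nil as documented
// above):
//
//	err := channel.SettleHTLC(
//		preimage, htlcIndex, &sourceRef, destRef, closeKey,
//	)
//	if err != nil {
//		// Unknown index, duplicate settle, or preimage mismatch.
//	}
//	// A corresponding lnwire.UpdateFulfillHTLC is then sent to the peer.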

// ReceiveHTLCSettle attempts to settle an existing outgoing HTLC indexed by an
// index into the local log. If the specified index doesn't exist within the
// log, an error is returned. Similarly, if the preimage is invalid w.r.t. the
// referenced HTLC, then a distinct error is returned.
func (lc *LightningChannel) ReceiveHTLCSettle(preimage [32]byte, htlcIndex uint64) error {
	lc.Lock()
	defer lc.Unlock()

	htlc := lc.updateLogs.Local.lookupHtlc(htlcIndex)
	if htlc == nil {
		return ErrUnknownHtlcIndex{lc.ShortChanID(), htlcIndex}
	}

	// Now that we know the HTLC exists, before checking to see if the
	// preimage matches, we'll ensure that they haven't already attempted
	// to modify the HTLC.
	if lc.updateLogs.Local.htlcHasModification(htlcIndex) {
		return ErrHtlcIndexAlreadySettled(htlcIndex)
	}

	if htlc.RHash != sha256.Sum256(preimage[:]) {
		return ErrInvalidSettlePreimage{preimage[:], htlc.RHash[:]}
	}

	pd := &paymentDescriptor{
		ChanID:      lc.ChannelID(),
		Amount:      htlc.Amount,
		RPreimage:   preimage,
		ParentIndex: htlc.HtlcIndex,
		RHash:       htlc.RHash,
		LogIndex:    lc.updateLogs.Remote.logIndex,
		EntryType:   Settle,
	}

	lc.updateLogs.Remote.appendUpdate(pd)

	// With the settle added to the remote log, we'll now mark the HTLC as
	// modified to prevent the remote party from accidentally attempting a
	// duplicate settle.
	lc.updateLogs.Local.markHtlcModified(htlcIndex)

	return nil
}

// FailHTLC attempts to fail a targeted HTLC by its payment hash, inserting an
// entry which will remove the target log entry within the next commitment
// update. This method is intended to be called in order to cancel an
// _incoming_ HTLC.
//
// The additional arguments correspond to:
//
//   - sourceRef: specifies the location of the Add HTLC within a forwarding
//     package that this HTLC is failing. Every Fail fails exactly one Add, so
//     this should never be empty in practice.
//
//   - destRef: specifies the location of the Fail HTLC within another channel's
//     forwarding package. This value can be nil if the corresponding Add HTLC
//     was never locked into an outgoing commitment txn, or this HTLC does not
//     originate as a response from the peer on the outgoing link, e.g.
//     on-chain resolutions.
//
//   - closeKey: identifies the circuit that should be deleted after this Fail
//     HTLC is included in a commitment txn. This value should only be nil if
//     the HTLC was failed locally before committing a circuit to the circuit
//     map.
//
// NOTE: It is okay for sourceRef, destRef, and closeKey to be nil when unit
// testing the wallet.
func (lc *LightningChannel) FailHTLC(htlcIndex uint64, reason []byte,
	sourceRef *channeldb.AddRef, destRef *channeldb.SettleFailRef,
	closeKey *models.CircuitKey) error {

	lc.Lock()
	defer lc.Unlock()

	htlc := lc.updateLogs.Remote.lookupHtlc(htlcIndex)
	if htlc == nil {
		return ErrUnknownHtlcIndex{lc.ShortChanID(), htlcIndex}
	}

	// Now that we know the HTLC exists, we'll ensure that we haven't
	// already attempted to fail the HTLC.
	if lc.updateLogs.Remote.htlcHasModification(htlcIndex) {
		return ErrHtlcIndexAlreadyFailed(htlcIndex)
	}

	pd := &paymentDescriptor{
		ChanID:           lc.ChannelID(),
		Amount:           htlc.Amount,
		RHash:            htlc.RHash,
		ParentIndex:      htlcIndex,
		LogIndex:         lc.updateLogs.Local.logIndex,
		EntryType:        Fail,
		FailReason:       reason,
		SourceRef:        sourceRef,
		DestRef:          destRef,
		ClosedCircuitKey: closeKey,
	}

	lc.updateLogs.Local.appendUpdate(pd)

	// With the fail added to our local log, we'll now mark the HTLC as
	// modified to prevent ourselves from accidentally attempting a
	// duplicate fail.
	lc.updateLogs.Remote.markHtlcModified(htlcIndex)

	return nil
}
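
// A sketch of cancelling an incoming HTLC back to the peer (hypothetical
// references; destRef and closeKey may be nil as documented above):
//
//	err := channel.FailHTLC(
//		htlcIndex, failReason, &sourceRef, destRef, closeKey,
//	)
//	if err != nil {
//		// Unknown index or the HTLC was already settled/failed.
//	}
//	// A corresponding lnwire.UpdateFailHTLC carrying failReason is then
//	// sent to the peer.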

// MalformedFailHTLC attempts to fail a targeted HTLC by its payment hash,
// inserting an entry which will remove the target log entry within the next
// commitment update. This method is intended to be called in order to cancel
// an _incoming_ HTLC.
//
// The additional sourceRef specifies the location of the Add HTLC within a
// forwarding package that this HTLC is failing. This value should never be
// empty.
//
// NOTE: It is okay for sourceRef to be nil when unit testing the wallet.
func (lc *LightningChannel) MalformedFailHTLC(htlcIndex uint64,
	failCode lnwire.FailCode, shaOnionBlob [sha256.Size]byte,
	sourceRef *channeldb.AddRef) error {

	lc.Lock()
	defer lc.Unlock()

	htlc := lc.updateLogs.Remote.lookupHtlc(htlcIndex)
	if htlc == nil {
		return ErrUnknownHtlcIndex{lc.ShortChanID(), htlcIndex}
	}

	// Now that we know the HTLC exists, we'll ensure that we haven't
	// already attempted to fail the HTLC.
	if lc.updateLogs.Remote.htlcHasModification(htlcIndex) {
		return ErrHtlcIndexAlreadyFailed(htlcIndex)
	}

	pd := &paymentDescriptor{
		ChanID:       lc.ChannelID(),
		Amount:       htlc.Amount,
		RHash:        htlc.RHash,
		ParentIndex:  htlcIndex,
		LogIndex:     lc.updateLogs.Local.logIndex,
		EntryType:    MalformedFail,
		FailCode:     failCode,
		ShaOnionBlob: shaOnionBlob,
		SourceRef:    sourceRef,
	}

	lc.updateLogs.Local.appendUpdate(pd)

	// With the fail added to our local log, we'll now mark the HTLC as
	// modified to prevent ourselves from accidentally attempting a
	// duplicate fail.
	lc.updateLogs.Remote.markHtlcModified(htlcIndex)

	return nil
}

// ReceiveFailHTLC attempts to cancel a targeted HTLC by its log index,
// inserting an entry which will remove the target log entry within the next
// commitment update. This method should be called in response to the upstream
// party cancelling an outgoing HTLC.
func (lc *LightningChannel) ReceiveFailHTLC(htlcIndex uint64, reason []byte,
) error {

	lc.Lock()
	defer lc.Unlock()

	htlc := lc.updateLogs.Local.lookupHtlc(htlcIndex)
	if htlc == nil {
		return ErrUnknownHtlcIndex{lc.ShortChanID(), htlcIndex}
	}

	// Now that we know the HTLC exists, we'll ensure that they haven't
	// already attempted to fail the HTLC.
	if lc.updateLogs.Local.htlcHasModification(htlcIndex) {
		return ErrHtlcIndexAlreadyFailed(htlcIndex)
	}

	pd := &paymentDescriptor{
		ChanID:      lc.ChannelID(),
		Amount:      htlc.Amount,
		RHash:       htlc.RHash,
		ParentIndex: htlc.HtlcIndex,
		LogIndex:    lc.updateLogs.Remote.logIndex,
		EntryType:   Fail,
		FailReason:  reason,
	}

	lc.updateLogs.Remote.appendUpdate(pd)

	// With the fail added to the remote log, we'll now mark the HTLC as
	// modified to prevent ourselves from accidentally attempting a
	// duplicate fail.
	lc.updateLogs.Local.markHtlcModified(htlcIndex)

	return nil
}

// ChannelPoint returns the outpoint of the original funding transaction which
// created this active channel. This outpoint is used throughout various
// subsystems to uniquely identify an open channel.
func (lc *LightningChannel) ChannelPoint() wire.OutPoint {
	return lc.channelState.FundingOutpoint
}

// ChannelID returns the ChannelID of this LightningChannel. This is the same
// ChannelID that is used in update messages for this channel.
func (lc *LightningChannel) ChannelID() lnwire.ChannelID {
	return lnwire.NewChanIDFromOutPoint(lc.ChannelPoint())
}

// ShortChanID returns the short channel ID for the channel. The short channel
// ID encodes the exact location in the main chain that the original
// funding output can be found.
func (lc *LightningChannel) ShortChanID() lnwire.ShortChannelID {
	return lc.channelState.ShortChanID()
}

// LocalUpfrontShutdownScript returns the local upfront shutdown script for the
// channel. If it was not set, an empty byte array is returned.
func (lc *LightningChannel) LocalUpfrontShutdownScript() lnwire.DeliveryAddress {
	return lc.channelState.LocalShutdownScript
}

// RemoteUpfrontShutdownScript returns the remote upfront shutdown script for the
// channel. If it was not set, an empty byte array is returned.
func (lc *LightningChannel) RemoteUpfrontShutdownScript() lnwire.DeliveryAddress {
	return lc.channelState.RemoteShutdownScript
}

// AbsoluteThawHeight determines a frozen channel's absolute thaw height. If
// the channel is not frozen, then 0 is returned.
//
// An error is returned if the channel is pending, or is an unconfirmed zero
// conf channel.
func (lc *LightningChannel) AbsoluteThawHeight() (uint32, error) {
	return lc.channelState.AbsoluteThawHeight()
}

// getSignedCommitTx takes the latest commitment transaction and populates it
// with witness data.
func (lc *LightningChannel) getSignedCommitTx() (*wire.MsgTx, error) {
	// Fetch the current commitment transaction, along with their signature
	// for the transaction.
	localCommit := lc.channelState.LocalCommitment
	commitTx := localCommit.CommitTx.Copy()

	ourKey := lc.channelState.LocalChanCfg.MultiSigKey
	theirKey := lc.channelState.RemoteChanCfg.MultiSigKey

	var witness wire.TxWitness
	switch {
	// If this is a taproot channel, then we'll need to re-derive the
	// nonce we need to generate a new signature.
	case lc.channelState.ChanType.IsTaproot():
		// First, we'll need to re-derive the local nonce we sent to
		// the remote party to create this musig session. We pass in
		// the same height here as we're generating the nonce needed
		// for the _current_ state.
		localNonce, err := channeldb.NewMusigVerificationNonce(
			ourKey.PubKey, lc.currentHeight,
			lc.taprootNonceProducer,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to re-derive "+
				"verification nonce: %w", err)
		}

		tapscriptTweak := fn.MapOption(TapscriptRootToTweak)(
			lc.channelState.TapscriptRoot,
		)

		// Now that we have the local nonce, we'll re-create the musig
		// session we had for this height.
		musigSession := NewPartialMusigSession(
			*localNonce, ourKey, theirKey, lc.Signer,
			&lc.fundingOutput, LocalMusigCommit, tapscriptTweak,
		)

		var remoteSig lnwire.PartialSigWithNonce
		err = remoteSig.Decode(
			bytes.NewReader(localCommit.CommitSig),
		)
		if err != nil {
			return nil, fmt.Errorf("unable to decode remote "+
				"partial sig: %w", err)
		}

		// Next, we'll manually finalize the session with the signing
		// nonce we got from the remote party which is embedded in the
		// signature we have.
		err = musigSession.FinalizeSession(musig2.Nonces{
			PubNonce: remoteSig.Nonce,
		})
		if err != nil {
			return nil, fmt.Errorf("unable to finalize musig "+
				"session: %w", err)
		}

		// Now that the session has been finalized, we can generate our
		// half of the signature for the state. We don't capture the
		// sig as it's stored within the session.
		if _, err := musigSession.SignCommit(commitTx); err != nil {
			return nil, fmt.Errorf("unable to sign musig2 "+
				"commitment: %w", err)
		}

		// The final step is now to combine this signature we generated
		// above, with the remote party's signature. We only need to
		// pass the remote sig, as the local sig was already cached in
		// the session.
		var partialSig MusigPartialSig
		partialSig.FromWireSig(&remoteSig)
		finalSig, err := musigSession.CombineSigs(partialSig.sig)
		if err != nil {
			return nil, fmt.Errorf("unable to combine musig "+
				"partial sigs: %w", err)
		}

		// The witness is the single keyspend schnorr sig.
		witness = wire.TxWitness{
			finalSig.Serialize(),
		}

	// Otherwise, the final witness we generate will be a normal p2wsh
	// multi-sig spend.
	default:
		theirSig, err := ecdsa.ParseDERSignature(localCommit.CommitSig)
		if err != nil {
			return nil, err
		}

		// With this, we then generate the full witness so the caller
		// can broadcast a fully signed transaction.
		lc.signDesc.SigHashes = input.NewTxSigHashesV0Only(commitTx)
		ourSig, err := lc.Signer.SignOutputRaw(commitTx, lc.signDesc)
		if err != nil {
			return nil, err
		}

		// With the final signature generated, create the witness stack
		// required to spend from the multi-sig output.
		witness = input.SpendMultiSig(
			lc.signDesc.WitnessScript,
			ourKey.PubKey.SerializeCompressed(), ourSig,
			theirKey.PubKey.SerializeCompressed(), theirSig,
		)
	}

	commitTx.TxIn[0].Witness = witness

	return commitTx, nil
}

// CommitOutputResolution carries the necessary information required to allow
// us to sweep our commitment output in the case that either party goes to
// chain.
type CommitOutputResolution struct {
	// SelfOutPoint is the full outpoint that points to our pay-to-self
	// output within the closing commitment transaction.
	SelfOutPoint wire.OutPoint

	// SelfOutputSignDesc is a fully populated sign descriptor capable of
	// generating a valid signature to sweep the output paying to us.
	SelfOutputSignDesc input.SignDescriptor

	// MaturityDelay is the relative time-lock, in blocks, for all outputs
	// that pay to the local party within the broadcast commitment
	// transaction.
	MaturityDelay uint32

	// ResolutionBlob is a blob used for aux channels that permits a
	// spender of the output to properly resolve it in the case of a force
	// close.
	ResolutionBlob fn.Option[tlv.Blob]
}

// UnilateralCloseSummary describes the details of a detected unilateral
// channel closure. This includes information about the transaction and block
// with which the channel was unilaterally closed, as well as summarization
// details concerning the _state_ of the channel at the point of channel
// closure. Additionally, if we had a commitment output above dust on the
// remote party's commitment transaction, a SignDescriptor with the material
// necessary to sweep the output is returned. Finally, if we had any outgoing
// HTLC's within the commitment transaction, then an OutgoingHtlcResolution
// for each output will be included.
type UnilateralCloseSummary struct {
	// SpendDetail is a struct that describes how and when the funding
	// output was spent.
	*chainntnfs.SpendDetail

	// ChannelCloseSummary is a struct describing the final state of the
	// channel and in which state it was closed.
	channeldb.ChannelCloseSummary

	// CommitResolution contains all the data required to sweep the output
	// to ourselves. If this is our commitment transaction, then we'll need
	// to wait a time delay before we can sweep the output.
	//
	// NOTE: If our commitment delivery output is below the dust limit,
	// then this will be nil.
	CommitResolution *CommitOutputResolution

	// HtlcResolutions contains a fully populated HtlcResolutions struct
	// which contains all the data required to sweep any outgoing HTLC's,
	// and also any incoming HTLC's that we know the pre-image to.
	HtlcResolutions *HtlcResolutions

	// RemoteCommit is the exact commitment state that the remote party
	// broadcast.
	RemoteCommit channeldb.ChannelCommitment

	// AnchorResolution contains the data required to sweep our anchor
	// output. If the channel type doesn't include anchors, the value of
	// this field will be nil.
	AnchorResolution *AnchorResolution
}

// NewUnilateralCloseSummary creates a new summary that provides the caller
// with all the information required to claim all funds on chain in the event
// that the remote party broadcasts their commitment. The commitPoint argument
// should be set to the per_commitment_point corresponding to the spending
// commitment.
//
// NOTE: The remoteCommit argument should be set to the stored commitment for
// this particular state. If we don't have the commitment stored (should only
// happen in case we have lost state) it should be set to an empty struct, in
// which case we will attempt to sweep the non-HTLC output using the passed
// commitPoint.
func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel,
	signer input.Signer, commitSpend *chainntnfs.SpendDetail,
	remoteCommit channeldb.ChannelCommitment, commitPoint *btcec.PublicKey,
	leafStore fn.Option[AuxLeafStore],
	auxResolver fn.Option[AuxContractResolver]) (*UnilateralCloseSummary,
	error) {

	// First, we'll generate the commitment point and the revocation point
	// so we can re-construct the HTLC state and also our payment key.
	commitType := lntypes.Remote
	keyRing := DeriveCommitmentKeys(
		commitPoint, commitType, chanState.ChanType,
		&chanState.LocalChanCfg, &chanState.RemoteChanCfg,
	)

	auxResult, err := fn.MapOptionZ(
		leafStore, func(s AuxLeafStore) fn.Result[CommitDiffAuxResult] {
			return s.FetchLeavesFromCommit(
				NewAuxChanState(chanState), remoteCommit,
				*keyRing,
			)
		},
	).Unpack()
	if err != nil {
		return nil, fmt.Errorf("unable to fetch aux leaves: %w", err)
	}

	// Next, we'll obtain HTLC resolutions for all the outgoing HTLC's we
	// had on their commitment transaction.
	var (
		leaseExpiry       uint32
		selfPoint         *wire.OutPoint
		localBalance      int64
		isRemoteInitiator = !chanState.IsInitiator
		commitTxBroadcast = commitSpend.SpendingTx
	)

	if chanState.ChanType.HasLeaseExpiration() {
		leaseExpiry = chanState.ThawHeight
	}

	htlcResolutions, err := extractHtlcResolutions(
		chainfee.SatPerKWeight(remoteCommit.FeePerKw), commitType,
		signer, remoteCommit.Htlcs, keyRing, &chanState.LocalChanCfg,
		&chanState.RemoteChanCfg, commitSpend.SpendingTx,
		chanState.ChanType, isRemoteInitiator, leaseExpiry,
		auxResult.AuxLeaves,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to create htlc resolutions: %w",
			err)
	}

	// Before we can generate the proper sign descriptor, we'll need to
	// locate the output index of our non-delayed output on the commitment
	// transaction.
	remoteAuxLeaf := fn.ChainOption(
		func(l CommitAuxLeaves) input.AuxTapLeaf {
			return l.RemoteAuxLeaf
		},
	)(auxResult.AuxLeaves)
	selfScript, maturityDelay, err := CommitScriptToRemote(
		chanState.ChanType, isRemoteInitiator, keyRing.ToRemoteKey,
		leaseExpiry, remoteAuxLeaf,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to create self commit "+
			"script: %w", err)
	}
	for outputIndex, txOut := range commitTxBroadcast.TxOut {
		if bytes.Equal(txOut.PkScript, selfScript.PkScript()) {
			selfPoint = &wire.OutPoint{
				Hash:  *commitSpend.SpenderTxHash,
				Index: uint32(outputIndex),
			}
			localBalance = txOut.Value
			break
		}
	}

	// With the HTLC's taken care of, we'll generate the sign descriptor
	// necessary to sweep our commitment output, but only if we had a
	// non-trimmed balance.
	var commitResolution *CommitOutputResolution
	if selfPoint != nil {
		localPayBase := chanState.LocalChanCfg.PaymentBasePoint

		// As the remote party has force closed, we just need the
		// success witness script.
		witnessScript, err := selfScript.WitnessScriptForPath(
			input.ScriptPathSuccess,
		)
		if err != nil {
			return nil, err
		}

		commitResolution = &CommitOutputResolution{
			SelfOutPoint: *selfPoint,
			SelfOutputSignDesc: input.SignDescriptor{
				KeyDesc:       localPayBase,
				SingleTweak:   keyRing.LocalCommitKeyTweak,
				WitnessScript: witnessScript,
				Output: &wire.TxOut{
					Value:    localBalance,
					PkScript: selfScript.PkScript(),
				},
				HashType: sweepSigHash(chanState.ChanType),
			},
			MaturityDelay: maturityDelay,
		}

		// For taproot channels, we'll need to set some additional
		// fields to ensure the output can be swept.
		//
		//nolint:lll
		if scriptTree, ok := selfScript.(input.TapscriptDescriptor); ok {
			commitResolution.SelfOutputSignDesc.SignMethod =
				input.TaprootScriptSpendSignMethod

			ctrlBlock, err := scriptTree.CtrlBlockForPath(
				input.ScriptPathSuccess,
			)
			if err != nil {
				return nil, err
			}
			//nolint:lll
			commitResolution.SelfOutputSignDesc.ControlBlock, err = ctrlBlock.ToBytes()
			if err != nil {
				return nil, err
			}
		}

		// At this point, we'll check to see if we need any extra
		// resolution data for this output.
		resolveReq := ResolutionReq{
			ChanPoint:     chanState.FundingOutpoint,
			ShortChanID:   chanState.ShortChanID(),
			Initiator:     chanState.IsInitiator,
			CommitBlob:    chanState.RemoteCommitment.CustomBlob,
			FundingBlob:   chanState.CustomBlob,
			Type:          input.TaprootRemoteCommitSpend,
			CloseType:     RemoteForceClose,
			CommitTx:      commitTxBroadcast,
			ContractPoint: *selfPoint,
			SignDesc:      commitResolution.SelfOutputSignDesc,
			KeyRing:       keyRing,
			CsvDelay:      maturityDelay,
			CommitFee:     chanState.RemoteCommitment.CommitFee,
		}
		resolveBlob := fn.MapOptionZ(
			auxResolver,
			func(a AuxContractResolver) fn.Result[tlv.Blob] {
|
|
|
|
return a.ResolveContract(resolveReq)
|
|
|
|
},
|
|
|
|
)
|
|
|
|
if err := resolveBlob.Err(); err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to aux resolve: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
commitResolution.ResolutionBlob = resolveBlob.Option()
|
2018-01-18 22:49:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
closeSummary := channeldb.ChannelCloseSummary{
|
2018-08-14 04:15:54 +02:00
|
|
|
ChanPoint: chanState.FundingOutpoint,
|
|
|
|
ChainHash: chanState.ChainHash,
|
|
|
|
ClosingTXID: *commitSpend.SpenderTxHash,
|
|
|
|
CloseHeight: uint32(commitSpend.SpendingHeight),
|
|
|
|
RemotePub: chanState.IdentityPub,
|
|
|
|
Capacity: chanState.Capacity,
|
|
|
|
SettledBalance: btcutil.Amount(localBalance),
|
|
|
|
CloseType: channeldb.RemoteForceClose,
|
|
|
|
IsPending: true,
|
|
|
|
RemoteCurrentRevocation: chanState.RemoteCurrentRevocation,
|
|
|
|
RemoteNextRevocation: chanState.RemoteNextRevocation,
|
2018-10-19 22:48:59 +02:00
|
|
|
ShortChanID: chanState.ShortChanID(),
|
2018-08-14 04:15:54 +02:00
|
|
|
LocalChanConfig: chanState.LocalChanCfg,
|
2018-01-18 22:49:35 +01:00
|
|
|
}
|
|
|
|
|
2018-11-20 15:09:45 +01:00
|
|
|
// Attempt to add a channel sync message to the close summary.
|
2019-09-11 11:15:57 +02:00
|
|
|
chanSync, err := chanState.ChanSyncMsg()
|
2018-11-20 15:09:45 +01:00
|
|
|
if err != nil {
|
|
|
|
walletLog.Errorf("ChannelPoint(%v): unable to create channel sync "+
|
|
|
|
"message: %v", chanState.FundingOutpoint, err)
|
|
|
|
} else {
|
|
|
|
closeSummary.LastChanSyncMsg = chanSync
|
|
|
|
}
|
|
|
|
|
2019-12-13 11:14:22 +01:00
|
|
|
anchorResolution, err := NewAnchorResolution(
|
2024-07-31 01:44:18 +02:00
|
|
|
chanState, commitTxBroadcast, keyRing, lntypes.Remote,
|
2019-12-13 11:14:22 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-01-18 22:49:35 +01:00
|
|
|
return &UnilateralCloseSummary{
|
|
|
|
SpendDetail: commitSpend,
|
|
|
|
ChannelCloseSummary: closeSummary,
|
|
|
|
CommitResolution: commitResolution,
|
|
|
|
HtlcResolutions: htlcResolutions,
|
|
|
|
RemoteCommit: remoteCommit,
|
2019-12-13 11:14:22 +01:00
|
|
|
AnchorResolution: anchorResolution,
|
2018-01-18 22:49:35 +01:00
|
|
|
}, nil
|
2018-01-17 03:17:18 +01:00
|
|
|
}
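
// The following helper is a hypothetical sketch, not part of the original
// API: it shows how a caller might inspect the summary returned by
// NewUnilateralCloseSummary before handing the resolutions off to a
// sweeper. It only reads fields that are populated above; the function
// name itself is illustrative.
func describeUnilateralCloseSketch(s *UnilateralCloseSummary) string {
	numIncoming, numOutgoing := 0, 0
	if s.HtlcResolutions != nil {
		numIncoming = len(s.HtlcResolutions.IncomingHTLCs)
		numOutgoing = len(s.HtlcResolutions.OutgoingHTLCs)
	}

	// A nil CommitResolution means our non-delayed output was trimmed
	// (dust), so there is nothing of ours to sweep directly.
	haveCommitOutput := s.CommitResolution != nil

	return fmt.Sprintf("settled=%v sweepable_commit_output=%v "+
		"incoming_htlcs=%d outgoing_htlcs=%d anchor=%v",
		s.ChannelCloseSummary.SettledBalance, haveCommitOutput,
		numIncoming, numOutgoing, s.AnchorResolution != nil)
}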
|
2017-07-31 02:42:09 +02:00
|
|
|
|
2018-01-17 03:23:41 +01:00
|
|
|
// IncomingHtlcResolution houses the information required to sweep any incoming
|
|
|
|
// HTLC's that we know the preimage to. We'll need to sweep an HTLC manually
|
|
|
|
// using this struct if we need to go on-chain for any reason, or if we detect
|
|
|
|
// that the remote party broadcasts their commitment transaction.
|
|
|
|
type IncomingHtlcResolution struct {
|
2019-04-15 14:24:43 +02:00
|
|
|
// Preimage is the preimage that will be used to satisfy the contract of
|
|
|
|
// the HTLC.
|
2018-01-17 03:23:41 +01:00
|
|
|
//
|
2019-04-15 14:24:43 +02:00
|
|
|
// NOTE: This field will only be populated in the incoming contest
|
|
|
|
// resolver.
|
2018-01-17 03:23:41 +01:00
|
|
|
Preimage [32]byte
|
|
|
|
|
|
|
|
// SignedSuccessTx is the fully signed HTLC success transaction. This
|
|
|
|
// transaction (if non-nil) can be broadcast immediately. After a csv
|
|
|
|
// delay (included below), the output created by this transaction
|
|
|
|
// can be swept on-chain.
|
|
|
|
//
|
|
|
|
// NOTE: If this field is nil, then this indicates that we don't need
|
|
|
|
// to go to the second level to claim this HTLC. Instead, it can be
|
|
|
|
// claimed directly from the outpoint listed below.
|
|
|
|
SignedSuccessTx *wire.MsgTx
|
|
|
|
|
2020-12-09 12:24:01 +01:00
|
|
|
// SignDetails is non-nil if SignedSuccessTx is non-nil, and the
|
|
|
|
// channel is of the anchor type. As the above HTLC transaction will be
|
|
|
|
// signed by the channel peer using SINGLE|ANYONECANPAY for such
|
|
|
|
// channels, we can use the sign details to add the input-output pair
|
|
|
|
// of the HTLC transaction to another transaction, thereby aggregating
|
|
|
|
// multiple HTLC transactions together, and adding fees as needed.
|
|
|
|
SignDetails *input.SignDetails
|
|
|
|
|
2018-01-17 03:23:41 +01:00
|
|
|
// CsvDelay is the relative time lock (expressed in blocks) that must
|
|
|
|
// pass after the SignedSuccessTx is confirmed in the chain before the
|
|
|
|
// output can be swept.
|
|
|
|
//
|
2020-03-06 16:11:47 +01:00
|
|
|
// NOTE: If SignedSuccessTx is nil, then this field denotes the CSV
|
|
|
|
// delay needed to spend from the commitment transaction.
|
2018-01-17 03:23:41 +01:00
|
|
|
CsvDelay uint32
|
2017-07-30 22:20:58 +02:00
|
|
|
|
2018-01-17 03:23:41 +01:00
|
|
|
// ClaimOutpoint is the final outpoint that needs to be spent in order
|
|
|
|
// to fully sweep the HTLC. The SignDescriptor below should be used to
|
|
|
|
// spend this outpoint. In the case of a second-level HTLC (non-nil
|
|
|
|
// SignedSuccessTx), we'll be spending an output of a new transaction.
|
|
|
|
// Otherwise, it'll be an output in the commitment transaction.
|
|
|
|
ClaimOutpoint wire.OutPoint
|
2017-09-27 03:00:16 +02:00
|
|
|
|
2018-01-17 03:23:41 +01:00
|
|
|
// SweepSignDesc is a sign descriptor that has been populated with the
|
|
|
|
// necessary items required to spend the sole output of the above
|
|
|
|
// transaction.
|
2019-01-16 15:47:43 +01:00
|
|
|
SweepSignDesc input.SignDescriptor
|
2017-07-30 22:20:58 +02:00
|
|
|
}
|
|
|
|
|
2018-01-17 03:26:11 +01:00
|
|
|
// OutgoingHtlcResolution houses the information necessary to sweep any
|
|
|
|
// outgoing HTLC's after their contract has expired. This struct will be needed
|
|
|
|
// in one of two cases: the local party force closes the commitment transaction
|
|
|
|
// or the remote party unilaterally closes with their version of the commitment
|
2017-07-30 22:20:58 +02:00
|
|
|
// transaction.
|
|
|
|
type OutgoingHtlcResolution struct {
|
|
|
|
// Expiry is the absolute timeout of the HTLC. This value is expressed in
|
|
|
|
// block height, meaning after this height the HTLC can be swept.
|
|
|
|
Expiry uint32
|
|
|
|
|
|
|
|
// SignedTimeoutTx is the fully signed HTLC timeout transaction. This
|
|
|
|
// must be broadcast immediately after the timeout has passed. Once this
|
|
|
|
// has been confirmed, the HTLC output will transition into the
|
|
|
|
// delay+claim state.
|
2018-01-17 03:26:11 +01:00
|
|
|
//
|
|
|
|
// NOTE: If this field is nil, then this indicates that we don't need
|
|
|
|
// to go to the second level to claim this HTLC. Instead, it can be
|
|
|
|
// claimed directly from the outpoint listed below.
|
2017-07-30 22:20:58 +02:00
|
|
|
SignedTimeoutTx *wire.MsgTx
|
|
|
|
|
2020-12-09 12:24:01 +01:00
|
|
|
// SignDetails is non-nil if SignedTimeoutTx is non-nil, and the
|
|
|
|
// channel is of the anchor type. As the above HTLC transaction will be
|
|
|
|
// signed by the channel peer using SINGLE|ANYONECANPAY for such
|
|
|
|
// channels, we can use the sign details to add the input-output pair
|
|
|
|
// of the HTLC transaction to another transaction, thereby aggregating
|
|
|
|
// multiple HTLC transactions together, and adding fees as needed.
|
|
|
|
SignDetails *input.SignDetails
|
|
|
|
|
2018-01-17 03:26:11 +01:00
|
|
|
// CsvDelay is the relative time lock (expressed in blocks) that must
|
|
|
|
// pass after the SignedTimeoutTx is confirmed in the chain before the
|
|
|
|
// output can be swept.
|
|
|
|
//
|
2020-03-06 16:11:47 +01:00
|
|
|
// NOTE: If SignedTimeoutTx is nil, then this field denotes the CSV
|
|
|
|
// delay needed to spend from the commitment transaction.
|
2018-01-17 03:26:11 +01:00
|
|
|
CsvDelay uint32
|
|
|
|
|
|
|
|
// ClaimOutpoint is the final outpoint that needs to be spent in order
|
|
|
|
// to fully sweep the HTLC. The SignDescriptor below should be used to
|
|
|
|
// spend this outpoint. In the case of a second-level HTLC (non-nil
|
|
|
|
// SignedTimeoutTx), then we'll be spending a new transaction.
|
|
|
|
// Otherwise, it'll be an output in the commitment transaction.
|
|
|
|
ClaimOutpoint wire.OutPoint
|
|
|
|
|
2017-07-30 22:20:58 +02:00
|
|
|
// SweepSignDesc is a sign descriptor that has been populated with the
|
|
|
|
// necessary items required to spend the sole output of the above
|
|
|
|
// transaction.
|
2019-01-16 15:47:43 +01:00
|
|
|
SweepSignDesc input.SignDescriptor
|
2017-07-30 22:20:58 +02:00
|
|
|
}
|
|
|
|
|
2018-01-17 03:36:31 +01:00
|
|
|
// HtlcResolutions contains the items necessary to sweep HTLC's on chain
|
|
|
|
// directly from a commitment transaction. We'll use this in case either party
|
|
|
|
// broadcasts a commitment transaction with live HTLC's.
|
|
|
|
type HtlcResolutions struct {
|
|
|
|
// IncomingHTLCs contains a set of structs that can be used to sweep
|
|
|
|
// all the incoming HTLC's that we know the preimage to.
|
|
|
|
IncomingHTLCs []IncomingHtlcResolution
|
|
|
|
|
|
|
|
// OutgoingHTLCs contains a set of structs that contains all the info
|
|
|
|
// needed to sweep an outgoing HTLC we've sent to the remote party
|
|
|
|
// after an absolute delay has expired.
|
|
|
|
OutgoingHTLCs []OutgoingHtlcResolution
|
|
|
|
}
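
// The helper below is an illustrative sketch, not part of the original
// file: it shows how the nil-ness of SignedSuccessTx and SignedTimeoutTx
// documented above determines whether an HTLC can be swept directly from
// the commitment transaction or must first go through a second-level
// transaction.
func countSecondLevelHtlcsSketch(res *HtlcResolutions) (direct,
	secondLevel int) {

	for _, incoming := range res.IncomingHTLCs {
		if incoming.SignedSuccessTx != nil {
			secondLevel++
		} else {
			direct++
		}
	}
	for _, outgoing := range res.OutgoingHTLCs {
		if outgoing.SignedTimeoutTx != nil {
			secondLevel++
		} else {
			direct++
		}
	}

	return direct, secondLevel
}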
|
|
|
|
|
2018-01-17 03:26:11 +01:00
|
|
|
// newOutgoingHtlcResolution generates a new HTLC resolution capable of
|
|
|
|
// allowing the caller to sweep an outgoing HTLC present on either their own, or
|
|
|
|
// the remote party's commitment transaction.
|
2019-10-31 03:43:05 +01:00
|
|
|
func newOutgoingHtlcResolution(signer input.Signer,
|
2020-12-09 12:24:01 +01:00
|
|
|
localChanCfg *channeldb.ChannelConfig, commitTx *wire.MsgTx,
|
2019-10-31 03:43:05 +01:00
|
|
|
htlc *channeldb.HTLC, keyRing *CommitmentKeyRing,
|
2021-07-15 02:16:13 +02:00
|
|
|
feePerKw chainfee.SatPerKWeight, csvDelay, leaseExpiry uint32,
|
2024-07-31 01:44:18 +02:00
|
|
|
whoseCommit lntypes.ChannelParty, isCommitFromInitiator bool,
|
2024-04-25 19:01:37 +02:00
|
|
|
chanType channeldb.ChannelType,
|
|
|
|
auxLeaves fn.Option[CommitAuxLeaves]) (*OutgoingHtlcResolution, error) {
|
2017-07-30 22:20:58 +02:00
|
|
|
|
|
|
|
op := wire.OutPoint{
|
2020-12-09 12:24:01 +01:00
|
|
|
Hash: commitTx.TxHash(),
|
2017-07-30 22:20:58 +02:00
|
|
|
Index: uint32(htlc.OutputIndex),
|
|
|
|
}
|
|
|
|
|
2023-03-02 06:38:53 +01:00
|
|
|
// First, we'll re-generate the script used to send the HTLC to the
|
|
|
|
// remote party within their commitment transaction.
|
2024-04-25 19:01:37 +02:00
|
|
|
auxLeaf := fn.ChainOption(func(l CommitAuxLeaves) input.AuxTapLeaf {
|
|
|
|
return l.OutgoingHtlcLeaves[htlc.HtlcIndex].AuxTapLeaf
|
|
|
|
})(auxLeaves)
|
2023-03-02 06:38:53 +01:00
|
|
|
htlcScriptInfo, err := genHtlcScript(
|
2024-07-31 01:44:18 +02:00
|
|
|
chanType, false, whoseCommit, htlc.RefundTimeout, htlc.RHash,
|
2024-04-25 19:01:37 +02:00
|
|
|
keyRing, auxLeaf,
|
2020-03-06 16:11:44 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-08-08 06:09:58 +02:00
|
|
|
htlcPkScript := htlcScriptInfo.PkScript()
|
|
|
|
|
|
|
|
// As this is an outgoing HTLC, we just care about the timeout path
|
|
|
|
// here.
|
|
|
|
scriptPath := input.ScriptPathTimeout
|
|
|
|
htlcWitnessScript, err := htlcScriptInfo.WitnessScriptForPath(
|
|
|
|
scriptPath,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-03-06 16:11:44 +01:00
|
|
|
|
2018-01-17 03:26:11 +01:00
|
|
|
// If we're spending this HTLC output from the remote node's
|
|
|
|
// commitment, then we won't need to go to the second level as our
|
|
|
|
// outputs don't have a CSV delay.
|
2024-07-31 01:44:18 +02:00
|
|
|
if whoseCommit.IsRemote() {
|
2018-01-17 03:26:11 +01:00
|
|
|
// With the script generated, we can completely populate the
|
|
|
|
// SignDescriptor needed to sweep the output.
|
2023-08-19 00:47:52 +02:00
|
|
|
prevFetcher := txscript.NewCannedPrevOutputFetcher(
|
|
|
|
htlcPkScript, int64(htlc.Amt.ToSatoshis()),
|
|
|
|
)
|
2023-03-02 06:42:44 +01:00
|
|
|
signDesc := input.SignDescriptor{
|
|
|
|
KeyDesc: localChanCfg.HtlcBasePoint,
|
|
|
|
SingleTweak: keyRing.LocalHtlcKeyTweak,
|
|
|
|
WitnessScript: htlcWitnessScript,
|
|
|
|
Output: &wire.TxOut{
|
|
|
|
PkScript: htlcPkScript,
|
|
|
|
Value: int64(htlc.Amt.ToSatoshis()),
|
|
|
|
},
|
2023-08-19 00:47:52 +02:00
|
|
|
HashType: sweepSigHash(chanType),
|
|
|
|
PrevOutputFetcher: prevFetcher,
|
2023-03-02 06:42:44 +01:00
|
|
|
}
|
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
scriptTree, ok := htlcScriptInfo.(input.TapscriptDescriptor)
|
|
|
|
if ok {
|
2023-03-02 06:42:44 +01:00
|
|
|
signDesc.SignMethod = input.TaprootScriptSpendSignMethod
|
2023-08-08 06:09:58 +02:00
|
|
|
|
|
|
|
ctrlBlock, err := scriptTree.CtrlBlockForPath(
|
|
|
|
scriptPath,
|
2023-03-02 06:42:44 +01:00
|
|
|
)
|
2023-08-08 06:09:58 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-03-02 06:42:44 +01:00
|
|
|
signDesc.ControlBlock, err = ctrlBlock.ToBytes()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-17 03:26:11 +01:00
|
|
|
return &OutgoingHtlcResolution{
|
|
|
|
Expiry: htlc.RefundTimeout,
|
|
|
|
ClaimOutpoint: op,
|
2023-03-02 06:42:44 +01:00
|
|
|
SweepSignDesc: signDesc,
|
|
|
|
CsvDelay: HtlcSecondLevelInputSequence(chanType),
|
2018-01-17 03:26:11 +01:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we'll need to craft a second level HTLC transaction, as
|
|
|
|
// well as a sign desc to sweep after the CSV delay.
|
|
|
|
|
2017-07-30 22:20:58 +02:00
|
|
|
// In order to properly reconstruct the HTLC transaction, we'll need to
|
|
|
|
// re-calculate the fee required at this state, so we can add the
|
|
|
|
// correct output value to the transaction.
|
2020-03-06 16:11:49 +01:00
|
|
|
htlcFee := HtlcTimeoutFee(chanType, feePerKw)
|
2017-08-22 08:20:29 +02:00
|
|
|
secondLevelOutputAmt := htlc.Amt.ToSatoshis() - htlcFee
|
2017-07-30 22:20:58 +02:00
|
|
|
|
|
|
|
// With the fee calculated, re-construct the second level timeout
|
|
|
|
// transaction.
|
2024-04-25 19:01:37 +02:00
|
|
|
secondLevelAuxLeaf := fn.ChainOption(
|
|
|
|
func(l CommitAuxLeaves) input.AuxTapLeaf {
|
|
|
|
leaves := l.OutgoingHtlcLeaves
|
|
|
|
return leaves[htlc.HtlcIndex].SecondLevelLeaf
|
|
|
|
},
|
|
|
|
)(auxLeaves)
|
2020-11-17 12:50:41 +01:00
|
|
|
timeoutTx, err := CreateHtlcTimeoutTx(
|
2021-07-15 02:16:13 +02:00
|
|
|
chanType, isCommitFromInitiator, op, secondLevelOutputAmt,
|
2024-04-25 19:01:37 +02:00
|
|
|
htlc.RefundTimeout, csvDelay, leaseExpiry,
|
|
|
|
keyRing.RevocationKey, keyRing.ToLocalKey, secondLevelAuxLeaf,
|
2017-07-30 22:20:58 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// With the transaction created, we can generate a sign descriptor
|
|
|
|
// that's capable of generating the signature required to spend the
|
|
|
|
// HTLC output using the timeout transaction.
|
2020-12-09 12:24:01 +01:00
|
|
|
txOut := commitTx.TxOut[htlc.OutputIndex]
|
2023-03-02 06:42:44 +01:00
|
|
|
prevFetcher := txscript.NewCannedPrevOutputFetcher(
|
|
|
|
txOut.PkScript, txOut.Value,
|
|
|
|
)
|
|
|
|
hashCache := txscript.NewTxSigHashes(timeoutTx, prevFetcher)
|
2019-01-16 15:47:43 +01:00
|
|
|
timeoutSignDesc := input.SignDescriptor{
|
2023-03-02 06:42:44 +01:00
|
|
|
KeyDesc: localChanCfg.HtlcBasePoint,
|
|
|
|
SingleTweak: keyRing.LocalHtlcKeyTweak,
|
|
|
|
WitnessScript: htlcWitnessScript,
|
|
|
|
Output: txOut,
|
2023-08-19 00:17:51 +02:00
|
|
|
HashType: sweepSigHash(chanType),
|
2023-03-02 06:42:44 +01:00
|
|
|
PrevOutputFetcher: prevFetcher,
|
|
|
|
SigHashes: hashCache,
|
|
|
|
InputIndex: 0,
|
2017-07-30 22:20:58 +02:00
|
|
|
}
|
|
|
|
|
2023-03-02 06:42:44 +01:00
|
|
|
htlcSig, err := input.ParseSignature(htlc.Signature)
|
2020-04-06 02:06:14 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-07-30 22:20:58 +02:00
|
|
|
// With the sign desc created, we can now construct the full witness
|
|
|
|
// for the timeout transaction, and populate it as well.
|
2020-03-06 16:11:47 +01:00
|
|
|
sigHashType := HtlcSigHashType(chanType)
|
2023-03-02 06:42:44 +01:00
|
|
|
var timeoutWitness wire.TxWitness
|
2023-08-08 06:09:58 +02:00
|
|
|
if scriptTree, ok := htlcScriptInfo.(input.TapscriptDescriptor); ok {
|
2023-03-02 06:42:44 +01:00
|
|
|
timeoutSignDesc.SignMethod = input.TaprootScriptSpendSignMethod
|
|
|
|
|
|
|
|
timeoutWitness, err = input.SenderHTLCScriptTaprootTimeout(
|
|
|
|
htlcSig, sigHashType, signer, &timeoutSignDesc,
|
|
|
|
timeoutTx, keyRing.RevocationKey,
|
2023-08-08 06:09:58 +02:00
|
|
|
scriptTree.TapScriptTree(),
|
2023-03-02 06:42:44 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// The control block is always the final element of the witness
|
|
|
|
// stack. We set this here as eventually the sweeper will need
|
|
|
|
// to re-sign, so it needs the isolated control block.
|
|
|
|
//
|
|
|
|
// TODO(roasbeef): move this into input.go?
|
|
|
|
ctlrBlkIdx := len(timeoutWitness) - 1
|
|
|
|
timeoutSignDesc.ControlBlock = timeoutWitness[ctlrBlkIdx]
|
|
|
|
} else {
|
|
|
|
timeoutWitness, err = input.SenderHtlcSpendTimeout(
|
|
|
|
htlcSig, sigHashType, signer, &timeoutSignDesc,
|
|
|
|
timeoutTx,
|
|
|
|
)
|
|
|
|
}
|
2017-07-30 22:20:58 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-03-02 06:42:44 +01:00
|
|
|
|
2017-07-30 22:20:58 +02:00
|
|
|
timeoutTx.TxIn[0].Witness = timeoutWitness
|
|
|
|
|
2020-12-09 12:24:01 +01:00
|
|
|
// If this is an anchor type channel, the sign details will let us
|
|
|
|
// re-sign an aggregated tx later.
|
|
|
|
txSignDetails := HtlcSignDetails(
|
|
|
|
chanType, timeoutSignDesc, sigHashType, htlcSig,
|
|
|
|
)
|
|
|
|
|
2017-07-30 22:20:58 +02:00
|
|
|
// Finally, we'll generate the script output that the timeout
|
|
|
|
// transaction creates so we can generate the signDesc required to
|
|
|
|
// complete the claim process after a delay period.
|
2023-03-02 06:42:44 +01:00
|
|
|
var (
|
2023-08-08 06:09:58 +02:00
|
|
|
htlcSweepScript input.ScriptDescriptor
|
2023-03-02 06:42:44 +01:00
|
|
|
signMethod input.SignMethod
|
|
|
|
ctrlBlock []byte
|
2017-09-27 04:03:04 +02:00
|
|
|
)
|
2023-03-02 06:42:44 +01:00
|
|
|
if !chanType.IsTaproot() {
|
|
|
|
htlcSweepScript, err = SecondLevelHtlcScript(
|
|
|
|
chanType, isCommitFromInitiator, keyRing.RevocationKey,
|
|
|
|
keyRing.ToLocalKey, csvDelay, leaseExpiry,
|
2024-04-25 19:01:37 +02:00
|
|
|
secondLevelAuxLeaf,
|
2023-03-02 06:42:44 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
2023-08-09 07:22:12 +02:00
|
|
|
//nolint:lll
|
2023-03-02 06:42:44 +01:00
|
|
|
secondLevelScriptTree, err := input.TaprootSecondLevelScriptTree(
|
|
|
|
keyRing.RevocationKey, keyRing.ToLocalKey, csvDelay,
|
2024-04-25 19:01:37 +02:00
|
|
|
secondLevelAuxLeaf,
|
2023-03-02 06:42:44 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
signMethod = input.TaprootScriptSpendSignMethod
|
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
controlBlock, err := secondLevelScriptTree.CtrlBlockForPath(
|
|
|
|
input.ScriptPathSuccess,
|
2023-03-02 06:42:44 +01:00
|
|
|
)
|
2023-08-08 06:09:58 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-03-02 06:42:44 +01:00
|
|
|
ctrlBlock, err = controlBlock.ToBytes()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-08-08 06:09:58 +02:00
|
|
|
|
|
|
|
htlcSweepScript = secondLevelScriptTree
|
|
|
|
}
|
|
|
|
|
|
|
|
// In this case, the witness script that needs to be signed will always
|
|
|
|
// be that of the success path.
|
|
|
|
htlcSweepWitnessScript, err := htlcSweepScript.WitnessScriptForPath(
|
|
|
|
input.ScriptPathSuccess,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-07-30 22:20:58 +02:00
|
|
|
}
|
|
|
|
|
2019-01-16 15:47:43 +01:00
|
|
|
localDelayTweak := input.SingleTweakBytes(
|
2018-02-18 00:17:40 +01:00
|
|
|
keyRing.CommitPoint, localChanCfg.DelayBasePoint.PubKey,
|
|
|
|
)
|
2023-07-21 09:57:50 +02:00
|
|
|
|
2017-07-30 22:20:58 +02:00
|
|
|
return &OutgoingHtlcResolution{
|
|
|
|
Expiry: htlc.RefundTimeout,
|
|
|
|
SignedTimeoutTx: timeoutTx,
|
2020-12-09 12:24:01 +01:00
|
|
|
SignDetails: txSignDetails,
|
2018-01-17 03:23:41 +01:00
|
|
|
CsvDelay: csvDelay,
|
|
|
|
ClaimOutpoint: wire.OutPoint{
|
|
|
|
Hash: timeoutTx.TxHash(),
|
|
|
|
Index: 0,
|
|
|
|
},
|
2019-01-16 15:47:43 +01:00
|
|
|
SweepSignDesc: input.SignDescriptor{
|
2018-02-18 00:17:40 +01:00
|
|
|
KeyDesc: localChanCfg.DelayBasePoint,
|
2018-01-17 03:23:41 +01:00
|
|
|
SingleTweak: localDelayTweak,
|
2023-08-08 06:09:58 +02:00
|
|
|
WitnessScript: htlcSweepWitnessScript,
|
2018-01-17 03:23:41 +01:00
|
|
|
Output: &wire.TxOut{
|
2023-08-08 06:09:58 +02:00
|
|
|
PkScript: htlcSweepScript.PkScript(),
|
2018-01-17 03:23:41 +01:00
|
|
|
Value: int64(secondLevelOutputAmt),
|
|
|
|
},
|
2023-08-19 00:47:52 +02:00
|
|
|
HashType: sweepSigHash(chanType),
|
|
|
|
PrevOutputFetcher: txscript.NewCannedPrevOutputFetcher(
|
|
|
|
htlcSweepScript.PkScript(),
|
|
|
|
int64(secondLevelOutputAmt),
|
|
|
|
),
|
2023-03-02 06:42:44 +01:00
|
|
|
SignMethod: signMethod,
|
|
|
|
ControlBlock: ctrlBlock,
|
2018-01-17 03:23:41 +01:00
|
|
|
},
|
|
|
|
}, nil
|
|
|
|
}
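
// timeoutOutputValueSketch is a hypothetical helper, not part of the
// original source, that mirrors the fee arithmetic used above when the
// second-level timeout transaction is constructed: the sweepable output is
// the HTLC amount minus the timeout transaction fee for the given channel
// type and fee rate.
func timeoutOutputValueSketch(chanType channeldb.ChannelType,
	feePerKw chainfee.SatPerKWeight,
	htlcAmt lnwire.MilliSatoshi) btcutil.Amount {

	// HtlcTimeoutFee is the same helper used in newOutgoingHtlcResolution
	// above.
	htlcFee := HtlcTimeoutFee(chanType, feePerKw)

	return htlcAmt.ToSatoshis() - htlcFee
}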
|
|
|
|
|
|
|
|
// newIncomingHtlcResolution creates a new HTLC resolution capable of allowing
|
|
|
|
// the caller to sweep an incoming HTLC. If the HTLC is on the caller's
|
|
|
|
// commitment transaction, then they'll need to broadcast a second-level
|
|
|
|
// transaction before sweeping the output (and incur a CSV delay). Otherwise,
|
|
|
|
// they can just sweep the output immediately with knowledge of the pre-image.
|
|
|
|
//
|
|
|
|
// TODO(roasbeef) consolidate code with above func
|
2020-03-06 16:11:45 +01:00
|
|
|
func newIncomingHtlcResolution(signer input.Signer,
|
2020-12-09 12:24:01 +01:00
|
|
|
localChanCfg *channeldb.ChannelConfig, commitTx *wire.MsgTx,
|
2020-03-06 16:11:45 +01:00
|
|
|
htlc *channeldb.HTLC, keyRing *CommitmentKeyRing,
|
2021-07-15 02:16:13 +02:00
|
|
|
feePerKw chainfee.SatPerKWeight, csvDelay, leaseExpiry uint32,
|
2024-07-31 01:44:18 +02:00
|
|
|
whoseCommit lntypes.ChannelParty, isCommitFromInitiator bool,
|
2024-04-25 19:01:37 +02:00
|
|
|
chanType channeldb.ChannelType,
|
|
|
|
auxLeaves fn.Option[CommitAuxLeaves]) (*IncomingHtlcResolution, error) {
|
2018-01-17 03:23:41 +01:00
|
|
|
|
|
|
|
op := wire.OutPoint{
|
2020-12-09 12:24:01 +01:00
|
|
|
Hash: commitTx.TxHash(),
|
2018-01-17 03:23:41 +01:00
|
|
|
Index: uint32(htlc.OutputIndex),
|
|
|
|
}
|
|
|
|
|
2020-03-06 16:11:44 +01:00
|
|
|
// First, we'll re-generate the script the remote party used to
|
|
|
|
// send the HTLC to us in their commitment transaction.
|
2024-04-25 19:01:37 +02:00
|
|
|
auxLeaf := fn.ChainOption(func(l CommitAuxLeaves) input.AuxTapLeaf {
|
|
|
|
return l.IncomingHtlcLeaves[htlc.HtlcIndex].AuxTapLeaf
|
|
|
|
})(auxLeaves)
|
2023-03-02 06:38:53 +01:00
|
|
|
scriptInfo, err := genHtlcScript(
|
2024-07-31 01:44:18 +02:00
|
|
|
chanType, true, whoseCommit, htlc.RefundTimeout, htlc.RHash,
|
2024-04-25 19:01:37 +02:00
|
|
|
keyRing, auxLeaf,
|
2020-03-06 16:11:44 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
htlcPkScript := scriptInfo.PkScript()
|
|
|
|
|
|
|
|
// As this is an incoming HTLC, we're attempting to sweep with the
|
|
|
|
// success path.
|
|
|
|
scriptPath := input.ScriptPathSuccess
|
|
|
|
htlcWitnessScript, err := scriptInfo.WitnessScriptForPath(
|
|
|
|
scriptPath,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-03-02 06:38:53 +01:00
|
|
|
|
2018-01-17 03:23:41 +01:00
|
|
|
// If we're spending this output from the remote node's commitment,
|
|
|
|
// then we can skip the second layer and spend the output directly.
|
2024-07-31 01:44:18 +02:00
|
|
|
if whoseCommit.IsRemote() {
|
2018-01-17 03:23:41 +01:00
|
|
|
// With the script generated, we can completely populate the
|
|
|
|
// SignDescriptor needed to sweep the output.
|
2023-08-19 00:47:52 +02:00
|
|
|
prevFetcher := txscript.NewCannedPrevOutputFetcher(
|
|
|
|
htlcPkScript, int64(htlc.Amt.ToSatoshis()),
|
|
|
|
)
|
2023-03-02 06:43:37 +01:00
|
|
|
signDesc := input.SignDescriptor{
|
|
|
|
KeyDesc: localChanCfg.HtlcBasePoint,
|
|
|
|
SingleTweak: keyRing.LocalHtlcKeyTweak,
|
|
|
|
WitnessScript: htlcWitnessScript,
|
|
|
|
Output: &wire.TxOut{
|
|
|
|
PkScript: htlcPkScript,
|
|
|
|
Value: int64(htlc.Amt.ToSatoshis()),
|
|
|
|
},
|
2023-08-19 00:47:52 +02:00
|
|
|
HashType: sweepSigHash(chanType),
|
|
|
|
PrevOutputFetcher: prevFetcher,
|
2023-03-02 06:43:37 +01:00
|
|
|
}
|
|
|
|
|
2023-08-09 07:22:12 +02:00
|
|
|
//nolint:lll
|
2023-08-08 06:09:58 +02:00
|
|
|
if scriptTree, ok := scriptInfo.(input.TapscriptDescriptor); ok {
|
2023-03-02 06:43:37 +01:00
|
|
|
signDesc.SignMethod = input.TaprootScriptSpendSignMethod
|
2023-08-09 07:22:12 +02:00
|
|
|
ctrlBlock, err := scriptTree.CtrlBlockForPath(
|
|
|
|
scriptPath,
|
|
|
|
)
|
2023-08-08 06:09:58 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-03-02 06:43:37 +01:00
|
|
|
signDesc.ControlBlock, err = ctrlBlock.ToBytes()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-17 03:23:41 +01:00
|
|
|
return &IncomingHtlcResolution{
|
|
|
|
ClaimOutpoint: op,
|
2023-03-02 06:43:37 +01:00
|
|
|
SweepSignDesc: signDesc,
|
|
|
|
CsvDelay: HtlcSecondLevelInputSequence(chanType),
|
2018-01-17 03:23:41 +01:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2024-04-25 19:01:37 +02:00
|
|
|
secondLevelAuxLeaf := fn.ChainOption(
|
|
|
|
func(l CommitAuxLeaves) input.AuxTapLeaf {
|
|
|
|
leaves := l.IncomingHtlcLeaves
|
|
|
|
return leaves[htlc.HtlcIndex].SecondLevelLeaf
|
|
|
|
},
|
|
|
|
)(auxLeaves)
|
|
|
|
|
2018-01-17 03:23:41 +01:00
|
|
|
// Otherwise, we'll need to go to the second level to sweep this HTLC.
|
2023-03-02 06:43:37 +01:00
|
|
|
//
|
2018-01-17 03:23:41 +01:00
|
|
|
// First, we'll reconstruct the original HTLC success transaction,
|
|
|
|
// taking into account the fee rate used.
|
2020-03-06 16:11:49 +01:00
|
|
|
htlcFee := HtlcSuccessFee(chanType, feePerKw)
|
2018-01-17 03:23:41 +01:00
|
|
|
secondLevelOutputAmt := htlc.Amt.ToSatoshis() - htlcFee
|
2020-11-17 12:50:41 +01:00
|
|
|
successTx, err := CreateHtlcSuccessTx(
|
2021-07-15 02:16:13 +02:00
|
|
|
chanType, isCommitFromInitiator, op, secondLevelOutputAmt,
|
2023-08-09 07:22:12 +02:00
|
|
|
csvDelay, leaseExpiry, keyRing.RevocationKey,
|
2024-04-25 19:01:37 +02:00
|
|
|
keyRing.ToLocalKey, secondLevelAuxLeaf,
|
2018-01-17 03:23:41 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Once we've created the second-level transaction, we'll generate the
|
|
|
|
// SignDesc needed to spend the HTLC output using the success transaction.
|
2020-12-09 12:24:01 +01:00
|
|
|
txOut := commitTx.TxOut[htlc.OutputIndex]
|
2023-03-02 06:43:37 +01:00
|
|
|
prevFetcher := txscript.NewCannedPrevOutputFetcher(
|
|
|
|
txOut.PkScript, txOut.Value,
|
|
|
|
)
|
|
|
|
hashCache := txscript.NewTxSigHashes(successTx, prevFetcher)
|
2019-01-16 15:47:43 +01:00
|
|
|
successSignDesc := input.SignDescriptor{
|
2023-03-02 06:43:37 +01:00
|
|
|
KeyDesc: localChanCfg.HtlcBasePoint,
|
|
|
|
SingleTweak: keyRing.LocalHtlcKeyTweak,
|
|
|
|
WitnessScript: htlcWitnessScript,
|
|
|
|
Output: txOut,
|
2023-08-19 00:17:51 +02:00
|
|
|
HashType: sweepSigHash(chanType),
|
2023-03-02 06:43:37 +01:00
|
|
|
PrevOutputFetcher: prevFetcher,
|
|
|
|
SigHashes: hashCache,
|
|
|
|
InputIndex: 0,
|
2018-01-17 03:23:41 +01:00
|
|
|
}
|
|
|
|
|
2023-03-02 06:43:37 +01:00
|
|
|
htlcSig, err := input.ParseSignature(htlc.Signature)
|
2020-04-06 02:06:14 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-04-15 14:24:43 +02:00
|
|
|
// Next, we'll construct the full witness needed to satisfy the input of
|
|
|
|
// the success transaction. Don't specify the preimage yet. The preimage
|
|
|
|
// will be supplied by the contract resolver, either directly or when it
|
|
|
|
// becomes known.
|
2023-03-02 06:43:37 +01:00
|
|
|
var successWitness wire.TxWitness
|
2020-03-06 16:11:47 +01:00
|
|
|
sigHashType := HtlcSigHashType(chanType)
|
2023-08-08 06:09:58 +02:00
|
|
|
if scriptTree, ok := scriptInfo.(input.TapscriptDescriptor); ok {
|
2023-03-02 06:43:37 +01:00
|
|
|
successSignDesc.SignMethod = input.TaprootScriptSpendSignMethod
|
|
|
|
|
|
|
|
successWitness, err = input.ReceiverHTLCScriptTaprootRedeem(
|
|
|
|
htlcSig, sigHashType, nil, signer, &successSignDesc,
|
2023-08-08 06:09:58 +02:00
|
|
|
successTx, keyRing.RevocationKey,
|
|
|
|
scriptTree.TapScriptTree(),
|
2023-03-02 06:43:37 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// The control block is always the final element of the witness
|
|
|
|
// stack. We set this here as eventually the sweeper will need
|
|
|
|
// to re-sign, so it needs the isolated control block.
|
|
|
|
//
|
|
|
|
// TODO(roasbeef): move this into input.go?
|
|
|
|
ctlrBlkIdx := len(successWitness) - 1
|
|
|
|
successSignDesc.ControlBlock = successWitness[ctlrBlkIdx]
|
|
|
|
} else {
|
|
|
|
successWitness, err = input.ReceiverHtlcSpendRedeem(
|
|
|
|
htlcSig, sigHashType, nil, signer, &successSignDesc,
|
|
|
|
successTx,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-01-17 03:23:41 +01:00
|
|
|
}
|
|
|
|
successTx.TxIn[0].Witness = successWitness
|
|
|
|
|
2020-12-09 12:24:01 +01:00
|
|
|
// If this is an anchor type channel, the sign details will let us
|
|
|
|
// re-sign an aggregated tx later.
|
|
|
|
txSignDetails := HtlcSignDetails(
|
|
|
|
chanType, successSignDesc, sigHashType, htlcSig,
|
|
|
|
)
|
|
|
|
|
2018-01-17 03:23:41 +01:00
|
|
|
// Finally, we'll generate the script that the second-level transaction
|
|
|
|
// creates so we can generate the proper signDesc to sweep it after the
|
|
|
|
// CSV delay has passed.
|
2023-03-02 06:43:37 +01:00
|
|
|
var (
|
2023-08-08 06:09:58 +02:00
|
|
|
htlcSweepScript input.ScriptDescriptor
|
2023-03-02 06:43:37 +01:00
|
|
|
signMethod input.SignMethod
|
|
|
|
ctrlBlock []byte
|
2018-01-17 03:23:41 +01:00
|
|
|
)
|
2023-03-02 06:43:37 +01:00
|
|
|
if !chanType.IsTaproot() {
|
|
|
|
htlcSweepScript, err = SecondLevelHtlcScript(
|
|
|
|
chanType, isCommitFromInitiator, keyRing.RevocationKey,
|
|
|
|
keyRing.ToLocalKey, csvDelay, leaseExpiry,
|
2024-04-25 19:01:37 +02:00
|
|
|
secondLevelAuxLeaf,
|
2023-03-02 06:43:37 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
2023-08-09 07:22:12 +02:00
|
|
|
//nolint:lll
|
2023-03-02 06:43:37 +01:00
|
|
|
secondLevelScriptTree, err := input.TaprootSecondLevelScriptTree(
|
|
|
|
keyRing.RevocationKey, keyRing.ToLocalKey, csvDelay,
|
2024-04-25 19:01:37 +02:00
|
|
|
secondLevelAuxLeaf,
|
2023-03-02 06:43:37 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
signMethod = input.TaprootScriptSpendSignMethod
|
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
controlBlock, err := secondLevelScriptTree.CtrlBlockForPath(
|
|
|
|
input.ScriptPathSuccess,
|
2023-03-02 06:43:37 +01:00
|
|
|
)
|
2023-08-08 06:09:58 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-03-02 06:43:37 +01:00
|
|
|
ctrlBlock, err = controlBlock.ToBytes()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
htlcSweepScript = secondLevelScriptTree
|
|
|
|
}
|
|
|
|
|
|
|
|
// In this case, the witness script that needs to be signed will always
|
|
|
|
// be that of the success path.
|
|
|
|
htlcSweepWitnessScript, err := htlcSweepScript.WitnessScriptForPath(
|
|
|
|
input.ScriptPathSuccess,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2018-01-17 03:23:41 +01:00
|
|
|
}
|
|
|
|
|
2019-01-16 15:47:43 +01:00
|
|
|
localDelayTweak := input.SingleTweakBytes(
|
2018-02-18 00:17:40 +01:00
|
|
|
keyRing.CommitPoint, localChanCfg.DelayBasePoint.PubKey,
|
2018-01-18 22:45:30 +01:00
|
|
|
)
|
2018-01-17 03:23:41 +01:00
|
|
|
return &IncomingHtlcResolution{
|
|
|
|
SignedSuccessTx: successTx,
|
2020-12-09 12:24:01 +01:00
|
|
|
SignDetails: txSignDetails,
|
2018-01-17 03:23:41 +01:00
|
|
|
CsvDelay: csvDelay,
|
|
|
|
ClaimOutpoint: wire.OutPoint{
|
|
|
|
Hash: successTx.TxHash(),
|
|
|
|
Index: 0,
|
|
|
|
},
|
2019-01-16 15:47:43 +01:00
|
|
|
SweepSignDesc: input.SignDescriptor{
|
2018-02-18 00:17:40 +01:00
|
|
|
KeyDesc: localChanCfg.DelayBasePoint,
|
2017-09-22 23:15:01 +02:00
|
|
|
SingleTweak: localDelayTweak,
|
2023-08-08 06:09:58 +02:00
|
|
|
WitnessScript: htlcSweepWitnessScript,
|
2017-07-30 22:20:58 +02:00
|
|
|
Output: &wire.TxOut{
|
2023-08-08 06:09:58 +02:00
|
|
|
PkScript: htlcSweepScript.PkScript(),
|
2017-09-29 23:28:10 +02:00
|
|
|
Value: int64(secondLevelOutputAmt),
|
2017-07-30 22:20:58 +02:00
|
|
|
},
|
2023-08-19 00:47:52 +02:00
|
|
|
HashType: sweepSigHash(chanType),
|
|
|
|
PrevOutputFetcher: txscript.NewCannedPrevOutputFetcher(
|
|
|
|
htlcSweepScript.PkScript(),
|
|
|
|
int64(secondLevelOutputAmt),
|
|
|
|
),
|
2023-03-02 06:43:37 +01:00
|
|
|
SignMethod: signMethod,
|
|
|
|
ControlBlock: ctrlBlock,
|
2017-07-30 22:20:58 +02:00
|
|
|
},
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2019-01-23 05:45:29 +01:00
|
|
|
// HtlcPoint returns the htlc's outpoint on the commitment tx.
|
|
|
|
func (r *IncomingHtlcResolution) HtlcPoint() wire.OutPoint {
|
|
|
|
// If we have a success transaction, then the htlc's outpoint
|
|
|
|
// is the transaction's only input. Otherwise, it's the claim
|
|
|
|
// point.
|
|
|
|
if r.SignedSuccessTx != nil {
|
|
|
|
return r.SignedSuccessTx.TxIn[0].PreviousOutPoint
|
|
|
|
}
|
|
|
|
|
|
|
|
return r.ClaimOutpoint
|
|
|
|
}
|
|
|
|
|
|
|
|
// HtlcPoint returns the htlc's outpoint on the commitment tx.
|
|
|
|
func (r *OutgoingHtlcResolution) HtlcPoint() wire.OutPoint {
|
|
|
|
// If we have a timeout transaction, then the htlc's outpoint
|
|
|
|
// is the transaction's only input. Otherwise, it's the claim
|
|
|
|
// point.
|
|
|
|
if r.SignedTimeoutTx != nil {
|
|
|
|
return r.SignedTimeoutTx.TxIn[0].PreviousOutPoint
|
|
|
|
}
|
|
|
|
|
|
|
|
return r.ClaimOutpoint
|
|
|
|
}
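
// collectHtlcPointsSketch is an illustrative convenience helper, not part
// of the original API: it uses the HtlcPoint methods above to gather every
// commitment-level HTLC outpoint, e.g. so that a caller can register spend
// notifications for each of them.
func collectHtlcPointsSketch(res *HtlcResolutions) []wire.OutPoint {
	points := make([]wire.OutPoint, 0,
		len(res.IncomingHTLCs)+len(res.OutgoingHTLCs))

	for _, incoming := range res.IncomingHTLCs {
		points = append(points, incoming.HtlcPoint())
	}
	for _, outgoing := range res.OutgoingHTLCs {
		points = append(points, outgoing.HtlcPoint())
	}

	return points
}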
|
|
|
|
|
2017-07-30 22:20:58 +02:00
|
|
|
// extractHtlcResolutions creates a series of outgoing HTLC resolutions, and
|
|
|
|
// the local key used when generating the HTLC scripts. This function is to be
|
|
|
|
// used in two cases: force close, or a unilateral close.
|
2024-07-31 01:44:18 +02:00
|
|
|
func extractHtlcResolutions(feePerKw chainfee.SatPerKWeight,
|
|
|
|
whoseCommit lntypes.ChannelParty, signer input.Signer,
|
|
|
|
htlcs []channeldb.HTLC, keyRing *CommitmentKeyRing,
|
2017-07-30 22:20:58 +02:00
|
|
|
localChanCfg, remoteChanCfg *channeldb.ChannelConfig,
|
2021-07-15 02:16:13 +02:00
|
|
|
commitTx *wire.MsgTx, chanType channeldb.ChannelType,
|
2024-04-25 19:01:37 +02:00
|
|
|
isCommitFromInitiator bool, leaseExpiry uint32,
|
|
|
|
auxLeaves fn.Option[CommitAuxLeaves]) (*HtlcResolutions, error) {
|
2017-07-30 22:20:58 +02:00
|
|
|
|
2018-01-17 03:36:31 +01:00
|
|
|
// TODO(roasbeef): don't need to swap csv delay?
|
2017-07-30 22:20:58 +02:00
|
|
|
dustLimit := remoteChanCfg.DustLimit
|
2017-09-27 04:03:04 +02:00
|
|
|
csvDelay := remoteChanCfg.CsvDelay
|
2024-07-31 01:44:18 +02:00
|
|
|
if whoseCommit.IsLocal() {
|
2017-07-30 22:20:58 +02:00
|
|
|
dustLimit = localChanCfg.DustLimit
|
2017-09-27 04:03:04 +02:00
|
|
|
csvDelay = localChanCfg.CsvDelay
|
2017-07-30 22:20:58 +02:00
|
|
|
}
|
|
|
|
|
2018-01-17 03:36:31 +01:00
|
|
|
incomingResolutions := make([]IncomingHtlcResolution, 0, len(htlcs))
|
|
|
|
outgoingResolutions := make([]OutgoingHtlcResolution, 0, len(htlcs))
|
2017-09-29 23:32:11 +02:00
|
|
|
for _, htlc := range htlcs {
|
2020-03-06 16:11:45 +01:00
|
|
|
htlc := htlc
|
|
|
|
|
2018-01-17 03:36:31 +01:00
|
|
|
// We'll skip any HTLC's which were dust on the commitment
|
2017-07-30 22:20:58 +02:00
|
|
|
// transaction, as these don't have a corresponding output
|
|
|
|
// within the commitment transaction.
|
2021-09-28 17:34:10 +02:00
|
|
|
if HtlcIsDust(
|
2024-07-31 01:44:18 +02:00
|
|
|
chanType, htlc.Incoming, whoseCommit, feePerKw,
|
2020-03-06 16:11:49 +01:00
|
|
|
htlc.Amt.ToSatoshis(), dustLimit,
|
|
|
|
) {
|
2022-02-07 13:58:28 +01:00
|
|
|
|
2017-07-30 22:20:58 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-01-17 03:36:31 +01:00
|
|
|
// If the HTLC is incoming, then we'll attempt to see if we
|
|
|
|
// know the pre-image to the HTLC.
|
|
|
|
if htlc.Incoming {
|
|
|
|
// We'll create an incoming HTLC resolution, as we can
|
|
|
|
// satisfy the contract.
|
|
|
|
ihr, err := newIncomingHtlcResolution(
|
2020-12-09 12:24:01 +01:00
|
|
|
signer, localChanCfg, commitTx, &htlc,
|
2024-04-25 19:01:37 +02:00
|
|
|
keyRing, feePerKw, uint32(csvDelay),
|
|
|
|
leaseExpiry, whoseCommit, isCommitFromInitiator,
|
|
|
|
chanType, auxLeaves,
|
2018-01-17 03:36:31 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
2023-03-02 06:43:37 +01:00
|
|
|
return nil, fmt.Errorf("incoming resolution "+
|
|
|
|
"failed: %v", err)
|
2018-01-17 03:36:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
incomingResolutions = append(incomingResolutions, *ihr)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
ohr, err := newOutgoingHtlcResolution(
|
2020-12-09 12:24:01 +01:00
|
|
|
signer, localChanCfg, commitTx, &htlc, keyRing,
|
2024-07-31 01:44:18 +02:00
|
|
|
feePerKw, uint32(csvDelay), leaseExpiry, whoseCommit,
|
2024-04-25 19:01:37 +02:00
|
|
|
isCommitFromInitiator, chanType, auxLeaves,
|
2017-09-27 04:03:04 +02:00
|
|
|
)
|
2017-07-30 22:20:58 +02:00
|
|
|
if err != nil {
|
2023-03-02 06:43:37 +01:00
|
|
|
return nil, fmt.Errorf("outgoing resolution "+
|
|
|
|
"failed: %v", err)
|
2017-07-30 22:20:58 +02:00
|
|
|
}
|
|
|
|
|
2018-01-17 03:36:31 +01:00
|
|
|
outgoingResolutions = append(outgoingResolutions, *ohr)
|
2017-07-30 22:20:58 +02:00
|
|
|
}
|
|
|
|
|
2018-01-17 03:36:31 +01:00
|
|
|
return &HtlcResolutions{
|
|
|
|
IncomingHTLCs: incomingResolutions,
|
|
|
|
OutgoingHTLCs: outgoingResolutions,
|
|
|
|
}, nil
|
2017-05-16 03:12:52 +02:00
|
|
|
}
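
// nonDustHtlcsSketch is a hypothetical helper, not part of the original
// file, that applies the same dust filter used in extractHtlcResolutions
// above: HTLCs that are dust at the given fee rate have no output on the
// commitment transaction, so there is nothing to resolve for them.
func nonDustHtlcsSketch(chanType channeldb.ChannelType,
	whoseCommit lntypes.ChannelParty, feePerKw chainfee.SatPerKWeight,
	dustLimit btcutil.Amount, htlcs []channeldb.HTLC) []channeldb.HTLC {

	kept := make([]channeldb.HTLC, 0, len(htlcs))
	for _, htlc := range htlcs {
		if HtlcIsDust(
			chanType, htlc.Incoming, whoseCommit, feePerKw,
			htlc.Amt.ToSatoshis(), dustLimit,
		) {
			continue
		}

		kept = append(kept, htlc)
	}

	return kept
}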
|
|
|
|
|
2019-12-13 11:14:22 +01:00
|
|
|
// AnchorResolution holds the information necessary to spend our commitment tx
|
|
|
|
// anchor.
|
|
|
|
type AnchorResolution struct {
|
|
|
|
// AnchorSignDescriptor is the sign descriptor for our anchor.
|
|
|
|
AnchorSignDescriptor input.SignDescriptor
|
|
|
|
|
|
|
|
// CommitAnchor is the anchor outpoint on the commit tx.
|
|
|
|
CommitAnchor wire.OutPoint
|
2020-09-04 11:19:27 +02:00
|
|
|
|
|
|
|
// CommitFee is the fee of the commit tx.
|
|
|
|
CommitFee btcutil.Amount
|
|
|
|
|
|
|
|
// CommitWeight is the weight of the commit tx.
|
2024-05-24 15:56:30 +02:00
|
|
|
CommitWeight lntypes.WeightUnit
|
2019-12-13 11:14:22 +01:00
|
|
|
}
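
// commitFeeRateSketch is an illustrative helper, not part of the original
// source, showing how CommitFee and CommitWeight above combine into the
// effective fee rate of the commitment transaction. That rate is useful
// when deciding whether the anchor output should be used to fee bump
// (CPFP) a stuck commitment.
func commitFeeRateSketch(a *AnchorResolution) chainfee.SatPerKWeight {
	if a == nil || a.CommitWeight == 0 {
		return 0
	}

	// Fee rate in sat/kw = fee in satoshis * 1000 / weight in weight
	// units.
	return chainfee.SatPerKWeight(
		int64(a.CommitFee) * 1000 / int64(a.CommitWeight),
	)
}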
|
|
|
|
|
2018-03-19 15:19:19 +01:00
|
|
|
// LocalForceCloseSummary describes the final commitment state before the
|
|
|
|
// channel is locked-down to initiate a force closure by broadcasting the
|
|
|
|
// latest state on-chain. If we intend to broadcast this state, the
|
|
|
|
// channel should not be used after generating this close summary. The summary
|
|
|
|
// includes all the information required to claim all rightfully owned outputs
|
|
|
|
// when the commitment gets confirmed.
|
|
|
|
type LocalForceCloseSummary struct {
|
2017-05-05 00:37:03 +02:00
|
|
|
// ChanPoint is the outpoint that created the channel which has been
|
|
|
|
// force closed.
|
|
|
|
ChanPoint wire.OutPoint
|
|
|
|
|
2018-03-19 15:19:19 +01:00
|
|
|
// CloseTx is the transaction which can be used to close the channel
|
|
|
|
// on-chain. When we initiate a force close, this will be our latest
|
|
|
|
// commitment state.
|
2016-09-12 21:33:22 +02:00
|
|
|
CloseTx *wire.MsgTx
|
|
|
|
|
2018-01-17 03:17:18 +01:00
|
|
|
// CommitResolution contains all the data required to sweep the output
|
2018-03-19 15:19:19 +01:00
|
|
|
// to ourselves. Since this is our commitment transaction, we'll need
|
2018-01-17 03:17:18 +01:00
|
|
|
// to wait a time delay before we can sweep the output.
|
2017-08-05 03:32:25 +02:00
|
|
|
//
|
2018-01-17 03:17:18 +01:00
|
|
|
// NOTE: If our commitment delivery output is below the dust limit,
|
|
|
|
// then this will be nil.
|
|
|
|
CommitResolution *CommitOutputResolution
|
|
|
|
|
|
|
|
// HtlcResolutions contains all the data required to sweep any outgoing
|
2018-03-19 15:19:19 +01:00
|
|
|
// HTLC's and incoming HTLC's we know the preimage to. For each of these
|
2018-01-17 03:17:18 +01:00
|
|
|
// HTLC's, we'll need to go to the second level to sweep them fully.
|
|
|
|
HtlcResolutions *HtlcResolutions
|
|
|
|
|
|
|
|
// ChanSnapshot is a snapshot of the final state of the channel at the
|
2018-03-19 15:19:19 +01:00
|
|
|
// time the summary was created.
|
2018-01-17 03:17:18 +01:00
|
|
|
ChanSnapshot channeldb.ChannelSnapshot
|
2019-12-13 11:14:22 +01:00
|
|
|
|
|
|
|
// AnchorResolution contains the data required to sweep the anchor
|
|
|
|
// output. If the channel type doesn't include anchors, the value of
|
|
|
|
// this field will be nil.
|
|
|
|
AnchorResolution *AnchorResolution
|
2016-11-23 09:29:05 +01:00
|
|
|
}
|
|
|
|
|
2016-09-12 21:33:22 +02:00
|
|
|
// ForceClose executes a unilateral closure of the transaction at the current
|
|
|
|
// lowest commitment height of the channel. Following a force closure, all
|
|
|
|
// state transitions, or modifications to the state update logs will be
|
2018-03-19 15:19:19 +01:00
|
|
|
// rejected. Additionally, this function also returns a LocalForceCloseSummary
|
|
|
|
// which includes the necessary details required to sweep all the time-locked
|
|
|
|
// outputs within the commitment transaction.
|
2016-09-12 21:33:22 +02:00
|
|
|
//
|
|
|
|
// TODO(roasbeef): all methods need to abort if in dispute state
|
2018-03-19 15:19:19 +01:00
|
|
|
func (lc *LightningChannel) ForceClose() (*LocalForceCloseSummary, error) {
|
2016-09-12 21:33:22 +02:00
|
|
|
lc.Lock()
|
|
|
|
defer lc.Unlock()
|
|
|
|
|
2018-12-10 04:54:25 +01:00
|
|
|
// If we've detected local data loss for this channel, then we won't
|
|
|
|
// allow a force close, as it may be the case that we have a dated
|
|
|
|
// version of the commitment, or this is actually a channel shell.
|
|
|
|
if lc.channelState.HasChanStatus(channeldb.ChanStatusLocalDataLoss) {
|
2023-09-14 02:15:48 +02:00
|
|
|
return nil, fmt.Errorf("%w: channel_state=%v",
|
|
|
|
ErrForceCloseLocalDataLoss,
|
|
|
|
lc.channelState.ChanStatus())
|
2018-12-10 04:54:25 +01:00
|
|
|
}
|
|
|
|
|
2016-11-23 09:29:05 +01:00
|
|
|
commitTx, err := lc.getSignedCommitTx()
|
2016-09-12 21:33:22 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-03-19 15:19:19 +01:00
|
|
|
localCommitment := lc.channelState.LocalCommitment
|
2018-12-10 04:54:25 +01:00
|
|
|
summary, err := NewLocalForceCloseSummary(
|
2019-04-15 14:24:43 +02:00
|
|
|
lc.channelState, lc.Signer, commitTx,
|
2024-06-04 07:58:57 +02:00
|
|
|
localCommitment.CommitHeight, lc.leafStore, lc.auxResolver,
|
2018-12-10 04:54:25 +01:00
|
|
|
)
|
2018-03-19 15:19:19 +01:00
|
|
|
if err != nil {
|
2023-03-02 06:40:44 +01:00
|
|
|
return nil, fmt.Errorf("unable to gen force close "+
|
|
|
|
"summary: %w", err)
|
2018-03-19 15:19:19 +01:00
|
|
|
}
|
|
|
|
|
2024-01-31 03:06:49 +01:00
|
|
|
// Mark the channel as closed to block future closure requests.
|
|
|
|
lc.isClosed = true
|
2018-03-19 15:19:19 +01:00
|
|
|
|
|
|
|
return summary, nil
|
|
|
|
}
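
// forceCloseAndPublishSketch is a hypothetical sketch, not part of the
// original API, of how a caller might drive ForceClose: generate the
// summary, publish the signed commitment, and then hand the resolutions to
// whatever sweeping machinery the caller uses. The publish callback stands
// in for the caller's broadcast mechanism and is purely illustrative.
func forceCloseAndPublishSketch(lc *LightningChannel,
	publish func(*wire.MsgTx) error) (*LocalForceCloseSummary, error) {

	summary, err := lc.ForceClose()
	if err != nil {
		return nil, err
	}

	// CloseTx is our fully signed latest commitment; broadcasting it is
	// what actually force closes the channel on-chain.
	if err := publish(summary.CloseTx); err != nil {
		return nil, fmt.Errorf("unable to publish force close "+
			"tx: %w", err)
	}

	return summary, nil
}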
|
|
|
|
|
|
|
|
// NewLocalForceCloseSummary generates a LocalForceCloseSummary from the given
|
|
|
|
// channel state. The passed commitTx must be a fully signed commitment
|
|
|
|
// transaction corresponding to localCommit.
|
2020-11-18 22:45:35 +01:00
|
|
|
func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel,
|
2024-04-25 19:00:42 +02:00
|
|
|
signer input.Signer, commitTx *wire.MsgTx, stateNum uint64,
|
2024-06-04 07:58:57 +02:00
|
|
|
leafStore fn.Option[AuxLeafStore],
|
|
|
|
auxResolver fn.Option[AuxContractResolver]) (*LocalForceCloseSummary,
|
|
|
|
error) {
|
2018-03-19 15:19:19 +01:00
|
|
|
|
2017-01-24 10:38:25 +01:00
|
|
|
// Re-derive the original pkScript for the to-self output within the
|
|
|
|
// commitment transaction. We'll need this to find the corresponding
|
|
|
|
// output in the commitment transaction and potentially for creating
|
|
|
|
// the sign descriptor.
|
2018-03-19 15:19:19 +01:00
|
|
|
csvTimeout := uint32(chanState.LocalChanCfg.CsvDelay)
|
2020-11-18 22:45:35 +01:00
|
|
|
|
|
|
|
// We use the passed state num to derive our scripts, since in case
|
|
|
|
// this is after recovery, our latest channel state might not be up to
|
|
|
|
// date.
|
|
|
|
revocation, err := chanState.RevocationProducer.AtIndex(stateNum)
|
2017-01-24 10:38:25 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-01-16 15:47:43 +01:00
|
|
|
commitPoint := input.ComputeCommitmentPoint(revocation[:])
|
2019-09-17 04:06:19 +02:00
|
|
|
keyRing := DeriveCommitmentKeys(
|
2024-07-31 01:44:18 +02:00
|
|
|
commitPoint, lntypes.Local, chanState.ChanType,
|
2019-09-17 04:06:19 +02:00
|
|
|
&chanState.LocalChanCfg, &chanState.RemoteChanCfg,
|
2019-08-01 05:10:45 +02:00
|
|
|
)
|
2020-01-06 11:42:04 +01:00
|
|
|
|
2024-04-25 19:01:37 +02:00
|
|
|
auxResult, err := fn.MapOptionZ(
|
|
|
|
leafStore, func(s AuxLeafStore) fn.Result[CommitDiffAuxResult] {
|
|
|
|
return s.FetchLeavesFromCommit(
|
|
|
|
NewAuxChanState(chanState),
|
|
|
|
chanState.LocalCommitment, *keyRing,
|
|
|
|
)
|
|
|
|
},
|
|
|
|
).Unpack()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to fetch aux leaves: %w", err)
|
|
|
|
}
|
2024-03-17 21:53:38 +01:00
|
|
|
|
2021-07-15 02:16:13 +02:00
|
|
|
var leaseExpiry uint32
|
|
|
|
if chanState.ChanType.HasLeaseExpiration() {
|
|
|
|
leaseExpiry = chanState.ThawHeight
|
|
|
|
}
|
2024-04-25 19:01:37 +02:00
|
|
|
|
|
|
|
localAuxLeaf := fn.ChainOption(
|
|
|
|
func(l CommitAuxLeaves) input.AuxTapLeaf {
|
|
|
|
return l.LocalAuxLeaf
|
|
|
|
},
|
|
|
|
)(auxResult.AuxLeaves)
|
2021-07-15 02:06:13 +02:00
|
|
|
toLocalScript, err := CommitScriptToSelf(
|
2021-07-15 02:16:13 +02:00
|
|
|
chanState.ChanType, chanState.IsInitiator, keyRing.ToLocalKey,
|
2024-04-25 19:01:37 +02:00
|
|
|
keyRing.RevocationKey, csvTimeout, leaseExpiry, localAuxLeaf,
|
2020-01-06 11:42:04 +01:00
|
|
|
)
|
2017-01-24 10:38:25 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-09-12 21:33:22 +02:00
|
|
|
// Locate the output index of the delayed commitment output back to us.
|
|
|
|
// We'll return the details of this output to the caller so they can
|
|
|
|
// sweep it once it's mature.
|
2017-07-30 22:21:26 +02:00
|
|
|
var (
|
2020-11-18 22:45:35 +01:00
|
|
|
delayIndex uint32
|
|
|
|
delayOut *wire.TxOut
|
2017-07-30 22:21:26 +02:00
|
|
|
)
|
2016-09-12 21:33:22 +02:00
|
|
|
for i, txOut := range commitTx.TxOut {
|
2023-08-08 06:09:58 +02:00
|
|
|
if !bytes.Equal(toLocalScript.PkScript(), txOut.PkScript) {
|
2016-09-12 21:33:22 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
delayIndex = uint32(i)
|
2020-11-18 22:45:35 +01:00
|
|
|
delayOut = txOut
|
2017-01-24 10:38:25 +01:00
|
|
|
break
|
2016-09-12 21:33:22 +02:00
|
|
|
}
|
|
|
|
|
2016-11-21 06:54:18 +01:00
|
|
|
// With the necessary information gathered above, create a new sign
|
2016-09-12 21:33:22 +02:00
|
|
|
// descriptor which is capable of generating the signature the caller
|
|
|
|
// needs to sweep this output. The hash cache, and input index are not
|
|
|
|
// set as the caller will decide these values once sweeping the output.
|
2017-07-30 22:21:26 +02:00
|
|
|
// If the output is non-existent (dust), have the sign descriptor be
|
|
|
|
// nil.
|
2018-01-17 03:17:18 +01:00
|
|
|
var commitResolution *CommitOutputResolution
|
2020-11-18 22:45:35 +01:00
|
|
|
if delayOut != nil {
|
2023-08-08 06:09:58 +02:00
|
|
|
// When attempting to sweep our own output, we only need the
|
|
|
|
// witness script for the delay path.
|
|
|
|
scriptPath := input.ScriptPathDelay
|
|
|
|
witnessScript, err := toLocalScript.WitnessScriptForPath(
|
|
|
|
scriptPath,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2020-11-18 22:45:35 +01:00
|
|
|
localBalance := delayOut.Value
|
2018-01-17 03:17:18 +01:00
|
|
|
commitResolution = &CommitOutputResolution{
|
|
|
|
SelfOutPoint: wire.OutPoint{
|
|
|
|
Hash: commitTx.TxHash(),
|
|
|
|
Index: delayIndex,
|
2017-01-24 10:38:25 +01:00
|
|
|
},
|
2019-01-16 15:47:43 +01:00
|
|
|
SelfOutputSignDesc: input.SignDescriptor{
|
2018-03-19 15:19:19 +01:00
|
|
|
KeyDesc: chanState.LocalChanCfg.DelayBasePoint,
|
2020-01-06 11:42:05 +01:00
|
|
|
SingleTweak: keyRing.LocalCommitKeyTweak,
|
2023-08-08 06:09:58 +02:00
|
|
|
WitnessScript: witnessScript,
|
2018-01-17 03:17:18 +01:00
|
|
|
Output: &wire.TxOut{
|
2020-11-18 22:45:35 +01:00
|
|
|
PkScript: delayOut.PkScript,
|
|
|
|
Value: localBalance,
|
2018-01-17 03:17:18 +01:00
|
|
|
},
|
2023-08-19 00:17:51 +02:00
|
|
|
HashType: sweepSigHash(chanState.ChanType),
|
2018-01-17 03:17:18 +01:00
|
|
|
},
|
|
|
|
MaturityDelay: csvTimeout,
|
2017-01-24 10:38:25 +01:00
|
|
|
}
|
2023-03-02 06:43:58 +01:00
|
|
|
|
|
|
|
// For taproot channels, we'll need to set some additional
|
|
|
|
// fields to ensure the output can be swept.
|
2023-08-08 06:09:58 +02:00
|
|
|
scriptTree, ok := toLocalScript.(input.TapscriptDescriptor)
|
|
|
|
if ok {
|
2023-03-02 06:43:58 +01:00
|
|
|
commitResolution.SelfOutputSignDesc.SignMethod =
|
|
|
|
input.TaprootScriptSpendSignMethod
|
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
ctrlBlock, err := scriptTree.CtrlBlockForPath(
|
|
|
|
scriptPath,
|
2023-03-02 06:43:58 +01:00
|
|
|
)
|
2023-08-08 06:09:58 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-08-09 07:22:12 +02:00
|
|
|
//nolint:lll
|
2023-03-02 06:43:58 +01:00
|
|
|
commitResolution.SelfOutputSignDesc.ControlBlock, err = ctrlBlock.ToBytes()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
2024-06-04 07:58:57 +02:00
|
|
|
|
|
|
|
// At this point, we'll check to see if we need any extra
|
|
|
|
// resolution data for this output.
|
|
|
|
resolveBlob := fn.MapOptionZ(
|
|
|
|
auxResolver,
|
|
|
|
func(a AuxContractResolver) fn.Result[tlv.Blob] {
|
|
|
|
//nolint:lll
|
|
|
|
return a.ResolveContract(ResolutionReq{
|
|
|
|
ChanPoint: chanState.FundingOutpoint,
|
|
|
|
ShortChanID: chanState.ShortChanID(),
|
|
|
|
Initiator: chanState.IsInitiator,
|
|
|
|
CommitBlob: chanState.LocalCommitment.CustomBlob,
|
|
|
|
FundingBlob: chanState.CustomBlob,
|
|
|
|
Type: input.TaprootLocalCommitSpend,
|
|
|
|
CloseType: LocalForceClose,
|
|
|
|
CommitTx: commitTx,
|
|
|
|
ContractPoint: commitResolution.SelfOutPoint,
|
|
|
|
SignDesc: commitResolution.SelfOutputSignDesc,
|
|
|
|
KeyRing: keyRing,
|
|
|
|
CsvDelay: csvTimeout,
|
|
|
|
CommitFee: chanState.LocalCommitment.CommitFee,
|
|
|
|
})
|
|
|
|
},
|
|
|
|
)
|
|
|
|
if err := resolveBlob.Err(); err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to aux resolve: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
commitResolution.ResolutionBlob = resolveBlob.Option()
|
2016-09-12 21:33:22 +02:00
|
|
|
}
|
|
|
|
|
2017-07-30 22:21:26 +02:00
|
|
|
// Once the delay output has been found (if it exists), then we'll also
|
|
|
|
// need to create a series of sign descriptors for any lingering
|
2020-11-18 22:45:35 +01:00
|
|
|
// outgoing HTLC's that we'll need to claim as well. If this is after
|
|
|
|
// recovery, there is not much we can do with HTLCs, so we'll always
|
|
|
|
// use what we have in our latest state when extracting resolutions.
|
|
|
|
localCommit := chanState.LocalCommitment
|
2017-09-22 23:15:01 +02:00
|
|
|
htlcResolutions, err := extractHtlcResolutions(
|
2024-07-31 01:44:18 +02:00
|
|
|
chainfee.SatPerKWeight(localCommit.FeePerKw), lntypes.Local,
|
|
|
|
signer, localCommit.Htlcs, keyRing, &chanState.LocalChanCfg,
|
2020-12-09 12:24:01 +01:00
|
|
|
&chanState.RemoteChanCfg, commitTx, chanState.ChanType,
|
2024-04-25 19:01:37 +02:00
|
|
|
chanState.IsInitiator, leaseExpiry, auxResult.AuxLeaves,
|
2019-04-15 14:24:43 +02:00
|
|
|
)
|
2017-07-30 22:21:26 +02:00
|
|
|
if err != nil {
|
2023-03-02 06:40:44 +01:00
|
|
|
return nil, fmt.Errorf("unable to gen htlc resolution: %w", err)
|
2017-07-30 22:21:26 +02:00
|
|
|
}
|
|
|
|
|
2019-12-13 11:14:22 +01:00
|
|
|
anchorResolution, err := NewAnchorResolution(
|
2024-07-31 01:44:18 +02:00
|
|
|
chanState, commitTx, keyRing, lntypes.Local,
|
2019-12-13 11:14:22 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
2023-03-02 06:40:44 +01:00
|
|
|
return nil, fmt.Errorf("unable to gen anchor "+
|
|
|
|
"resolution: %w", err)
|
2019-12-13 11:14:22 +01:00
|
|
|
}
|
|
|
|
|
2018-03-19 15:19:19 +01:00
|
|
|
return &LocalForceCloseSummary{
|
|
|
|
ChanPoint: chanState.FundingOutpoint,
|
2018-01-17 03:17:18 +01:00
|
|
|
CloseTx: commitTx,
|
|
|
|
CommitResolution: commitResolution,
|
|
|
|
HtlcResolutions: htlcResolutions,
|
2018-03-19 15:19:19 +01:00
|
|
|
ChanSnapshot: *chanState.Snapshot(),
|
2019-12-13 11:14:22 +01:00
|
|
|
AnchorResolution: anchorResolution,
|
2016-09-12 21:33:22 +02:00
|
|
|
}, nil
|
2015-12-17 05:58:01 +01:00
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:39 +02:00
|
|
|
// CloseOutput wraps a normal tx out with additional metadata that indicates if
|
|
|
|
// the output belongs to the initiator of the channel or not.
|
|
|
|
type CloseOutput struct {
|
|
|
|
wire.TxOut
|
|
|
|
|
|
|
|
// IsLocal indicates if the output belongs to the local party.
|
|
|
|
IsLocal bool
|
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:44 +02:00
|
|
|
// CloseSortFunc is a function type alias for a function that sorts the closing
|
|
|
|
// transaction.
|
|
|
|
type CloseSortFunc func(*wire.MsgTx) error
|
|
|
|
|
2023-01-20 04:43:47 +01:00
|
|
|
// chanCloseOpt is a functional option that can be used to modify the co-op
|
|
|
|
// close process.
|
|
|
|
type chanCloseOpt struct {
|
|
|
|
musigSession *MusigSession
|
2024-05-29 19:57:39 +02:00
|
|
|
|
|
|
|
extraCloseOutputs []CloseOutput
|
2024-05-29 19:57:44 +02:00
|
|
|
|
|
|
|
// customSort is a custom function that can be used to sort the
|
|
|
|
// transaction outputs. If this isn't set, then the default BIP-69
|
|
|
|
// sorting is used.
|
|
|
|
customSort CloseSortFunc
|
2023-01-20 04:43:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// ChanCloseOpt is a closure type that can be used to modify the set of default
|
|
|
|
// options.
|
|
|
|
type ChanCloseOpt func(*chanCloseOpt)
|
|
|
|
|
|
|
|
// defaultCloseOpts is the default set of close options.
|
|
|
|
func defaultCloseOpts() *chanCloseOpt {
|
|
|
|
return &chanCloseOpt{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// WithCoopCloseMusigSession can be used to apply an existing musig2 session to
|
|
|
|
// the cooperative close process. If specified, then a musig2 co-op close
|
|
|
|
// (single sig keyspend) will be used.
|
|
|
|
func WithCoopCloseMusigSession(session *MusigSession) ChanCloseOpt {
|
|
|
|
return func(opts *chanCloseOpt) {
|
|
|
|
opts.musigSession = session
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:39 +02:00
|
|
|
// WithExtraCloseOutputs can be used to add extra outputs to the cooperative
|
|
|
|
// close transaction.
|
|
|
|
func WithExtraCloseOutputs(extraOutputs []CloseOutput) ChanCloseOpt {
|
|
|
|
return func(opts *chanCloseOpt) {
|
|
|
|
opts.extraCloseOutputs = extraOutputs
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:44 +02:00
|
|
|
// WithCustomCoopSort can be used to modify the way the co-op close transaction
|
|
|
|
// is sorted.
|
|
|
|
func WithCustomCoopSort(sorter CloseSortFunc) ChanCloseOpt {
|
|
|
|
return func(opts *chanCloseOpt) {
|
|
|
|
opts.customSort = sorter
|
|
|
|
}
|
|
|
|
}
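// A minimal sketch of a CloseSortFunc, assuming a caller wants a simple
// value-then-script ordering of outputs instead of the default BIP-69
// sort. The function name and ordering rule are illustrative assumptions
// only; the CloseSortFunc contract (mutate the tx in place, return an
// error on failure) comes from the type above. Such a sorter could then
// be supplied via WithCustomCoopSort(sortCloseOutputsByValueSketch).
func sortCloseOutputsByValueSketch(tx *wire.MsgTx) error {
	slices.SortStableFunc(tx.TxOut, func(a, b *wire.TxOut) int {
		switch {
		case a.Value < b.Value:
			return -1
		case a.Value > b.Value:
			return 1
		default:
			return bytes.Compare(a.PkScript, b.PkScript)
		}
	})

	return nil
}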
|
|
|
|
|
2017-03-25 02:26:09 +01:00
|
|
|
// CreateCloseProposal is used by both parties in a cooperative channel close
|
|
|
|
// workflow to generate proposed close transactions and signatures. This method
|
|
|
|
// should only be executed once all pending HTLCs (if any) on the channel have
|
|
|
|
// been cleared/removed. Upon completion, the source channel will shift into
|
|
|
|
// the "closing" state, which indicates that all incoming/outgoing HTLC
|
|
|
|
// requests should be rejected. A signature for the closing transaction is
|
|
|
|
// returned.
|
2017-11-23 07:36:20 +01:00
|
|
|
func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount,
|
2023-01-20 04:43:47 +01:00
|
|
|
localDeliveryScript []byte, remoteDeliveryScript []byte,
|
|
|
|
closeOpts ...ChanCloseOpt) (input.Signature, *chainhash.Hash,
|
2020-04-06 02:06:38 +02:00
|
|
|
btcutil.Amount, error) {
|
2017-05-18 01:51:10 +02:00
|
|
|
|
2016-06-21 07:09:42 +02:00
|
|
|
lc.Lock()
|
2016-07-27 20:32:27 +02:00
|
|
|
defer lc.Unlock()
|
2016-06-21 07:09:42 +02:00
|
|
|
|
2024-01-31 03:06:49 +01:00
|
|
|
// If we're already closing the channel, then ignore this request.
|
|
|
|
if lc.isClosed {
|
2018-01-20 02:18:26 +01:00
|
|
|
return nil, nil, 0, ErrChanClosing
|
2016-06-21 07:09:42 +02:00
|
|
|
}
|
|
|
|
|
2023-01-20 04:43:47 +01:00
|
|
|
opts := defaultCloseOpts()
|
|
|
|
for _, optFunc := range closeOpts {
|
|
|
|
optFunc(opts)
|
|
|
|
}
|
|
|
|
|
2020-08-24 15:26:06 +02:00
|
|
|
// Get the final balances after subtracting the proposed fee, taking
|
|
|
|
// care not to persist the adjusted balance, as the feeRate may change
|
2017-05-18 01:51:10 +02:00
|
|
|
// during the channel closing process.
|
2020-08-24 15:44:13 +02:00
|
|
|
ourBalance, theirBalance, err := CoopCloseBalance(
|
2020-08-24 15:26:06 +02:00
|
|
|
lc.channelState.ChanType, lc.channelState.IsInitiator,
|
2024-05-29 19:57:43 +02:00
|
|
|
proposedFee,
|
|
|
|
lc.channelState.LocalCommitment.LocalBalance.ToSatoshis(),
|
|
|
|
lc.channelState.LocalCommitment.RemoteBalance.ToSatoshis(),
|
|
|
|
lc.channelState.LocalCommitment.CommitFee,
|
2020-08-24 15:26:06 +02:00
|
|
|
)
|
2020-08-24 15:44:13 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, 0, err
|
|
|
|
}
|
2017-05-01 20:45:02 +02:00
|
|
|
|
2023-08-09 04:18:34 +02:00
|
|
|
var closeTxOpts []CloseTxOpt
|
|
|
|
|
|
|
|
// If this is a taproot channel, then we use an RBF'able funding input.
|
|
|
|
if lc.channelState.ChanType.IsTaproot() {
|
|
|
|
closeTxOpts = append(closeTxOpts, WithRBFCloseTx())
|
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:39 +02:00
|
|
|
// If we have any extra outputs to pass along, then we'll map that to
|
|
|
|
// the co-op close option txn type.
|
|
|
|
if opts.extraCloseOutputs != nil {
|
|
|
|
closeTxOpts = append(closeTxOpts, WithExtraTxCloseOutputs(
|
|
|
|
opts.extraCloseOutputs,
|
|
|
|
))
|
|
|
|
}
|
2024-05-29 19:57:44 +02:00
|
|
|
if opts.customSort != nil {
|
|
|
|
closeTxOpts = append(
|
|
|
|
closeTxOpts, WithCustomTxSort(opts.customSort),
|
|
|
|
)
|
|
|
|
}
|
2024-05-29 19:57:39 +02:00
|
|
|
|
2024-05-29 19:57:44 +02:00
|
|
|
closeTx, err := CreateCooperativeCloseTx(
|
2020-01-06 11:42:03 +01:00
|
|
|
fundingTxIn(lc.channelState), lc.channelState.LocalChanCfg.DustLimit,
|
2020-01-06 11:42:02 +01:00
|
|
|
lc.channelState.RemoteChanCfg.DustLimit, ourBalance, theirBalance,
|
2023-08-09 04:18:34 +02:00
|
|
|
localDeliveryScript, remoteDeliveryScript, closeTxOpts...,
|
2020-01-06 11:42:02 +01:00
|
|
|
)
|
2024-05-29 19:57:44 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, 0, err
|
|
|
|
}
|
2017-02-28 04:00:18 +01:00
|
|
|
|
2017-03-15 06:53:15 +01:00
|
|
|
// Ensure that the transaction doesn't explicitly violate any
|
2017-02-28 04:00:18 +01:00
|
|
|
// consensus rules such as being too big, or having any output with a
|
|
|
|
// negative value.
|
|
|
|
tx := btcutil.NewTx(closeTx)
|
|
|
|
if err := blockchain.CheckTransactionSanity(tx); err != nil {
|
2018-01-20 02:18:26 +01:00
|
|
|
return nil, nil, 0, err
|
2017-02-28 04:00:18 +01:00
|
|
|
}
|
2016-06-21 07:09:42 +02:00
|
|
|
|
2023-01-20 04:43:47 +01:00
|
|
|
// If we have a co-op close musig session, then this is a taproot
|
|
|
|
// channel, so we'll generate a _partial_ signature.
|
|
|
|
var sig input.Signature
|
|
|
|
if opts.musigSession != nil {
|
|
|
|
sig, err = opts.musigSession.SignCommit(closeTx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, 0, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// For regular channels, we'll sign the completed cooperative
|
|
|
|
// closure transaction. As the initiator we'll simply send our
|
|
|
|
// signature over to the remote party, using the generated txid
|
|
|
|
// to be notified once the closure transaction has been
|
|
|
|
// confirmed.
|
|
|
|
lc.signDesc.SigHashes = input.NewTxSigHashesV0Only(closeTx)
|
|
|
|
sig, err = lc.Signer.SignOutputRaw(closeTx, lc.signDesc)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, 0, err
|
|
|
|
}
|
2016-06-21 07:09:42 +02:00
|
|
|
}
|
|
|
|
|
2018-01-20 02:18:26 +01:00
|
|
|
closeTXID := closeTx.TxHash()
|
|
|
|
return sig, &closeTXID, ourBalance, nil
|
2016-06-21 07:09:42 +02:00
|
|
|
}
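// The following is a minimal, hypothetical sketch (not part of the
// package API) showing how a caller might drive CreateCloseProposal with
// the functional options defined above. The wrapper name and its
// parameters are illustrative assumptions; the CreateCloseProposal
// signature and the WithExtraCloseOutputs option come from this file.
func proposeCloseSketch(lc *LightningChannel, proposedFee btcutil.Amount,
	localScript, remoteScript []byte,
	extraOuts []CloseOutput) (input.Signature, *chainhash.Hash, error) {

	// Generate our half of the co-op close signature, optionally
	// carrying extra (e.g. custom channel) outputs.
	sig, closeTxid, ourBalance, err := lc.CreateCloseProposal(
		proposedFee, localScript, remoteScript,
		WithExtraCloseOutputs(extraOuts),
	)
	if err != nil {
		return nil, nil, err
	}

	// ourBalance is our settled balance once the proposed fee has been
	// subtracted; a real caller would persist or report it.
	_ = ourBalance

	return sig, closeTxid, nil
}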
|
|
|
|
|
|
|
|
// CompleteCooperativeClose completes the cooperative closure of the target
|
2017-03-25 02:26:09 +01:00
|
|
|
// active lightning channel. A fully signed closure transaction is
|
2018-01-09 02:42:13 +01:00
|
|
|
// returned. Additionally, we also return our final
|
|
|
|
// settled balance, which reflects any fees we may have paid.
|
2016-08-13 00:50:47 +02:00
|
|
|
//
|
2017-03-25 02:26:09 +01:00
|
|
|
// NOTE: The passed local and remote sigs are expected to be fully complete
|
|
|
|
// signatures including the proper sighash byte.
|
2020-04-06 02:07:01 +02:00
|
|
|
func (lc *LightningChannel) CompleteCooperativeClose(
|
|
|
|
localSig, remoteSig input.Signature,
|
2017-07-30 22:26:09 +02:00
|
|
|
localDeliveryScript, remoteDeliveryScript []byte,
|
2023-01-20 04:43:47 +01:00
|
|
|
proposedFee btcutil.Amount,
|
|
|
|
closeOpts ...ChanCloseOpt) (*wire.MsgTx, btcutil.Amount, error) {
|
2017-07-30 22:26:09 +02:00
|
|
|
|
2016-06-21 07:09:42 +02:00
|
|
|
lc.Lock()
|
2016-07-27 20:32:27 +02:00
|
|
|
defer lc.Unlock()
|
2016-06-21 07:09:42 +02:00
|
|
|
|
2024-01-31 03:06:49 +01:00
|
|
|
// If the channel is already closing, then ignore this request.
|
|
|
|
if lc.isClosed {
|
2016-06-21 07:09:42 +02:00
|
|
|
// TODO(roasbeef): check to ensure no pending payments
|
2018-01-09 02:42:13 +01:00
|
|
|
return nil, 0, ErrChanClosing
|
2016-06-21 07:09:42 +02:00
|
|
|
}
|
|
|
|
|
2023-01-20 04:43:47 +01:00
|
|
|
opts := defaultCloseOpts()
|
|
|
|
for _, optFunc := range closeOpts {
|
|
|
|
optFunc(opts)
|
|
|
|
}
|
|
|
|
|
2020-08-24 15:26:06 +02:00
|
|
|
// Get the final balances after subtracting the proposed fee.
|
2020-08-24 15:44:13 +02:00
|
|
|
ourBalance, theirBalance, err := CoopCloseBalance(
|
2020-08-24 15:26:06 +02:00
|
|
|
lc.channelState.ChanType, lc.channelState.IsInitiator,
|
2024-05-29 19:57:43 +02:00
|
|
|
proposedFee,
|
|
|
|
lc.channelState.LocalCommitment.LocalBalance.ToSatoshis(),
|
|
|
|
lc.channelState.LocalCommitment.RemoteBalance.ToSatoshis(),
|
|
|
|
lc.channelState.LocalCommitment.CommitFee,
|
2020-08-24 15:26:06 +02:00
|
|
|
)
|
2020-08-24 15:44:13 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, 0, err
|
|
|
|
}
|
2017-05-18 01:51:10 +02:00
|
|
|
|
2023-08-09 04:18:34 +02:00
|
|
|
var closeTxOpts []CloseTxOpt
|
|
|
|
|
|
|
|
// If this is a taproot channel, then we use an RBF'able funding input.
|
|
|
|
if lc.channelState.ChanType.IsTaproot() {
|
|
|
|
closeTxOpts = append(closeTxOpts, WithRBFCloseTx())
|
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:39 +02:00
|
|
|
// If we have any extra outputs to pass along, then we'll map that to
|
|
|
|
// the co-op close option txn type.
|
|
|
|
if opts.extraCloseOutputs != nil {
|
|
|
|
closeTxOpts = append(closeTxOpts, WithExtraTxCloseOutputs(
|
|
|
|
opts.extraCloseOutputs,
|
|
|
|
))
|
|
|
|
}
|
2024-05-29 19:57:44 +02:00
|
|
|
if opts.customSort != nil {
|
|
|
|
closeTxOpts = append(
|
|
|
|
closeTxOpts, WithCustomTxSort(opts.customSort),
|
|
|
|
)
|
|
|
|
}
|
2024-05-29 19:57:39 +02:00
|
|
|
|
2016-06-21 07:09:42 +02:00
|
|
|
// Create the transaction used to return the current settled balance
|
|
|
|
// on this active channel back to both parties. In this current model,
|
|
|
|
// the initiator pays full fees for the cooperative close transaction.
|
2024-05-29 19:57:44 +02:00
|
|
|
closeTx, err := CreateCooperativeCloseTx(
|
2020-01-06 11:42:03 +01:00
|
|
|
fundingTxIn(lc.channelState), lc.channelState.LocalChanCfg.DustLimit,
|
2020-01-06 11:42:02 +01:00
|
|
|
lc.channelState.RemoteChanCfg.DustLimit, ourBalance, theirBalance,
|
2023-08-09 04:18:34 +02:00
|
|
|
localDeliveryScript, remoteDeliveryScript, closeTxOpts...,
|
2020-01-06 11:42:02 +01:00
|
|
|
)
|
2024-05-29 19:57:44 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, 0, err
|
|
|
|
}
|
2016-06-21 07:09:42 +02:00
|
|
|
|
2017-02-28 04:00:18 +01:00
|
|
|
// Ensure that the transaction doesn't explicitly violate any
|
|
|
|
// consensus rules such as being too big, or having any output with a
|
|
|
|
// negative value.
|
|
|
|
tx := btcutil.NewTx(closeTx)
|
2023-01-20 04:43:47 +01:00
|
|
|
prevOut := lc.signDesc.Output
|
2017-02-28 04:00:18 +01:00
|
|
|
if err := blockchain.CheckTransactionSanity(tx); err != nil {
|
2018-01-09 02:42:13 +01:00
|
|
|
return nil, 0, err
|
2017-02-28 04:00:18 +01:00
|
|
|
}
|
2023-01-20 04:43:47 +01:00
|
|
|
|
|
|
|
prevOutputFetcher := txscript.NewCannedPrevOutputFetcher(
|
|
|
|
prevOut.PkScript, prevOut.Value,
|
2020-04-06 02:07:01 +02:00
|
|
|
)
|
2023-01-20 04:43:47 +01:00
|
|
|
hashCache := txscript.NewTxSigHashes(closeTx, prevOutputFetcher)
|
|
|
|
|
|
|
|
// Next, we'll complete the co-op close transaction. Depending on the
|
|
|
|
// set of options, we'll either do a regular p2wsh spend, or construct
|
|
|
|
// the final schnorr signature from a set of partial sigs.
|
|
|
|
if opts.musigSession != nil {
|
|
|
|
// For taproot channels, we'll use the attached session to
|
|
|
|
// combine the two partial signatures into a proper schnorr
|
|
|
|
// signature.
|
|
|
|
remotePartialSig, ok := remoteSig.(*MusigPartialSig)
|
|
|
|
if !ok {
|
|
|
|
return nil, 0, fmt.Errorf("expected MusigPartialSig, "+
|
|
|
|
"got %T", remoteSig)
|
|
|
|
}
|
|
|
|
|
|
|
|
finalSchnorrSig, err := opts.musigSession.CombineSigs(
|
|
|
|
remotePartialSig.sig,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, 0, fmt.Errorf("unable to combine "+
|
|
|
|
"final co-op close sig: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// The witness for a keyspend is just the signature itself.
|
|
|
|
closeTx.TxIn[0].Witness = wire.TxWitness{
|
|
|
|
finalSchnorrSig.Serialize(),
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// For regular channels, we'll need to construct the witness
|
|
|
|
// stack minding the order of the pubkeys+sigs on the stack.
|
|
|
|
ourKey := lc.channelState.LocalChanCfg.MultiSigKey.PubKey.
|
|
|
|
SerializeCompressed()
|
|
|
|
theirKey := lc.channelState.RemoteChanCfg.MultiSigKey.PubKey.
|
|
|
|
SerializeCompressed()
|
|
|
|
witness := input.SpendMultiSig(
|
|
|
|
lc.signDesc.WitnessScript, ourKey, localSig, theirKey,
|
|
|
|
remoteSig,
|
|
|
|
)
|
|
|
|
closeTx.TxIn[0].Witness = witness
|
|
|
|
}
|
2016-06-21 07:09:42 +02:00
|
|
|
|
2016-07-27 20:32:27 +02:00
|
|
|
// Validate the finalized transaction to ensure the output script is
|
|
|
|
// properly met, and that the remote peer supplied a valid signature.
|
2022-03-18 18:37:43 +01:00
|
|
|
vm, err := txscript.NewEngine(
|
|
|
|
prevOut.PkScript, closeTx, 0, txscript.StandardVerifyFlags, nil,
|
2023-01-20 04:43:47 +01:00
|
|
|
hashCache, prevOut.Value, prevOutputFetcher,
|
2022-03-18 18:37:43 +01:00
|
|
|
)
|
2016-07-27 20:32:27 +02:00
|
|
|
if err != nil {
|
2018-01-09 02:42:13 +01:00
|
|
|
return nil, 0, err
|
2016-07-27 20:32:27 +02:00
|
|
|
}
|
|
|
|
if err := vm.Execute(); err != nil {
|
2018-01-09 02:42:13 +01:00
|
|
|
return nil, 0, err
|
2016-07-27 20:32:27 +02:00
|
|
|
}
|
2016-06-21 07:09:42 +02:00
|
|
|
|
2017-02-28 03:52:23 +01:00
|
|
|
// As the transaction is sane and the scripts are valid, we'll mark the
|
|
|
|
// channel as closed now, as the closure transaction should get into the
|
|
|
|
// chain in a timely manner and possibly be re-broadcast by the wallet.
|
2024-01-31 03:06:49 +01:00
|
|
|
lc.isClosed = true
|
2017-02-28 03:52:23 +01:00
|
|
|
|
2018-01-09 02:42:13 +01:00
|
|
|
return closeTx, ourBalance, nil
|
2016-06-21 07:09:42 +02:00
|
|
|
}
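// A hypothetical sketch of the final step of the flow above: once both
// parties have exchanged signatures for the agreed-upon fee, the caller
// obtains the broadcastable close transaction. The helper name and
// parameter set are assumptions; CompleteCooperativeClose itself is
// defined above.
func finalizeCoopCloseSketch(lc *LightningChannel, localSig,
	remoteSig input.Signature, localScript, remoteScript []byte,
	agreedFee btcutil.Amount) (*wire.MsgTx, error) {

	closeTx, ourSettledBalance, err := lc.CompleteCooperativeClose(
		localSig, remoteSig, localScript, remoteScript, agreedFee,
	)
	if err != nil {
		return nil, err
	}

	// ourSettledBalance reflects any fees we paid as the initiator.
	_ = ourSettledBalance

	return closeTx, nil
}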
|
|
|
|
|
2021-05-06 13:53:11 +02:00
|
|
|
// AnchorResolutions is a set of anchor resolutions that's being used when
|
|
|
|
// sweeping anchors during local channel force close.
|
|
|
|
type AnchorResolutions struct {
|
|
|
|
// Local is the anchor resolution for the local commitment tx.
|
|
|
|
Local *AnchorResolution
|
|
|
|
|
|
|
|
// Remote is the anchor resolution for the remote commitment tx.
|
|
|
|
Remote *AnchorResolution
|
|
|
|
|
|
|
|
// RemotePending is the anchor resolution for the remote pending
|
|
|
|
// commitment tx. The value will be non-nil iff we've created a new
|
|
|
|
// commitment tx for the remote party which they haven't ACKed yet.
|
|
|
|
RemotePending *AnchorResolution
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewAnchorResolutions returns a set of anchor resolutions wrapped in the
|
|
|
|
// struct AnchorResolutions. Because we have no view on the mempool, we can
|
2023-01-20 02:06:38 +01:00
|
|
|
// only blindly anchor all of these txes down. The caller needs to check the
|
2021-05-06 13:53:11 +02:00
|
|
|
// returned values against nil to decide whether there exists an anchor
|
|
|
|
// resolution for local/remote/pending remote commitment txes.
|
|
|
|
func (lc *LightningChannel) NewAnchorResolutions() (*AnchorResolutions,
|
2019-12-13 11:14:22 +01:00
|
|
|
error) {
|
|
|
|
|
|
|
|
lc.Lock()
|
|
|
|
defer lc.Unlock()
|
|
|
|
|
2023-01-20 02:06:38 +01:00
|
|
|
var resolutions AnchorResolutions
|
2019-12-13 11:14:22 +01:00
|
|
|
|
|
|
|
// Add anchor for local commitment tx, if any.
|
2023-01-20 02:06:38 +01:00
|
|
|
revocation, err := lc.channelState.RevocationProducer.AtIndex(
|
|
|
|
lc.currentHeight,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
localCommitPoint := input.ComputeCommitmentPoint(revocation[:])
|
|
|
|
localKeyRing := DeriveCommitmentKeys(
|
2024-07-31 01:44:18 +02:00
|
|
|
localCommitPoint, lntypes.Local, lc.channelState.ChanType,
|
2023-01-20 02:06:38 +01:00
|
|
|
&lc.channelState.LocalChanCfg, &lc.channelState.RemoteChanCfg,
|
|
|
|
)
|
2019-12-13 11:14:22 +01:00
|
|
|
localRes, err := NewAnchorResolution(
|
|
|
|
lc.channelState, lc.channelState.LocalCommitment.CommitTx,
|
2024-07-31 01:44:18 +02:00
|
|
|
localKeyRing, lntypes.Local,
|
2019-12-13 11:14:22 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-05-06 13:53:11 +02:00
|
|
|
resolutions.Local = localRes
|
2019-12-13 11:14:22 +01:00
|
|
|
|
|
|
|
// Add anchor for remote commitment tx, if any.
|
2023-01-20 02:06:38 +01:00
|
|
|
remoteKeyRing := DeriveCommitmentKeys(
|
2024-07-31 01:44:18 +02:00
|
|
|
lc.channelState.RemoteCurrentRevocation, lntypes.Remote,
|
2023-01-20 02:06:38 +01:00
|
|
|
lc.channelState.ChanType, &lc.channelState.LocalChanCfg,
|
|
|
|
&lc.channelState.RemoteChanCfg,
|
|
|
|
)
|
2019-12-13 11:14:22 +01:00
|
|
|
remoteRes, err := NewAnchorResolution(
|
|
|
|
lc.channelState, lc.channelState.RemoteCommitment.CommitTx,
|
2024-07-31 01:44:18 +02:00
|
|
|
remoteKeyRing, lntypes.Remote,
|
2019-12-13 11:14:22 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-05-06 13:53:11 +02:00
|
|
|
resolutions.Remote = remoteRes
|
2019-12-13 11:14:22 +01:00
|
|
|
|
|
|
|
// Add anchor for remote pending commitment tx, if any.
|
|
|
|
remotePendingCommit, err := lc.channelState.RemoteCommitChainTip()
|
|
|
|
if err != nil && err != channeldb.ErrNoPendingCommit {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if remotePendingCommit != nil {
|
2023-01-20 02:06:38 +01:00
|
|
|
pendingRemoteKeyRing := DeriveCommitmentKeys(
|
2024-07-31 01:44:18 +02:00
|
|
|
lc.channelState.RemoteNextRevocation, lntypes.Remote,
|
2023-01-20 02:06:38 +01:00
|
|
|
lc.channelState.ChanType, &lc.channelState.LocalChanCfg,
|
|
|
|
&lc.channelState.RemoteChanCfg,
|
|
|
|
)
|
2019-12-13 11:14:22 +01:00
|
|
|
remotePendingRes, err := NewAnchorResolution(
|
|
|
|
lc.channelState,
|
|
|
|
remotePendingCommit.Commitment.CommitTx,
|
2024-07-31 01:44:18 +02:00
|
|
|
pendingRemoteKeyRing, lntypes.Remote,
|
2019-12-13 11:14:22 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-05-06 13:53:11 +02:00
|
|
|
resolutions.RemotePending = remotePendingRes
|
2019-12-13 11:14:22 +01:00
|
|
|
}
|
|
|
|
|
2023-01-20 02:06:38 +01:00
|
|
|
return &resolutions, nil
|
2019-12-13 11:14:22 +01:00
|
|
|
}
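// An illustrative sketch (the helper name is an assumption) of how a
// caller would consume NewAnchorResolutions: per the doc comment above,
// each field may be nil, so only non-nil entries are collected for
// sweeping.
func collectAnchorResolutionsSketch(
	lc *LightningChannel) ([]*AnchorResolution, error) {

	res, err := lc.NewAnchorResolutions()
	if err != nil {
		return nil, err
	}

	var anchors []*AnchorResolution
	for _, r := range []*AnchorResolution{
		res.Local, res.Remote, res.RemotePending,
	} {
		if r != nil {
			anchors = append(anchors, r)
		}
	}

	return anchors, nil
}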
|
|
|
|
|
|
|
|
// NewAnchorResolution returns the information that is required to sweep the
|
|
|
|
// local anchor.
|
|
|
|
func NewAnchorResolution(chanState *channeldb.OpenChannel,
|
2023-03-02 06:40:44 +01:00
|
|
|
commitTx *wire.MsgTx, keyRing *CommitmentKeyRing,
|
2024-07-31 01:44:18 +02:00
|
|
|
whoseCommit lntypes.ChannelParty) (*AnchorResolution, error) {
|
2019-12-13 11:14:22 +01:00
|
|
|
|
|
|
|
// Return nil resolution if the channel has no anchors.
|
|
|
|
if !chanState.ChanType.HasAnchors() {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2023-03-02 06:40:44 +01:00
|
|
|
// Derive our local anchor script. For taproot channels, rather than
|
|
|
|
// use the same multi-sig key for both commitments, the anchor script
|
|
|
|
// will differ depending on if this is our local or remote
|
|
|
|
// commitment.
|
|
|
|
localAnchor, remoteAnchor, err := CommitScriptAnchors(
|
2023-01-20 02:06:38 +01:00
|
|
|
chanState.ChanType, &chanState.LocalChanCfg,
|
|
|
|
&chanState.RemoteChanCfg, keyRing,
|
2019-12-13 11:14:22 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-07-31 01:44:18 +02:00
|
|
|
if chanState.ChanType.IsTaproot() && whoseCommit.IsRemote() {
|
2023-08-09 07:22:12 +02:00
|
|
|
//nolint:ineffassign
|
2023-03-02 06:40:44 +01:00
|
|
|
localAnchor, remoteAnchor = remoteAnchor, localAnchor
|
|
|
|
}
|
2019-12-13 11:14:22 +01:00
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
// TODO(roasbeef): remote anchor not needed above
|
|
|
|
|
2019-12-13 11:14:22 +01:00
|
|
|
// Look up the script on the commitment transaction. It may not be
|
|
|
|
// present if there is no output paying to us.
|
2023-03-02 06:40:44 +01:00
|
|
|
found, index := input.FindScriptOutputIndex(
|
2023-08-08 06:09:58 +02:00
|
|
|
commitTx, localAnchor.PkScript(),
|
2023-03-02 06:40:44 +01:00
|
|
|
)
|
2019-12-13 11:14:22 +01:00
|
|
|
if !found {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2023-08-08 06:09:58 +02:00
|
|
|
// For anchor outputs, we'll only ever care about the success path
|
|
|
|
// script (sweep after 1 block csv delay).
|
|
|
|
anchorWitnessScript, err := localAnchor.WitnessScriptForPath(
|
|
|
|
input.ScriptPathSuccess,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-12-13 11:14:22 +01:00
|
|
|
outPoint := &wire.OutPoint{
|
|
|
|
Hash: commitTx.TxHash(),
|
|
|
|
Index: index,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Instantiate the sign descriptor that allows sweeping of the anchor.
|
|
|
|
signDesc := &input.SignDescriptor{
|
|
|
|
KeyDesc: chanState.LocalChanCfg.MultiSigKey,
|
2023-08-08 06:09:58 +02:00
|
|
|
WitnessScript: anchorWitnessScript,
|
2019-12-13 11:14:22 +01:00
|
|
|
Output: &wire.TxOut{
|
2023-08-08 06:09:58 +02:00
|
|
|
PkScript: localAnchor.PkScript(),
|
2024-04-11 14:22:17 +02:00
|
|
|
Value: int64(AnchorSize),
|
2019-12-13 11:14:22 +01:00
|
|
|
},
|
2023-08-19 00:17:51 +02:00
|
|
|
HashType: sweepSigHash(chanState.ChanType),
|
2019-12-13 11:14:22 +01:00
|
|
|
}
|
|
|
|
|
2023-03-02 06:40:44 +01:00
|
|
|
// For taproot outputs, we'll need to ensure that the proper sign
|
|
|
|
// method is used, and the tweak as well.
|
2023-08-08 06:09:58 +02:00
|
|
|
if scriptTree, ok := localAnchor.(input.TapscriptDescriptor); ok {
|
2023-03-02 06:40:44 +01:00
|
|
|
signDesc.SignMethod = input.TaprootKeySpendSignMethod
|
|
|
|
|
2023-08-09 07:22:12 +02:00
|
|
|
//nolint:lll
|
2023-03-02 06:40:44 +01:00
|
|
|
signDesc.PrevOutputFetcher = txscript.NewCannedPrevOutputFetcher(
|
2024-04-11 14:22:17 +02:00
|
|
|
localAnchor.PkScript(), int64(AnchorSize),
|
2023-03-02 06:40:44 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
// For anchor outputs with taproot channels, the key desc is
|
|
|
|
// also different: we'll just re-use our local delay base point
|
|
|
|
// (which becomes our to local output).
|
2024-07-31 01:44:18 +02:00
|
|
|
if whoseCommit.IsLocal() {
|
2023-03-02 06:40:44 +01:00
|
|
|
// In addition to the sign method, we'll also need to
|
|
|
|
// ensure that the single tweak is set, as with the
|
|
|
|
// current formulation, we'll need to use two levels of
|
|
|
|
// tweaks: the normal LN tweak, and the tapscript
|
|
|
|
// tweak.
|
|
|
|
signDesc.SingleTweak = keyRing.LocalCommitKeyTweak
|
|
|
|
|
|
|
|
signDesc.KeyDesc = chanState.LocalChanCfg.DelayBasePoint
|
|
|
|
} else {
|
|
|
|
// When we're playing the force close of a remote
|
|
|
|
// commitment, as this is a "tweakless" channel type,
|
|
|
|
// we don't need a tweak value at all.
|
2023-08-09 07:22:12 +02:00
|
|
|
//
|
|
|
|
//nolint:lll
|
2023-03-02 06:40:44 +01:00
|
|
|
signDesc.KeyDesc = chanState.LocalChanCfg.PaymentBasePoint
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, as this is a keyspend method, we'll need to also
|
|
|
|
// include the taptweak as well.
|
2023-08-08 06:09:58 +02:00
|
|
|
signDesc.TapTweak = scriptTree.TapTweak()
|
2023-03-02 06:40:44 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
var witnessWeight int64
|
|
|
|
if chanState.ChanType.IsTaproot() {
|
|
|
|
witnessWeight = input.TaprootKeyPathWitnessSize
|
|
|
|
} else {
|
|
|
|
witnessWeight = input.WitnessCommitmentTxWeight
|
|
|
|
}
|
|
|
|
|
2020-09-04 11:19:27 +02:00
|
|
|
// Calculate commit tx weight. This commit tx doesn't yet include the
|
|
|
|
// witness spending the funding output, so we add the (worst case)
|
|
|
|
// weight for that too.
|
|
|
|
utx := btcutil.NewTx(commitTx)
|
2023-03-02 06:40:44 +01:00
|
|
|
weight := blockchain.GetTransactionWeight(utx) + witnessWeight
|
2020-09-04 11:19:27 +02:00
|
|
|
|
|
|
|
// Calculate commit tx fee.
|
|
|
|
fee := chanState.Capacity
|
|
|
|
for _, out := range commitTx.TxOut {
|
|
|
|
fee -= btcutil.Amount(out.Value)
|
|
|
|
}
|
|
|
|
|
2019-12-13 11:14:22 +01:00
|
|
|
return &AnchorResolution{
|
|
|
|
CommitAnchor: *outPoint,
|
|
|
|
AnchorSignDescriptor: *signDesc,
|
2024-05-24 15:56:30 +02:00
|
|
|
CommitWeight: lntypes.WeightUnit(weight),
|
2020-09-04 11:19:27 +02:00
|
|
|
CommitFee: fee,
|
2019-12-13 11:14:22 +01:00
|
|
|
}, nil
|
|
|
|
}
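// A minimal sketch of the fee calculation used above: the commitment fee
// is the channel capacity minus the sum of all commitment outputs. The
// helper name is an assumption made for illustration only.
func commitTxFeeSketch(capacity btcutil.Amount,
	commitTx *wire.MsgTx) btcutil.Amount {

	fee := capacity
	for _, out := range commitTx.TxOut {
		fee -= btcutil.Amount(out.Value)
	}

	return fee
}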
|
|
|
|
|
2020-02-19 12:27:41 +01:00
|
|
|
// AvailableBalance returns the current balance available for sending within
|
|
|
|
// the channel. By available balance, we mean that if at this very instant a
|
|
|
|
// new commitment were to be created which evaluates all the log entries, what
|
|
|
|
// would our available balance for adding an additional HTLC be. It takes into
|
2023-11-05 11:29:34 +01:00
|
|
|
// account the fee that must be paid for adding this HTLC, that we cannot spend
|
|
|
|
// from the channel reserve and moreover the FeeBuffer when we are the
|
|
|
|
// initiator of the channel. This method is useful when deciding if a given
|
|
|
|
// channel can accept an HTLC in the multi-hop forwarding scenario.
|
2017-11-10 08:08:50 +01:00
|
|
|
func (lc *LightningChannel) AvailableBalance() lnwire.MilliSatoshi {
|
|
|
|
lc.RLock()
|
|
|
|
defer lc.RUnlock()
|
|
|
|
|
2023-11-05 11:29:34 +01:00
|
|
|
bal, _ := lc.availableBalance(FeeBuffer)
|
2017-11-26 20:50:39 +01:00
|
|
|
return bal
|
2017-11-10 08:08:50 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// availableBalance is the private, non-mutexed version of AvailableBalance.
|
|
|
|
// This method is provided so methods that already hold the lock can access
|
2017-11-26 20:50:39 +01:00
|
|
|
// it. Additionally, the total weight of the next-to-be-created
|
|
|
|
// commitment is returned for accounting purposes.
|
2023-11-05 11:29:34 +01:00
|
|
|
func (lc *LightningChannel) availableBalance(
|
2024-05-24 15:56:30 +02:00
|
|
|
buffer BufferType) (lnwire.MilliSatoshi, lntypes.WeightUnit) {
|
2023-11-05 11:29:34 +01:00
|
|
|
|
2018-01-09 16:42:07 +01:00
|
|
|
// We'll grab the current set of log updates that the remote has
|
|
|
|
// ACKed.
|
2024-08-09 23:52:21 +02:00
|
|
|
remoteACKedIndex := lc.commitChains.Local.tip().messageIndices.Remote
|
2018-01-16 21:19:04 +01:00
|
|
|
htlcView := lc.fetchHTLCView(remoteACKedIndex,
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local.logIndex)
|
2017-11-10 08:08:50 +01:00
|
|
|
|
2020-02-19 12:27:41 +01:00
|
|
|
// Calculate our available balance from our local commitment.
|
2020-02-19 12:27:42 +01:00
|
|
|
// TODO(halseth): could reuse parts validateCommitmentSanity to do this
|
|
|
|
// balance calculation, as most of the logic is the same.
|
2020-02-19 12:27:41 +01:00
|
|
|
//
|
|
|
|
// NOTE: This is not always accurate, since the remote node can always
|
|
|
|
// add updates concurrently, causing our balance to go down if we're
|
|
|
|
// the initiator, but this is a problem on the protocol level.
|
|
|
|
ourLocalCommitBalance, commitWeight := lc.availableCommitmentBalance(
|
2024-07-31 01:44:18 +02:00
|
|
|
htlcView, lntypes.Local, buffer,
|
2020-02-19 12:27:41 +01:00
|
|
|
)
|
|
|
|
|
2020-02-19 12:27:42 +01:00
|
|
|
// Do the same calculation from the remote commitment point of view.
|
|
|
|
ourRemoteCommitBalance, _ := lc.availableCommitmentBalance(
|
2024-07-31 01:44:18 +02:00
|
|
|
htlcView, lntypes.Remote, buffer,
|
2020-02-19 12:27:42 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
// Return whichever balance is lowest.
|
|
|
|
if ourRemoteCommitBalance < ourLocalCommitBalance {
|
|
|
|
return ourRemoteCommitBalance, commitWeight
|
|
|
|
}
|
|
|
|
|
2020-02-19 12:27:41 +01:00
|
|
|
return ourLocalCommitBalance, commitWeight
|
|
|
|
}
|
|
|
|
|
|
|
|
// availableCommitmentBalance attempts to calculate the balance we have
|
2024-04-02 05:00:29 +02:00
|
|
|
// available for HTLCs on the local/remote commitment given the HtlcView. To
|
2020-02-19 12:27:41 +01:00
|
|
|
// account for sending HTLCs of different sizes, it will report the balance
|
|
|
|
// available for sending non-dust HTLCs, which will be manifested on the
|
|
|
|
// commitment, increasing the commitment fee we must pay as an initiator,
|
|
|
|
// eating into our balance. It will make sure we won't violate the channel
|
|
|
|
// reserve constraints for this amount.
|
2024-04-02 05:00:29 +02:00
|
|
|
func (lc *LightningChannel) availableCommitmentBalance(view *HtlcView,
|
2024-07-31 01:44:18 +02:00
|
|
|
whoseCommitChain lntypes.ChannelParty, buffer BufferType) (
|
|
|
|
lnwire.MilliSatoshi, lntypes.WeightUnit) {
|
2020-02-19 12:27:41 +01:00
|
|
|
|
|
|
|
// Compute the current balances for this commitment. This will take
|
|
|
|
// into account HTLCs to determine the commit weight, which the
|
|
|
|
// initiator must pay the fee for.
|
2020-02-19 12:27:42 +01:00
|
|
|
ourBalance, theirBalance, commitWeight, filteredView, err := lc.computeView(
|
2024-07-31 01:44:18 +02:00
|
|
|
view, whoseCommitChain, false,
|
|
|
|
fn.None[chainfee.SatPerKWeight](),
|
2020-02-19 12:27:41 +01:00
|
|
|
)
|
2020-02-12 11:10:19 +01:00
|
|
|
if err != nil {
|
|
|
|
lc.log.Errorf("Unable to fetch available balance: %v", err)
|
|
|
|
return 0, 0
|
|
|
|
}
|
2017-11-10 08:08:50 +01:00
|
|
|
|
2020-02-19 12:27:42 +01:00
|
|
|
// We can never spend from the channel reserve, so we'll subtract it
|
|
|
|
// from our available balance.
|
|
|
|
ourReserve := lnwire.NewMSatFromSatoshis(
|
|
|
|
lc.channelState.LocalChanCfg.ChanReserve,
|
|
|
|
)
|
|
|
|
if ourReserve <= ourBalance {
|
|
|
|
ourBalance -= ourReserve
|
|
|
|
} else {
|
|
|
|
ourBalance = 0
|
|
|
|
}
|
|
|
|
|
2020-02-19 12:27:42 +01:00
|
|
|
// Calculate the commitment fee in the case where we would add another
|
|
|
|
// HTLC to the commitment, as only the balance remaining after this fee
|
|
|
|
// has been paid is actually available for sending.
|
2024-04-02 05:00:29 +02:00
|
|
|
feePerKw := filteredView.FeePerKw
|
2023-11-05 11:29:34 +01:00
|
|
|
additionalHtlcFee := lnwire.NewMSatFromSatoshis(
|
|
|
|
feePerKw.FeeForWeight(input.HTLCWeight),
|
2020-02-19 12:27:41 +01:00
|
|
|
)
|
2023-11-05 11:29:34 +01:00
|
|
|
commitFee := lnwire.NewMSatFromSatoshis(
|
|
|
|
feePerKw.FeeForWeight(commitWeight))
|
2020-02-19 12:27:41 +01:00
|
|
|
|
2017-11-10 08:08:50 +01:00
|
|
|
if lc.channelState.IsInitiator {
|
2023-11-05 11:29:34 +01:00
|
|
|
// When the buffer is of the `FeeBuffer` type, we know we are
|
|
|
|
// going to send or forward an htlc over this channel therefore
|
|
|
|
// we account for an additional htlc output on the commitment
|
|
|
|
// tx.
|
|
|
|
futureCommitWeight := commitWeight
|
|
|
|
if buffer == FeeBuffer {
|
|
|
|
futureCommitWeight += input.HTLCWeight
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure we do not overwrite `ourBalance`; that's why we
|
|
|
|
// declare bufferAmt beforehand.
|
|
|
|
var bufferAmt lnwire.MilliSatoshi
|
|
|
|
ourBalance, bufferAmt, err = lc.applyCommitFee(
|
|
|
|
ourBalance, futureCommitWeight, feePerKw, buffer,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
lc.log.Warnf("Set available amount to 0 because we "+
|
|
|
|
"could not pay for the CommitmentFee of the "+
|
|
|
|
"new ChannelState: ourBalance is negative "+
|
|
|
|
"after applying the fee: ourBalance=%v, "+
|
|
|
|
"current commitFee(w/o additional htlc)=%v, "+
|
|
|
|
"feeBuffer=%v (type=%v) local_chan_initiator",
|
|
|
|
int64(ourBalance), commitFee,
|
|
|
|
bufferAmt, buffer)
|
|
|
|
|
2020-02-19 12:27:42 +01:00
|
|
|
return 0, commitWeight
|
|
|
|
}
|
2020-02-19 12:27:42 +01:00
|
|
|
|
2023-11-05 11:29:34 +01:00
|
|
|
return ourBalance, commitWeight
|
2020-02-19 12:27:42 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// If we're not the initiator, we must check whether the remote has
|
|
|
|
// enough balance to pay for the fee of our HTLC. We'll start by also
|
|
|
|
// subtracting our counterparty's reserve from their balance.
|
|
|
|
theirReserve := lnwire.NewMSatFromSatoshis(
|
|
|
|
lc.channelState.RemoteChanCfg.ChanReserve,
|
|
|
|
)
|
|
|
|
if theirReserve <= theirBalance {
|
|
|
|
theirBalance -= theirReserve
|
|
|
|
} else {
|
|
|
|
theirBalance = 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll use the dustlimit and htlcFee to find the largest HTLC value
|
|
|
|
// that will be considered dust on the commitment.
|
|
|
|
dustlimit := lnwire.NewMSatFromSatoshis(
|
|
|
|
lc.channelState.LocalChanCfg.DustLimit,
|
|
|
|
)
|
|
|
|
|
|
|
|
// For an extra HTLC fee to be paid on our commitment, the HTLC must be
|
|
|
|
// large enough to make a non-dust HTLC timeout transaction.
|
|
|
|
htlcFee := lnwire.NewMSatFromSatoshis(
|
2020-03-06 16:11:49 +01:00
|
|
|
HtlcTimeoutFee(lc.channelState.ChanType, feePerKw),
|
2020-02-19 12:27:42 +01:00
|
|
|
)
|
|
|
|
|
2020-02-19 12:27:42 +01:00
|
|
|
// If we are looking at the remote commitment, we must use the remote
|
|
|
|
// dust limit and the fee for adding an HTLC success transaction.
|
2024-07-31 01:44:18 +02:00
|
|
|
if whoseCommitChain.IsRemote() {
|
2020-02-19 12:27:42 +01:00
|
|
|
dustlimit = lnwire.NewMSatFromSatoshis(
|
|
|
|
lc.channelState.RemoteChanCfg.DustLimit,
|
|
|
|
)
|
|
|
|
htlcFee = lnwire.NewMSatFromSatoshis(
|
2020-03-06 16:11:49 +01:00
|
|
|
HtlcSuccessFee(lc.channelState.ChanType, feePerKw),
|
2020-02-19 12:27:42 +01:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2020-02-19 12:27:42 +01:00
|
|
|
// The HTLC output will be manifested on the commitment if it
|
|
|
|
// is non-dust after paying the HTLC fee.
|
|
|
|
nonDustHtlcAmt := dustlimit + htlcFee
|
|
|
|
|
2023-11-05 11:29:34 +01:00
|
|
|
// commitFeeWithHtlc is the fee our peer has to pay in case we add
|
|
|
|
// another htlc to the commitment.
|
|
|
|
commitFeeWithHtlc := commitFee + additionalHtlcFee
|
|
|
|
|
2020-02-19 12:27:42 +01:00
|
|
|
// If they cannot pay the fee if we add another non-dust HTLC, we'll
|
|
|
|
// report our available balance just below the non-dust amount, to
|
|
|
|
// avoid attempting HTLCs larger than this size.
|
2023-11-05 11:29:34 +01:00
|
|
|
if theirBalance < commitFeeWithHtlc && ourBalance >= nonDustHtlcAmt {
|
2023-01-15 22:09:36 +01:00
|
|
|
// see https://github.com/lightning/bolts/issues/728
|
|
|
|
ourReportedBalance := nonDustHtlcAmt - 1
|
2023-12-20 17:29:25 +01:00
|
|
|
lc.log.Infof("Reducing local (reported) balance "+
|
|
|
|
"(from %v to %v): remote side does not have enough "+
|
|
|
|
"funds (%v < %v) to pay for non-dust HTLC in case of "+
|
|
|
|
"unilateral close.", ourBalance, ourReportedBalance,
|
|
|
|
theirBalance, commitFeeWithHtlc)
|
2023-01-15 22:09:36 +01:00
|
|
|
ourBalance = ourReportedBalance
|
2017-11-10 08:08:50 +01:00
|
|
|
}
|
|
|
|
|
2018-01-09 16:42:07 +01:00
|
|
|
return ourBalance, commitWeight
|
2017-11-10 08:08:50 +01:00
|
|
|
}
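// A hypothetical, self-contained sketch of the guard applied above (the
// helper name and parameter list are assumptions): if the remote party
// could not pay the commitment fee with one more non-dust HTLC added,
// our reported balance is clamped to just below the non-dust threshold,
// per the linked bolts issue #728.
func clampReportedBalanceSketch(ourBalance, theirBalance, commitFee,
	additionalHtlcFee, dustLimit,
	htlcFee lnwire.MilliSatoshi) lnwire.MilliSatoshi {

	nonDustHtlcAmt := dustLimit + htlcFee
	commitFeeWithHtlc := commitFee + additionalHtlcFee

	if theirBalance < commitFeeWithHtlc && ourBalance >= nonDustHtlcAmt {
		return nonDustHtlcAmt - 1
	}

	return ourBalance
}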
|
|
|
|
|
2016-08-13 00:50:47 +02:00
|
|
|
// StateSnapshot returns a snapshot of the current fully committed state within
|
|
|
|
// the channel.
|
2016-06-23 07:12:28 +02:00
|
|
|
func (lc *LightningChannel) StateSnapshot() *channeldb.ChannelSnapshot {
|
2017-06-09 07:24:10 +02:00
|
|
|
lc.RLock()
|
|
|
|
defer lc.RUnlock()
|
2016-06-23 07:12:28 +02:00
|
|
|
|
|
|
|
return lc.channelState.Snapshot()
|
|
|
|
}
|
|
|
|
|
2017-11-26 20:50:39 +01:00
|
|
|
// validateFeeRate ensures that if the passed fee is applied to the channel,
|
|
|
|
// and a new commitment is created (which evaluates this fee), then the
|
|
|
|
// initiator of the channel does not dip below their reserve.
|
2019-10-31 03:43:05 +01:00
|
|
|
func (lc *LightningChannel) validateFeeRate(feePerKw chainfee.SatPerKWeight) error {
|
2017-11-26 20:50:39 +01:00
|
|
|
// We'll ensure that we can accommodate this new fee change, yet still
|
|
|
|
// be above our reserve balance. Otherwise, we'll reject the fee
|
|
|
|
// update.
|
2023-11-05 11:29:34 +01:00
|
|
|
// We do not enforce the FeeBuffer here because it was introduced
|
|
|
|
// precisely to absorb potential fee rate increases.
|
|
|
|
availableBalance, txWeight := lc.availableBalance(AdditionalHtlc)
|
2021-09-08 13:21:12 +02:00
|
|
|
|
|
|
|
oldFee := lnwire.NewMSatFromSatoshis(
|
2024-08-09 21:47:58 +02:00
|
|
|
lc.commitChains.Local.tip().feePerKw.FeeForWeight(txWeight),
|
2021-09-08 13:21:12 +02:00
|
|
|
)
|
2018-05-02 02:34:52 +02:00
|
|
|
|
|
|
|
// Our base balance is the total amount of satoshis we can commit
|
|
|
|
// towards fees before factoring in the channel reserve.
|
|
|
|
baseBalance := availableBalance + oldFee
|
2017-11-26 20:50:39 +01:00
|
|
|
|
|
|
|
// Using the weight of the commitment transaction if we were to create
|
|
|
|
// a commitment now, we'll compute our remaining balance if we apply
|
|
|
|
// this new fee update.
|
|
|
|
newFee := lnwire.NewMSatFromSatoshis(
|
2018-02-13 14:43:58 +01:00
|
|
|
feePerKw.FeeForWeight(txWeight),
|
2017-11-26 20:50:39 +01:00
|
|
|
)
|
2018-02-25 04:19:46 +01:00
|
|
|
|
2018-05-02 02:34:52 +02:00
|
|
|
// If the total fee exceeds our available balance (taking into account
|
|
|
|
// the fee from the last state), then we'll reject this update as it
|
|
|
|
// would mean we need to trim our entire output.
|
|
|
|
if newFee > baseBalance {
|
2018-02-25 04:19:46 +01:00
|
|
|
return fmt.Errorf("cannot apply fee_update=%v sat/kw, new fee "+
|
|
|
|
"of %v is greater than balance of %v", int64(feePerKw),
|
2018-05-02 02:34:52 +02:00
|
|
|
newFee, baseBalance)
|
2018-02-25 04:19:46 +01:00
|
|
|
}
|
2017-11-26 20:50:39 +01:00
|
|
|
|
|
|
|
// TODO(halseth): should fail if fee update is unreasonable,
|
|
|
|
// as specified in BOLT#2.
|
|
|
|
// * COMMENT(roasbeef): can cross-check with our ideal fee rate
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
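// A minimal sketch of the acceptance rule enforced by validateFeeRate
// above (the helper name is an assumption): a proposed fee is acceptable
// only if, after re-adding the fee already reserved by the current
// commitment, the initiator's balance can still cover the new fee.
func feeRateAcceptableSketch(availableBalance, oldFee,
	newFee lnwire.MilliSatoshi) bool {

	baseBalance := availableBalance + oldFee

	return newFee <= baseBalance
}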
|
|
|
|
|
2017-07-14 20:38:35 +02:00
|
|
|
// UpdateFee initiates a fee update for this channel. Must only be called by
|
|
|
|
// the channel initiator, and must be called before sending update_fee to
|
|
|
|
// the remote.
|
2019-10-31 03:43:05 +01:00
|
|
|
func (lc *LightningChannel) UpdateFee(feePerKw chainfee.SatPerKWeight) error {
|
2017-07-14 20:38:35 +02:00
|
|
|
lc.Lock()
|
|
|
|
defer lc.Unlock()
|
|
|
|
|
|
|
|
// Only the initiator can send a fee update, so trying to send one as
|
2017-07-30 21:25:41 +02:00
|
|
|
// a non-initiator will fail.
|
2017-07-14 20:38:35 +02:00
|
|
|
if !lc.channelState.IsInitiator {
|
2017-07-30 21:25:41 +02:00
|
|
|
return fmt.Errorf("local fee update as non-initiator")
|
2017-07-14 20:38:35 +02:00
|
|
|
}
|
|
|
|
|
2017-11-26 20:50:39 +01:00
|
|
|
// Ensure that the passed fee rate meets our current requirements.
|
|
|
|
if err := lc.validateFeeRate(feePerKw); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2024-06-15 01:30:28 +02:00
|
|
|
pd := &paymentDescriptor{
|
2024-06-06 20:44:44 +02:00
|
|
|
ChanID: lc.ChannelID(),
|
2024-08-09 22:00:59 +02:00
|
|
|
LogIndex: lc.updateLogs.Local.logIndex,
|
2019-01-10 12:23:57 +01:00
|
|
|
Amount: lnwire.NewMSatFromSatoshis(btcutil.Amount(feePerKw)),
|
|
|
|
EntryType: FeeUpdate,
|
|
|
|
}
|
|
|
|
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Local.appendUpdate(pd)
|
2017-07-14 20:38:35 +02:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2024-06-03 18:43:33 +02:00
|
|
|
// CommitFeeTotalAt applies a proposed feerate to the channel and returns the
|
|
|
|
// commitment fee with this new feerate. It does not modify the underlying
|
|
|
|
// LightningChannel.
|
|
|
|
func (lc *LightningChannel) CommitFeeTotalAt(
|
|
|
|
feePerKw chainfee.SatPerKWeight) (btcutil.Amount, btcutil.Amount,
|
|
|
|
error) {
|
|
|
|
|
|
|
|
lc.RLock()
|
|
|
|
defer lc.RUnlock()
|
|
|
|
|
|
|
|
dryRunFee := fn.Some[chainfee.SatPerKWeight](feePerKw)
|
|
|
|
|
|
|
|
// We want to grab every update in both update logs to calculate the
|
|
|
|
// commitment fees in the worst-case with this fee-rate.
|
2024-08-09 22:00:59 +02:00
|
|
|
localIdx := lc.updateLogs.Local.logIndex
|
|
|
|
remoteIdx := lc.updateLogs.Remote.logIndex
|
2024-06-03 18:43:33 +02:00
|
|
|
|
|
|
|
localHtlcView := lc.fetchHTLCView(remoteIdx, localIdx)
|
|
|
|
|
|
|
|
var localCommitFee, remoteCommitFee btcutil.Amount
|
|
|
|
|
|
|
|
// Compute the local commitment's weight.
|
|
|
|
_, _, localWeight, _, err := lc.computeView(
|
2024-07-31 01:44:18 +02:00
|
|
|
localHtlcView, lntypes.Local, false, dryRunFee,
|
2024-06-03 18:43:33 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return 0, 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
localCommitFee = feePerKw.FeeForWeight(localWeight)
|
|
|
|
|
|
|
|
// Create another view in case for some reason the prior one was
|
|
|
|
// mutated.
|
|
|
|
remoteHtlcView := lc.fetchHTLCView(remoteIdx, localIdx)
|
|
|
|
|
|
|
|
// Compute the remote commitment's weight.
|
|
|
|
_, _, remoteWeight, _, err := lc.computeView(
|
2024-07-31 01:44:18 +02:00
|
|
|
remoteHtlcView, lntypes.Remote, false, dryRunFee,
|
2024-06-03 18:43:33 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return 0, 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
remoteCommitFee = feePerKw.FeeForWeight(remoteWeight)
|
|
|
|
|
|
|
|
return localCommitFee, remoteCommitFee, err
|
|
|
|
}
|
|
|
|
|
2017-07-14 20:38:35 +02:00
|
|
|
// ReceiveUpdateFee handles an updated fee sent from remote. This method will
|
|
|
|
// return an error if called as channel initiator.
|
2019-10-31 03:43:05 +01:00
|
|
|
func (lc *LightningChannel) ReceiveUpdateFee(feePerKw chainfee.SatPerKWeight) error {
|
2017-07-14 20:38:35 +02:00
|
|
|
lc.Lock()
|
|
|
|
defer lc.Unlock()
|
|
|
|
|
|
|
|
// Only the initiator can send a fee update, and we must fail if we receive
|
2017-07-30 21:25:41 +02:00
|
|
|
// a fee update as the initiator.
|
2017-07-14 20:38:35 +02:00
|
|
|
if lc.channelState.IsInitiator {
|
2017-07-30 21:25:41 +02:00
|
|
|
return fmt.Errorf("received fee update as initiator")
|
2017-07-14 20:38:35 +02:00
|
|
|
}
|
|
|
|
|
2017-11-27 21:24:00 +01:00
|
|
|
// TODO(roasbeef): or just modify to use the other balance?
|
2024-06-15 01:30:28 +02:00
|
|
|
pd := &paymentDescriptor{
|
2024-06-06 20:44:44 +02:00
|
|
|
ChanID: lc.ChannelID(),
|
2024-08-09 22:00:59 +02:00
|
|
|
LogIndex: lc.updateLogs.Remote.logIndex,
|
2019-01-10 12:23:57 +01:00
|
|
|
Amount: lnwire.NewMSatFromSatoshis(btcutil.Amount(feePerKw)),
|
|
|
|
EntryType: FeeUpdate,
|
|
|
|
}
|
2017-11-26 20:50:39 +01:00
|
|
|
|
2024-08-09 22:00:59 +02:00
|
|
|
lc.updateLogs.Remote.appendUpdate(pd)
|
2017-07-14 20:38:35 +02:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
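// An illustrative sketch of the calling convention described above (the
// helper and its isInitiator flag are assumptions made for this example):
// only the channel initiator calls UpdateFee before sending update_fee on
// the wire, while the non-initiator calls ReceiveUpdateFee when the
// message arrives.
func applyFeeUpdateSketch(lc *LightningChannel, isInitiator bool,
	feePerKw chainfee.SatPerKWeight) error {

	if isInitiator {
		return lc.UpdateFee(feePerKw)
	}

	return lc.ReceiveUpdateFee(feePerKw)
}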
|
|
|
|
|
2017-11-10 08:06:10 +01:00
|
|
|
// generateRevocation generates the revocation message for a given height.
|
2017-07-09 01:30:20 +02:00
|
|
|
func (lc *LightningChannel) generateRevocation(height uint64) (*lnwire.RevokeAndAck,
|
|
|
|
error) {
|
|
|
|
|
|
|
|
// Now that we've accepted a new state transition, we send the remote
|
|
|
|
// party the revocation for our current commitment state.
|
|
|
|
revocationMsg := &lnwire.RevokeAndAck{}
|
|
|
|
commitSecret, err := lc.channelState.RevocationProducer.AtIndex(height)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
copy(revocationMsg.Revocation[:], commitSecret[:])
|
|
|
|
|
|
|
|
// Along with this revocation, we'll also send the _next_ commitment
|
|
|
|
// point that the remote party should use to create our next commitment
|
|
|
|
// transaction. We use a +2 here as we already gave them a look ahead
|
2023-03-15 22:45:14 +01:00
|
|
|
// of size one after the ChannelReady message was sent:
|
2017-07-09 01:30:20 +02:00
|
|
|
//
|
|
|
|
// 0: current revocation, 1: their "next" revocation, 2: this revocation
|
|
|
|
//
|
|
|
|
// We're revoking the current revocation. Once they receive this
|
|
|
|
// message they'll set the "current" revocation for us to their stored
|
|
|
|
// "next" revocation, and this revocation will become their new "next"
|
|
|
|
// revocation.
|
|
|
|
//
|
|
|
|
// Put simply, the window slides to the left by one.
|
2023-07-12 04:02:41 +02:00
|
|
|
revHeight := height + 2
|
2017-07-09 01:30:20 +02:00
|
|
|
nextCommitSecret, err := lc.channelState.RevocationProducer.AtIndex(
|
2023-07-12 04:02:41 +02:00
|
|
|
revHeight,
|
2017-07-09 01:30:20 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-01-16 15:47:43 +01:00
|
|
|
revocationMsg.NextRevocationKey = input.ComputeCommitmentPoint(nextCommitSecret[:])
|
2017-07-09 01:30:20 +02:00
|
|
|
revocationMsg.ChanID = lnwire.NewChanIDFromOutPoint(
|
2024-01-29 22:19:15 +01:00
|
|
|
lc.channelState.FundingOutpoint,
|
2023-01-20 02:06:38 +01:00
|
|
|
)
|
2017-07-09 01:30:20 +02:00
|
|
|
|
2023-07-12 04:02:41 +02:00
|
|
|
// If this is a taproot channel, then we also need to generate the
|
|
|
|
// verification nonce for this target state.
|
|
|
|
if lc.channelState.ChanType.IsTaproot() {
|
|
|
|
nextVerificationNonce, err := channeldb.NewMusigVerificationNonce( //nolint:lll
|
|
|
|
lc.channelState.LocalChanCfg.MultiSigKey.PubKey,
|
|
|
|
revHeight, lc.taprootNonceProducer,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
multi: upgrade new taproot TLVs to use tlv.OptionalRecordT
In this commit, we update new Taproot related TLVs (nonces, partial sig,
sig with nonce, etc). Along the way we were able to get rid of some
boiler plate, but most importantly, we're able to better protect against
API misuse (using a nonce that isn't initialized, etc) with the new
options API. In some areas this introduces a bit of extra boiler plate,
and where applicable I used some new helper functions to help cut down
on the noise.
Note to reviewers: this is done as a single commit, as changing the API
breaks all callers, so if we want things to compile it needs to be in a
wumbo commit.
2024-02-24 03:04:51 +01:00
|
|
|
revocationMsg.LocalNonce = lnwire.SomeMusig2Nonce(
|
|
|
|
nextVerificationNonce.PubNonce,
|
2023-07-12 04:02:41 +02:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2017-07-09 01:30:20 +02:00
|
|
|
return revocationMsg, nil
|
|
|
|
}
|
|
|
|
|
2023-08-09 04:18:34 +02:00
|
|
|
// closeTxOpts houses the set of options that modify how the cooperative close
|
|
|
|
// tx is to be constructed.
|
|
|
|
type closeTxOpts struct {
|
|
|
|
// enableRBF indicates whether the cooperative close tx should signal
|
|
|
|
// RBF or not.
|
|
|
|
enableRBF bool
|
2024-05-29 19:57:39 +02:00
|
|
|
|
|
|
|
// extraCloseOutputs is a set of additional outputs that should be
|
|
|
|
// added to the co-op close transaction.
|
|
|
|
extraCloseOutputs []CloseOutput
|
2024-05-29 19:57:44 +02:00
|
|
|
|
|
|
|
// customSort is a custom function that can be used to sort the
|
|
|
|
// transaction outputs. If this isn't set, then the default BIP-69
|
|
|
|
// sorting is used.
|
|
|
|
customSort CloseSortFunc
|
2023-08-09 04:18:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// defaultCloseTxOpts returns a closeTxOpts struct with default values.
|
|
|
|
func defaultCloseTxOpts() closeTxOpts {
|
|
|
|
return closeTxOpts{
|
|
|
|
enableRBF: false,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// CloseTxOpt is a functional option that allows us to modify how the closing
|
|
|
|
// transaction is created.
|
|
|
|
type CloseTxOpt func(*closeTxOpts)
|
|
|
|
|
|
|
|
// WithRBFCloseTx signals that the cooperative close tx should signal RBF.
|
|
|
|
func WithRBFCloseTx() CloseTxOpt {
|
|
|
|
return func(o *closeTxOpts) {
|
|
|
|
o.enableRBF = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:39 +02:00
|
|
|
// WithExtraTxCloseOutputs can be used to add extra outputs to the cooperative
|
|
|
|
// close transaction.
|
|
|
|
func WithExtraTxCloseOutputs(extraOutputs []CloseOutput) CloseTxOpt {
|
|
|
|
return func(o *closeTxOpts) {
|
|
|
|
o.extraCloseOutputs = extraOutputs
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:44 +02:00
|
|
|
// WithCustomTxSort can be used to modify the way the close transaction is
|
|
|
|
// sorted.
|
|
|
|
func WithCustomTxSort(sorter CloseSortFunc) CloseTxOpt {
|
|
|
|
return func(opts *closeTxOpts) {
|
|
|
|
opts.customSort = sorter
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-13 00:50:47 +02:00
|
|
|
// CreateCooperativeCloseTx creates a transaction which if signed by both
|
2016-06-21 07:09:42 +02:00
|
|
|
// parties, then broadcast cooperatively closes an active channel. The creation
|
|
|
|
// of the closure transaction is modified by a boolean indicating if the party
|
|
|
|
// constructing the channel is the initiator of the closure. Currently it is
|
|
|
|
// expected that the initiator pays the transaction fees for the closing
|
|
|
|
// transaction in full.
|
2017-12-22 19:26:16 +01:00
|
|
|
func CreateCooperativeCloseTx(fundingTxIn wire.TxIn,
|
2017-03-25 00:20:05 +01:00
|
|
|
localDust, remoteDust, ourBalance, theirBalance btcutil.Amount,
|
2023-08-09 04:18:34 +02:00
|
|
|
ourDeliveryScript, theirDeliveryScript []byte,
|
2024-05-29 19:57:44 +02:00
|
|
|
closeOpts ...CloseTxOpt) (*wire.MsgTx, error) {
|
2023-08-09 04:18:34 +02:00
|
|
|
|
|
|
|
opts := defaultCloseTxOpts()
|
|
|
|
for _, optFunc := range closeOpts {
|
|
|
|
optFunc(&opts)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If RBF is signalled, then we'll modify the sequence to permit
|
|
|
|
// replacement.
|
|
|
|
if opts.enableRBF {
|
|
|
|
fundingTxIn.Sequence = mempool.MaxRBFSequence
|
|
|
|
}
|
2016-06-21 07:09:42 +02:00
|
|
|
|
|
|
|
// Construct the transaction to perform a cooperative closure of the
|
|
|
|
// channel. In the event that one side doesn't have any settled funds
|
|
|
|
// within the channel then a refund output for that particular side can
|
|
|
|
// be omitted.
|
2017-01-05 22:56:27 +01:00
|
|
|
closeTx := wire.NewMsgTx(2)
|
2017-12-22 19:26:16 +01:00
|
|
|
closeTx.AddTxIn(&fundingTxIn)
|
2016-06-21 07:09:42 +02:00
|
|
|
|
2017-03-25 00:20:05 +01:00
|
|
|
// Create both cooperative closure outputs, properly respecting the
|
|
|
|
// dust limits of both parties.
|
2024-05-29 19:57:39 +02:00
|
|
|
var localOutputIdx fn.Option[int]
|
|
|
|
haveLocalOutput := ourBalance >= localDust
|
|
|
|
if haveLocalOutput {
|
2016-06-21 07:09:42 +02:00
|
|
|
closeTx.AddTxOut(&wire.TxOut{
|
|
|
|
PkScript: ourDeliveryScript,
|
|
|
|
Value: int64(ourBalance),
|
|
|
|
})
|
2024-05-29 19:57:39 +02:00
|
|
|
|
|
|
|
localOutputIdx = fn.Some(len(closeTx.TxOut) - 1)
|
2016-06-21 07:09:42 +02:00
|
|
|
}
|
2024-05-29 19:57:39 +02:00
|
|
|
|
|
|
|
var remoteOutputIdx fn.Option[int]
|
|
|
|
haveRemoteOutput := theirBalance >= remoteDust
|
|
|
|
if haveRemoteOutput {
|
2016-06-21 07:09:42 +02:00
|
|
|
closeTx.AddTxOut(&wire.TxOut{
|
|
|
|
PkScript: theirDeliveryScript,
|
|
|
|
Value: int64(theirBalance),
|
|
|
|
})
|
2024-05-29 19:57:39 +02:00
|
|
|
|
|
|
|
remoteOutputIdx = fn.Some(len(closeTx.TxOut) - 1)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have extra outputs to add to the co-op close transaction, then
|
|
|
|
// we'll examine them now. We'll deduct the output's value from the
|
|
|
|
// owning party. In the case that a party can't pay for the output, then
|
|
|
|
// their normal output will be omitted.
|
|
|
|
for _, extraTxOut := range opts.extraCloseOutputs {
|
|
|
|
switch {
|
|
|
|
// For additional local outputs, add the output, then deduct
|
|
|
|
// the balance from our local balance.
|
|
|
|
case extraTxOut.IsLocal:
|
|
|
|
// The extraCloseOutputs in the options just indicate if
|
|
|
|
// an extra output should be added in general. But we
|
|
|
|
// only add one if we actually _need_ one, based on the
|
|
|
|
// balance. If we don't have enough local balance to
|
|
|
|
// cover the extra output, then localOutputIdx is None.
|
|
|
|
localOutputIdx.WhenSome(func(idx int) {
|
|
|
|
// The output that currently represents the
|
|
|
|
// local balance, which means:
|
|
|
|
// txOut.Value == ourBalance.
|
|
|
|
txOut := closeTx.TxOut[idx]
|
|
|
|
|
|
|
|
// The extra output (if one exists) is the more
|
|
|
|
// important one, as in custom channels it might
|
|
|
|
// carry some additional values. The normal
|
|
|
|
// output is just an address that sends the
|
|
|
|
// local balance back to our wallet. The extra
|
|
|
|
// one also goes to our wallet, but might also
|
|
|
|
// carry other values, so it has higher
|
|
|
|
// priority. Do we have enough balance to have
|
|
|
|
// both the extra output with the given value
|
|
|
|
// (which is subtracted from our balance) and
|
|
|
|
// still an above-dust normal output? If not, we
|
|
|
|
// skip the extra output and just overwrite the
|
|
|
|
// existing output script with the one from the
|
|
|
|
// extra output.
|
|
|
|
amtAfterOutput := btcutil.Amount(
|
|
|
|
txOut.Value - extraTxOut.Value,
|
|
|
|
)
|
|
|
|
if amtAfterOutput <= localDust {
|
|
|
|
txOut.PkScript = extraTxOut.PkScript
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
txOut.Value -= extraTxOut.Value
|
|
|
|
closeTx.AddTxOut(&extraTxOut.TxOut)
|
|
|
|
})
|
|
|
|
|
|
|
|
// For extra remote outputs, we'll do the opposite.
|
|
|
|
case !extraTxOut.IsLocal:
|
|
|
|
// The extraCloseOutputs in the options just indicate if
|
|
|
|
// an extra output should be added in general. But we
|
|
|
|
// only add one if we actually _need_ one, based on the
|
|
|
|
// balance. If we don't have enough remote balance to
|
|
|
|
// cover the extra output, then remoteOutputIdx is None.
|
|
|
|
remoteOutputIdx.WhenSome(func(idx int) {
|
|
|
|
// The output that currently represents the
|
|
|
|
// remote balance, which means:
|
|
|
|
// txOut.Value == theirBalance.
|
|
|
|
txOut := closeTx.TxOut[idx]
|
|
|
|
|
|
|
|
// The extra output (if one exists) is the more
|
|
|
|
// important one, as in custom channels it might
|
|
|
|
// carry some additional values. The normal
|
|
|
|
// output is just an address that sends the
|
|
|
|
// remote balance back to their wallet. The
|
|
|
|
// extra one also goes to their wallet, but
|
|
|
|
// might also carry other values, so it has
|
|
|
|
// higher priority. Do they have enough balance
|
|
|
|
// to have both the extra output with the given
|
|
|
|
// value (which is subtracted from their
|
|
|
|
// balance) and still an above-dust normal
|
|
|
|
// output? If not, we skip the extra output and
|
|
|
|
// just overwrite the existing output script
|
|
|
|
// with the one from the extra output.
|
|
|
|
amtAfterOutput := btcutil.Amount(
|
|
|
|
txOut.Value - extraTxOut.Value,
|
|
|
|
)
|
|
|
|
if amtAfterOutput <= remoteDust {
|
|
|
|
txOut.PkScript = extraTxOut.PkScript
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
txOut.Value -= extraTxOut.Value
|
|
|
|
closeTx.AddTxOut(&extraTxOut.TxOut)
|
|
|
|
})
|
|
|
|
}
|
2016-06-21 07:09:42 +02:00
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:44 +02:00
|
|
|
if opts.customSort != nil {
|
|
|
|
if err := opts.customSort(closeTx); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
txsort.InPlaceSort(closeTx)
|
|
|
|
}
|
2016-06-21 07:09:42 +02:00
|
|
|
|
2024-05-29 19:57:44 +02:00
|
|
|
return closeTx, nil
|
2016-06-21 07:09:42 +02:00
|
|
|
}
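// A hypothetical sketch (the helper name and parameters are assumptions)
// of constructing a co-op close transaction with RBF signalling enabled,
// as the code above does for taproot channels. All inputs are assumed to
// be supplied by the surrounding closing flow.
func buildRBFCloseTxSketch(fundingTxIn wire.TxIn, localDust, remoteDust,
	ourBalance, theirBalance btcutil.Amount,
	ourScript, theirScript []byte) (*wire.MsgTx, error) {

	return CreateCooperativeCloseTx(
		fundingTxIn, localDust, remoteDust, ourBalance, theirBalance,
		ourScript, theirScript, WithRBFCloseTx(),
	)
}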
|
2017-07-14 21:04:29 +02:00
|
|
|
|
lnwallet/chancloser: properly compute initial fee of co-op close txn
In this commit, we modify the way we compute the starting ideal fee for
the co-op close transaction. Before this commit, channel.CalcFee was
used, which'll compute the fee based on the commitment transaction
itself, rather than the co-op close transaction. As the co-op close
transaction is potentially bigger (two P2TR outputs) than the commitment
transaction, this can cause us to underestimate the fee, which can
result in the fee rate being too low to propagate.
To remedy this, we now compute a fee estimate from scratch, based on the
delivery fees of the two parties.
We also add a bug fix in the chancloser unit tests that wasn't caught
due to loop variable shadowing.
The wallet import itest has been updated as well, since we'll now pay
600 extra satoshis to close the channel, since we're accounting for the
added weight of the P2TR outputs.
Fixes #6953
2022-09-30 04:49:44 +02:00
|
|
|
// LocalBalanceDust returns true if, when creating a co-op close transaction,
|
|
|
|
// the balance of the local party will be dust after accounting for any anchor
|
|
|
|
// outputs.
|
2024-05-29 19:57:42 +02:00
|
|
|
func (lc *LightningChannel) LocalBalanceDust() (bool, btcutil.Amount) {
|
2022-09-30 04:49:44 +02:00
|
|
|
lc.RLock()
|
|
|
|
defer lc.RUnlock()
|
|
|
|
|
|
|
|
chanState := lc.channelState
|
|
|
|
localBalance := chanState.LocalCommitment.LocalBalance.ToSatoshis()
|
|
|
|
|
|
|
|
// If this is an anchor channel, and we're the initiator, then we'll
|
|
|
|
// regain the stats allocated to the anchor outputs with the co-op
|
|
|
|
// close transaction.
|
|
|
|
if chanState.ChanType.HasAnchors() && chanState.IsInitiator {
|
2024-04-11 14:22:17 +02:00
|
|
|
localBalance += 2 * AnchorSize
|
lnwallet/chancloser: properly compute initial fee of cop close txn
In this commit, we modify the way we compute the starting ideal fee for
the co-op close transaction. Before thsi commit, channel.CalcFee was
used, which'll compute the fee based on the commitment transaction
itself, rathern than the co-op close transaction. As the co-op close
transaction is potentailly bigger (two P2TR outputs) than the commitment
transaction, this can cause us to under estimate the fee, which can
result in the fee rate being too low to propagate.
To remedy this, we now compute a fee estimate from scratch, based on the
delivery fees of the two parties.
We also add a bug fix in the chancloser unit tests that wasn't caught
due to loop variable shadowing.
The wallet import itest has been updated as well, since we'll now pay
600 extra saothis to close the channel, since we're accounting for the
added weight of the P2TR outputs.
Fixes #6953
2022-09-30 04:49:44 +02:00
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:42 +02:00
|
|
|
localDust := chanState.LocalChanCfg.DustLimit
|
|
|
|
|
|
|
|
return localBalance <= localDust, localDust
|
lnwallet/chancloser: properly compute initial fee of cop close txn
In this commit, we modify the way we compute the starting ideal fee for
the co-op close transaction. Before thsi commit, channel.CalcFee was
used, which'll compute the fee based on the commitment transaction
itself, rathern than the co-op close transaction. As the co-op close
transaction is potentailly bigger (two P2TR outputs) than the commitment
transaction, this can cause us to under estimate the fee, which can
result in the fee rate being too low to propagate.
To remedy this, we now compute a fee estimate from scratch, based on the
delivery fees of the two parties.
We also add a bug fix in the chancloser unit tests that wasn't caught
due to loop variable shadowing.
The wallet import itest has been updated as well, since we'll now pay
600 extra saothis to close the channel, since we're accounting for the
added weight of the P2TR outputs.
Fixes #6953
2022-09-30 04:49:44 +02:00
|
|
|
}

// RemoteBalanceDust returns true if, when creating a co-op close transaction,
// the balance of the remote party will be dust after accounting for any anchor
// outputs.
func (lc *LightningChannel) RemoteBalanceDust() (bool, btcutil.Amount) {
	lc.RLock()
	defer lc.RUnlock()

	chanState := lc.channelState
	remoteBalance := chanState.RemoteCommitment.RemoteBalance.ToSatoshis()

	// If this is an anchor channel, and they're the initiator, then we'll
	// regain the sats allocated to the anchor outputs with the co-op
	// close transaction.
	if chanState.ChanType.HasAnchors() && !chanState.IsInitiator {
		remoteBalance += 2 * AnchorSize
	}

	remoteDust := chanState.RemoteChanCfg.DustLimit

	return remoteBalance <= remoteDust, remoteDust
}
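
// A hedged usage sketch for the two dust helpers above (channel is a
// hypothetical caller-side variable, not an identifier from this file):
//
//	localIsDust, localDustLimit := channel.LocalBalanceDust()
//	remoteIsDust, remoteDustLimit := channel.RemoteBalanceDust()
//
// A co-op close proposal would typically omit the output of any party whose
// settled balance, plus any reclaimed anchor value, is at or below the
// returned dust limit.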

// CommitBalances returns the local and remote balances in the current
// commitment state.
func (lc *LightningChannel) CommitBalances() (btcutil.Amount, btcutil.Amount) {
	lc.RLock()
	defer lc.RUnlock()

	chanState := lc.channelState
	localCommit := lc.channelState.LocalCommitment

	localBalance := localCommit.LocalBalance.ToSatoshis()
	remoteBalance := localCommit.RemoteBalance.ToSatoshis()

	if chanState.ChanType.HasAnchors() {
		if chanState.IsInitiator {
			localBalance += 2 * AnchorSize
		} else {
			remoteBalance += 2 * AnchorSize
		}
	}

	return localBalance, remoteBalance
}
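
// A hedged numeric example of the anchor adjustment above (amounts are
// illustrative, and the 330-sat anchor value is an assumption about
// AnchorSize): for an anchor channel where we are the initiator, a local
// commitment of 500_000/300_000 sats is reported by CommitBalances as
// 500_660/300_000 sats, since the initiator reclaims both anchor outputs
// (2 * AnchorSize = 660 sats) on a cooperative close.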

// CommitFee returns the commitment fee for the current commitment state.
func (lc *LightningChannel) CommitFee() btcutil.Amount {
	lc.RLock()
	defer lc.RUnlock()

	return lc.channelState.LocalCommitment.CommitFee
}

// CalcFee returns the commitment fee to use for the given fee rate
// (fee-per-kw).
func (lc *LightningChannel) CalcFee(feeRate chainfee.SatPerKWeight) btcutil.Amount {
	return feeRate.FeeForWeight(CommitWeight(lc.channelState.ChanType))
}
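
// A short worked example of the fee-per-kw arithmetic behind CalcFee, using
// illustrative numbers (the rate and weight below are assumptions, not
// constants from this file):
//
//	feeRate := chainfee.SatPerKWeight(2_500)
//	fee := feeRate.FeeForWeight(1_000) // 2_500 * 1_000 / 1_000 = 2_500 sats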

// MaxFeeRate returns the maximum fee rate given an allocation of the channel
// initiator's spendable balance along with the local reserve amount. This can
// be useful to determine when we should stop proposing fee updates that exceed
// our maximum allocation.
//
// Moreover, it returns the share of the total balance, in the range [0, 1],
// that can be allocated to fees. When our desired fee allocation would lead to
// a maximum fee rate below the current commitment fee rate, we floor the
// maximum at the current fee rate, which leads to a different fee allocation
// than initially requested via `maxAllocation`.
//
// NOTE: This should only be used for channels in which the local node is the
// initiator.
func (lc *LightningChannel) MaxFeeRate(
	maxAllocation float64) (chainfee.SatPerKWeight, float64) {

	lc.RLock()
	defer lc.RUnlock()

	// The maximum fee depends on the available balance that can be
	// committed towards fees. It takes into account our local reserve
	// balance. We do not account for a FeeBuffer here because reacting to
	// sharp fee changes is exactly what the buffer was introduced for.
	availableBalance, weight := lc.availableBalance(AdditionalHtlc)

	currentFee := lc.commitChains.Local.tip().feePerKw.FeeForWeight(weight)

	// baseBalance is the maximum amount available for us to spend on fees.
	baseBalance := availableBalance.ToSatoshis() + currentFee

	// In case our local channel balance is drained, we make sure we do not
	// decrease the fee rate below the current fee rate. Otherwise we could
	// end up lowering the commitment fee rate all the way to the fee floor
	// even though current fee rates are way higher. The maximum fee we
	// allow should not be smaller than the current fee. A decrease in fee
	// rate should happen because the mempool reports lower fee levels, not
	// because our local balance decreased. The max fee rate is therefore
	// always floored by the current fee rate of the channel.
	idealMaxFee := float64(baseBalance) * maxAllocation
	maxFee := math.Max(float64(currentFee), idealMaxFee)
	maxFeeAllocation := maxFee / float64(baseBalance)
	maxFeeRate := chainfee.SatPerKWeight(maxFee / (float64(weight) / 1000))

	return maxFeeRate, maxFeeAllocation
}
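
// A hedged numeric walk-through of the computation above (all figures are
// illustrative assumptions): with an available balance of 90_000 sats, a
// commitment weight of 1_000 WU, a current fee of 10_000 sats, and
// maxAllocation = 0.5, baseBalance is 100_000 sats and idealMaxFee is
// 50_000 sats. Since that exceeds the current fee, maxFee stays at 50_000
// sats, the returned rate is 50_000 / (1_000 / 1_000) = 50_000 sat/kw, and
// the returned allocation is 0.5. With maxAllocation = 0.05 instead, the
// 5_000-sat ideal fee is floored at the 10_000-sat current fee, so the
// reported allocation rises to 0.1.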

// IdealCommitFeeRate uses the current network fee, the minimum relay fee,
// maximum fee allocation and anchor channel commitment fee rate to determine
// the ideal fee to be used for the commitments of the channel.
func (lc *LightningChannel) IdealCommitFeeRate(netFeeRate, minRelayFeeRate,
	maxAnchorCommitFeeRate chainfee.SatPerKWeight,
	maxFeeAlloc float64) chainfee.SatPerKWeight {

	// Get the maximum fee rate that we can use given our max fee
	// allocation and given the local reserve balance that we must
	// preserve.
	maxFeeRate, _ := lc.MaxFeeRate(maxFeeAlloc)

	var commitFeeRate chainfee.SatPerKWeight

	// If the channel has anchor outputs then cap the fee rate at the
	// max anchor fee rate if that maximum is less than our max fee rate.
	// Otherwise, cap the fee rate at the max fee rate.
	switch lc.channelState.ChanType.HasAnchors() &&
		maxFeeRate > maxAnchorCommitFeeRate {

	case true:
		commitFeeRate = chainfee.SatPerKWeight(
			math.Min(
				float64(netFeeRate),
				float64(maxAnchorCommitFeeRate),
			),
		)

	case false:
		commitFeeRate = chainfee.SatPerKWeight(
			math.Min(float64(netFeeRate), float64(maxFeeRate)),
		)
	}

	if commitFeeRate >= minRelayFeeRate {
		return commitFeeRate
	}

	// The commitment fee rate is below the minimum relay fee rate.
	// If the min relay fee rate is still below the maximum fee, then use
	// the minimum relay fee rate.
	if minRelayFeeRate <= maxFeeRate {
		return minRelayFeeRate
	}

	// The minimum relay fee rate is more than the ideal maximum fee rate.
	// Check if it is smaller than the absolute maximum fee rate we can
	// use. If it is, then we use the minimum relay fee rate and we log a
	// warning to indicate that the max channel fee allocation option was
	// ignored.
	absoluteMaxFee, _ := lc.MaxFeeRate(1)
	if minRelayFeeRate <= absoluteMaxFee {
		lc.log.Warn("Ignoring max channel fee allocation to " +
			"ensure that the commitment fee is above the " +
			"minimum relay fee.")

		return minRelayFeeRate
	}

	// The absolute maximum fee rate we can pay is below the minimum
	// relay fee rate. The commitment tx will not be able to propagate.
	// To give the transaction the best chance, we use the absolute
	// maximum fee we have available and we log an error.
	lc.log.Errorf("The commitment fee rate of %s is below the current "+
		"minimum relay fee rate of %s. The max fee rate of %s will be "+
		"used.", commitFeeRate, minRelayFeeRate, absoluteMaxFee)

	return absoluteMaxFee
}
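
// A hedged caller-side sketch of the cascade above (feeEstimator, minRelayFee
// and maxAnchorFee are hypothetical names, not identifiers from this file):
//
//	netFee, err := feeEstimator.EstimateFeePerKW(6)
//	if err != nil {
//		return err
//	}
//	commitFee := channel.IdealCommitFeeRate(
//		netFee, minRelayFee, maxAnchorFee, 0.5,
//	)
//
// The result is the network estimate capped by the allocation-derived maximum
// (or by the anchor cap for anchor channels), then raised to the minimum
// relay fee rate whenever the capped value would be unable to propagate.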

// RemoteNextRevocation returns the channelState's RemoteNextRevocation. For
// musig2 channels, until a nonce pair is processed by the remote party, a nil
// public key is returned.
//
// TODO(roasbeef): revisit, maybe just make a more general method instead?
func (lc *LightningChannel) RemoteNextRevocation() *btcec.PublicKey {
	lc.RLock()
	defer lc.RUnlock()

	if !lc.channelState.ChanType.IsTaproot() {
		return lc.channelState.RemoteNextRevocation
	}

	if lc.musigSessions == nil {
		return nil
	}

	return lc.channelState.RemoteNextRevocation
}

// IsInitiator returns true if we were the ones that initiated the funding
// workflow which led to the creation of this channel. Otherwise, it returns
// false.
func (lc *LightningChannel) IsInitiator() bool {
	lc.RLock()
	defer lc.RUnlock()

	return lc.channelState.IsInitiator
}

// CommitFeeRate returns the current fee rate of the commitment transaction in
// units of sat-per-kw.
func (lc *LightningChannel) CommitFeeRate() chainfee.SatPerKWeight {
	lc.RLock()
	defer lc.RUnlock()

	return chainfee.SatPerKWeight(lc.channelState.LocalCommitment.FeePerKw)
}

// WorstCaseFeeRate returns the higher feerate from either the local commitment
// or the remote commitment.
func (lc *LightningChannel) WorstCaseFeeRate() chainfee.SatPerKWeight {
	lc.RLock()
	defer lc.RUnlock()

	localFeeRate := lc.channelState.LocalCommitment.FeePerKw
	remoteFeeRate := lc.channelState.RemoteCommitment.FeePerKw

	if localFeeRate > remoteFeeRate {
		return chainfee.SatPerKWeight(localFeeRate)
	}

	return chainfee.SatPerKWeight(remoteFeeRate)
}
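
// A hedged usage sketch combining WorstCaseFeeRate with CalcFee (channel is a
// hypothetical caller-side variable): this estimates the base commitment fee
// at the higher of the two parties' current fee rates, ignoring any HTLC
// outputs.
//
//	worstCaseFee := channel.CalcFee(channel.WorstCaseFeeRate())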

// IsPending returns true if the channel's funding transaction has not yet been
// fully confirmed, and false otherwise.
func (lc *LightningChannel) IsPending() bool {
	lc.RLock()
	defer lc.RUnlock()

	return lc.channelState.IsPending
}

// State provides access to the channel's internal state.
func (lc *LightningChannel) State() *channeldb.OpenChannel {
	return lc.channelState
}

// MarkBorked marks the event when the channel has reached an irreconcilable
// state, such as a channel breach or state desynchronization. Borked channels
// should never be added to the switch.
func (lc *LightningChannel) MarkBorked() error {
	lc.Lock()
	defer lc.Unlock()

	return lc.channelState.MarkBorked()
}

// MarkCommitmentBroadcasted marks the channel to indicate that a commitment
// transaction has been broadcast, either our own or the remote party's, and
// that we should watch the chain for it to confirm before taking any further
// action. The closer parameter records which party initiated the close.
func (lc *LightningChannel) MarkCommitmentBroadcasted(tx *wire.MsgTx,
	closer lntypes.ChannelParty) error {

	lc.Lock()
	defer lc.Unlock()

	return lc.channelState.MarkCommitmentBroadcasted(tx, closer)
}

// MarkCoopBroadcasted marks the channel to indicate that a cooperative close
// transaction has been broadcast, and that we should watch the chain for it to
// confirm before taking any further action. The closer parameter records which
// party initiated the cooperative close.
func (lc *LightningChannel) MarkCoopBroadcasted(tx *wire.MsgTx,
	closer lntypes.ChannelParty) error {

	lc.Lock()
	defer lc.Unlock()

	return lc.channelState.MarkCoopBroadcasted(tx, closer)
}

// MarkShutdownSent persists the given ShutdownInfo. The existence of the
// ShutdownInfo represents the fact that the Shutdown message has been sent by
// us and so should be re-sent on re-establish.
func (lc *LightningChannel) MarkShutdownSent(
	info *channeldb.ShutdownInfo) error {

	lc.Lock()
	defer lc.Unlock()

	return lc.channelState.MarkShutdownSent(info)
}

// MarkDataLoss sets the channel status to LocalDataLoss and stores the passed
// commitPoint for use to retrieve funds in case the remote force closes the
// channel.
func (lc *LightningChannel) MarkDataLoss(commitPoint *btcec.PublicKey) error {
	lc.Lock()
	defer lc.Unlock()

	return lc.channelState.MarkDataLoss(commitPoint)
}

// ActiveHtlcs returns a slice of HTLCs which are currently active on *both*
// commitment transactions.
func (lc *LightningChannel) ActiveHtlcs() []channeldb.HTLC {
	lc.RLock()
	defer lc.RUnlock()

	return lc.channelState.ActiveHtlcs()
}

// LocalChanReserve returns our local ChanReserve requirement for the remote
// party.
func (lc *LightningChannel) LocalChanReserve() btcutil.Amount {
	return lc.channelState.LocalChanCfg.ChanReserve
}

// NextLocalHtlcIndex returns the next unallocated local htlc index. To ensure
// this always returns the next index that has not yet been allocated, this
// will first try to examine any pending commitments, before falling back to
// the last locked-in local commitment.
func (lc *LightningChannel) NextLocalHtlcIndex() (uint64, error) {
	lc.RLock()
	defer lc.RUnlock()

	return lc.channelState.NextLocalHtlcIndex()
}

// FwdMinHtlc returns the minimum HTLC value required by the remote node, i.e.
// the minimum value HTLC we can forward on this channel.
func (lc *LightningChannel) FwdMinHtlc() lnwire.MilliSatoshi {
	return lc.channelState.LocalChanCfg.MinHTLC
}

// unsignedLocalUpdates retrieves the unsigned local updates that we should
// store upon receiving a revocation. This function is called from
// ReceiveRevocation. remoteMessageIndex is the height into the local update
// log that the remote commitment chain tip includes. localMessageIndex
// is the height into the local update log that the local commitment tail
// includes. Our local updates that are unsigned by the remote should
// have height greater than or equal to localMessageIndex (not on our commit),
// and height less than remoteMessageIndex (on the remote commit).
//
// NOTE: remoteMessageIndex is the height on the tip because this is called
// before the tail is advanced to the tip during ReceiveRevocation.
func (lc *LightningChannel) unsignedLocalUpdates(remoteMessageIndex,
	localMessageIndex uint64) []channeldb.LogUpdate {

	var localPeerUpdates []channeldb.LogUpdate
	for e := lc.updateLogs.Local.Front(); e != nil; e = e.Next() {
		pd := e.Value

		// We don't save add updates as they are restored from the
		// remote commitment in restoreStateLogs.
		if pd.EntryType == Add {
			continue
		}

		// This is a settle/fail that is on the remote commitment, but
		// not on the local commitment. We expect this update to be
		// covered in the next commitment signature that the remote
		// sends.
		if pd.LogIndex < remoteMessageIndex &&
			pd.LogIndex >= localMessageIndex {

			localPeerUpdates = append(
				localPeerUpdates, pd.toLogUpdate(),
			)
		}
	}

	return localPeerUpdates
}
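
// A small hedged illustration of the index window above (the heights are made
// up): with localMessageIndex = 10 and remoteMessageIndex = 14, a settle or
// fail with LogIndex 12 is returned (on the remote commitment, not yet on
// ours), LogIndex 9 is skipped (already locked into our commitment), and
// LogIndex 15 is skipped (not yet on the remote commitment either).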

// GenMusigNonces generates the verification nonce to start off a new musig2
// channel session.
func (lc *LightningChannel) GenMusigNonces() (*musig2.Nonces, error) {
	lc.Lock()
	defer lc.Unlock()

	var err error

	// We pass in the current height+1 as this'll be the set of
	// verification nonces we'll send to the party to create our _next_
	// state.
	lc.pendingVerificationNonce, err = channeldb.NewMusigVerificationNonce(
		lc.channelState.LocalChanCfg.MultiSigKey.PubKey,
		lc.currentHeight+1, lc.taprootNonceProducer,
	)
	if err != nil {
		return nil, err
	}

	return lc.pendingVerificationNonce, nil
}

// HasRemoteNonces returns true if the channel has a remote nonce pair.
func (lc *LightningChannel) HasRemoteNonces() bool {
	return lc.musigSessions != nil
}

// InitRemoteMusigNonces processes the remote musig nonces sent by the remote
// party. This should be called upon connection re-establishment, after we've
// generated our own nonces. Once this method returns a nil error, then the
// channel can be used to sign commitment states.
func (lc *LightningChannel) InitRemoteMusigNonces(remoteNonce *musig2.Nonces,
) error {

	lc.Lock()
	defer lc.Unlock()

	if lc.pendingVerificationNonce == nil {
		return fmt.Errorf("pending verification nonce is not set")
	}

	// Now that we have the set of local and remote nonces, we can generate
	// a new pair of musig sessions for our local commitment and the
	// commitment of the remote party.
	localNonce := lc.pendingVerificationNonce

	localChanCfg := lc.channelState.LocalChanCfg
	remoteChanCfg := lc.channelState.RemoteChanCfg

	// TODO(roasbeef): propagate rename of signing and verification nonces

	sessionCfg := &MusigSessionCfg{
		LocalKey:       localChanCfg.MultiSigKey,
		RemoteKey:      remoteChanCfg.MultiSigKey,
		LocalNonce:     *localNonce,
		RemoteNonce:    *remoteNonce,
		Signer:         lc.Signer,
		InputTxOut:     &lc.fundingOutput,
		TapscriptTweak: lc.channelState.TapscriptRoot,
	}
	lc.musigSessions = NewMusigPairSession(
		sessionCfg,
	)

	lc.pendingVerificationNonce = nil

	lc.opts.localNonce = nil
	lc.opts.remoteNonce = nil

	return nil
}
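
// A hedged sketch of the intended call sequence for taproot channels on
// re-establish (channel, sendToPeer and remoteNonces are hypothetical
// caller-side names):
//
//	localNonces, err := channel.GenMusigNonces()
//	if err != nil {
//		return err
//	}
//	sendToPeer(localNonces.PubNonce)
//
//	// ...once the peer's verification nonces arrive...
//	if err := channel.InitRemoteMusigNonces(remoteNonces); err != nil {
//		return err
//	}
//
// After this, HasRemoteNonces reports true and commitment signing can resume.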

// ChanType returns the channel type.
func (lc *LightningChannel) ChanType() channeldb.ChannelType {
	lc.RLock()
	defer lc.RUnlock()

	return lc.channelState.ChanType
}

// Initiator returns the ChannelParty that originally opened this channel.
func (lc *LightningChannel) Initiator() lntypes.ChannelParty {
	lc.RLock()
	defer lc.RUnlock()

	return lc.channelState.Initiator()
}

// FundingTxOut returns the funding output of the channel.
func (lc *LightningChannel) FundingTxOut() *wire.TxOut {
	lc.RLock()
	defer lc.RUnlock()

	return &lc.fundingOutput
}

// MultiSigKeys returns the set of multi-sig keys for a channel.
func (lc *LightningChannel) MultiSigKeys() (keychain.KeyDescriptor,
	keychain.KeyDescriptor) {

	lc.RLock()
	defer lc.RUnlock()

	return lc.channelState.LocalChanCfg.MultiSigKey,
		lc.channelState.RemoteChanCfg.MultiSigKey
}

// LocalCommitmentBlob returns the custom blob of the local commitment.
func (lc *LightningChannel) LocalCommitmentBlob() fn.Option[tlv.Blob] {
	lc.RLock()
	defer lc.RUnlock()

	chanState := lc.channelState
	localBlob := chanState.LocalCommitment.CustomBlob

	return fn.MapOption(func(b tlv.Blob) tlv.Blob {
		newBlob := make([]byte, len(b))
		copy(newBlob, b)

		return newBlob
	})(localBlob)
}

// FundingBlob returns the funding custom blob.
func (lc *LightningChannel) FundingBlob() fn.Option[tlv.Blob] {
	lc.RLock()
	defer lc.RUnlock()

	return fn.MapOption(func(b tlv.Blob) tlv.Blob {
		newBlob := make([]byte, len(b))
		copy(newBlob, b)

		return newBlob
	})(lc.channelState.CustomBlob)
}
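
// A brief hedged note on the two blob accessors above: fn.MapOption applies
// the copy only when the Option actually holds a blob, so callers receive
// either an empty Option or a defensive copy they may mutate freely. An
// illustrative caller (channel is a hypothetical name):
//
//	channel.FundingBlob().WhenSome(func(blob tlv.Blob) {
//		// Safe to modify: blob is a copy of the channel's funding blob.
//		_ = blob
//	})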