package htlcswitch

import (
	"bytes"
	crand "crypto/rand"
	"crypto/sha256"
	"errors"
	"fmt"
	prand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btclog/v2"
	"github.com/lightningnetwork/lnd/build"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/channeldb/models"
	"github.com/lightningnetwork/lnd/contractcourt"
	"github.com/lightningnetwork/lnd/fn"
	"github.com/lightningnetwork/lnd/htlcswitch/hodl"
	"github.com/lightningnetwork/lnd/htlcswitch/hop"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/invoices"
	"github.com/lightningnetwork/lnd/lnpeer"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnutils"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/queue"
	"github.com/lightningnetwork/lnd/ticker"
	"github.com/lightningnetwork/lnd/tlv"
)

func init() {
	prand.Seed(time.Now().UnixNano())
}

const (
	// DefaultMaxOutgoingCltvExpiry is the maximum outgoing time lock that
	// the node accepts for forwarded payments. The value is relative to
	// the current block height. The reason to have a maximum is to prevent
	// funds getting locked up unreasonably long. Otherwise, an attacker
	// willing to lock its own funds too, could force the funds of this
	// node to be locked up for an indefinite (max int32) number of blocks.
	//
	// The value 2016 corresponds to on average two weeks worth of blocks
	// and is based on the maximum number of hops (20), the default CLTV
	// delta (40), and some extra margin to account for the other lightning
	// implementations and past lnd versions which used to have a default
	// CLTV delta of 144.
	DefaultMaxOutgoingCltvExpiry = 2016

	// DefaultMinLinkFeeUpdateTimeout represents the minimum interval in
	// which a link should propose to update its commitment fee rate.
	DefaultMinLinkFeeUpdateTimeout = 10 * time.Minute

	// DefaultMaxLinkFeeUpdateTimeout represents the maximum interval in
	// which a link should propose to update its commitment fee rate.
	DefaultMaxLinkFeeUpdateTimeout = 60 * time.Minute

	// DefaultMaxLinkFeeAllocation is the highest allocation we'll allow a
	// channel's commitment fee to be of its balance. This only applies to
	// the initiator of the channel.
	DefaultMaxLinkFeeAllocation float64 = 0.5
)

// ExpectedFee computes the expected fee for a given htlc amount. The value
// returned from this function is to be used as a sanity check when forwarding
// HTLC's to ensure that an incoming HTLC properly adheres to our propagated
// forwarding policy.
//
// TODO(roasbeef): also add in current available channel bandwidth, inverse
// func
func ExpectedFee(f models.ForwardingPolicy,
	htlcAmt lnwire.MilliSatoshi) lnwire.MilliSatoshi {

	return f.BaseFee + (htlcAmt*f.FeeRate)/1000000
}
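
// The function below is a hedged, illustrative sketch and not part of the
// production code path: it only demonstrates how ExpectedFee combines the
// flat BaseFee with the proportional FeeRate (expressed in millionths). The
// policy values used here are hypothetical.
func exampleExpectedFee() lnwire.MilliSatoshi {
	policy := models.ForwardingPolicy{
		BaseFee: 1000, // 1 sat flat fee, expressed in millisatoshis.
		FeeRate: 100,  // Proportional fee of 100 parts-per-million.
	}

	// Forwarding 1,000,000 msat costs 1000 + (1,000,000*100)/1,000,000,
	// i.e. 1,100 msat in total.
	return ExpectedFee(policy, 1000000)
}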

// ChannelLinkConfig defines the configuration for the channel link. ALL
// elements within the configuration MUST be non-nil for the channel link to
// carry out its duties.
type ChannelLinkConfig struct {
	// FwrdingPolicy is the initial forwarding policy to be used when
	// deciding whether to forward incoming HTLC's or not. This value
	// can be updated with subsequent calls to UpdateForwardingPolicy
	// targeted at a given ChannelLink concrete interface implementation.
	FwrdingPolicy models.ForwardingPolicy

	// Circuits provides restricted access to the switch's circuit map,
	// allowing the link to open and close circuits.
	Circuits CircuitModifier

	// BestHeight returns the best known height.
	BestHeight func() uint32

	// ForwardPackets attempts to forward the batch of htlcs through the
	// switch. The function returns an error in case it fails to send one
	// or more packets. The link's quit signal should be provided to allow
	// cancellation of forwarding during link shutdown.
	ForwardPackets func(<-chan struct{}, bool, ...*htlcPacket) error

	// DecodeHopIterators facilitates batched decoding of HTLC Sphinx onion
	// blobs, which are then used to inform how to forward an HTLC.
	//
	// NOTE: This function assumes the same set of readers and preimages
	// are always presented for the same identifier.
	DecodeHopIterators func([]byte, []hop.DecodeHopIteratorRequest) (
		[]hop.DecodeHopIteratorResponse, error)

	// ExtractErrorEncrypter is responsible for decoding the HTLC Sphinx
	// onion blob, and creating the onion failure obfuscator.
	ExtractErrorEncrypter hop.ErrorEncrypterExtracter

	// FetchLastChannelUpdate retrieves the latest routing policy for a
	// target channel. This channel will typically be the outgoing channel
	// specified when we receive an incoming HTLC. This will be used to
	// provide payment senders our latest policy when sending encrypted
	// error messages.
	FetchLastChannelUpdate func(lnwire.ShortChannelID) (
		*lnwire.ChannelUpdate1, error)

	// Peer is a lightning network node with which we have the channel
	// link opened.
	Peer lnpeer.Peer

	// Registry is a sub-system which is responsible for managing the
	// invoices in a thread-safe manner.
	Registry InvoiceDatabase

	// PreimageCache is a global witness beacon that houses any new
	// preimages discovered by other links. We'll use this to add new
	// witnesses that we discover which will notify any sub-systems
	// subscribed to new events.
	PreimageCache contractcourt.WitnessBeacon

	// OnChannelFailure is a function closure that we'll call if the
	// channel failed for some reason. Depending on the severity of the
	// error, the closure potentially must force close this channel and
	// disconnect the peer.
	//
	// NOTE: The method must return in order for the ChannelLink to be able
	// to shut down properly.
	OnChannelFailure func(lnwire.ChannelID, lnwire.ShortChannelID,
		LinkFailureError)

	// UpdateContractSignals is a function closure that we'll use to update
	// outside sub-systems with this channel's latest ShortChannelID.
	UpdateContractSignals func(*contractcourt.ContractSignals) error

	// NotifyContractUpdate is a function closure that we'll use to update
	// the contractcourt and more specifically the ChannelArbitrator of the
	// latest channel state.
	NotifyContractUpdate func(*contractcourt.ContractUpdate) error

	// ChainEvents is an active subscription to the chain watcher for this
	// channel to be notified of any on-chain activity related to this
	// channel.
	ChainEvents *contractcourt.ChainEventSubscription

	// FeeEstimator is an instance of a live fee estimator which will be
	// used to dynamically regulate the current fee of the commitment
	// transaction to ensure timely confirmation.
	FeeEstimator chainfee.Estimator

	// HodlMask is a bitvector composed of hodl.Flags, specifying
	// breakpoints for HTLC forwarding internal to the switch.
	//
	// NOTE: This should only be used for testing.
	HodlMask hodl.Mask

	// SyncStates is used to indicate that we need to send the channel
	// reestablishment message to the remote peer. It should be done if our
	// clients have been restarted, or the remote peer has been
	// reconnected.
	SyncStates bool

	// BatchTicker is the ticker that determines the interval that we'll
	// use to check the batch to see if there're any updates we should
	// flush out. By batching updates into a single commit, we attempt to
	// increase throughput by maximizing the number of updates coalesced
	// into a single commit.
	BatchTicker ticker.Ticker

	// FwdPkgGCTicker is the ticker determining the frequency at which
	// garbage collection of forwarding packages occurs. We use a
	// time-based approach, as opposed to block epochs, as to not hinder
	// syncing.
	FwdPkgGCTicker ticker.Ticker

	// PendingCommitTicker is a ticker that allows the link to determine if
	// a locally initiated commitment dance gets stuck waiting for the
	// remote party to revoke.
	PendingCommitTicker ticker.Ticker

	// BatchSize is the max size of a batch of updates done to the link
	// before we do a state update.
	BatchSize uint32

	// UnsafeReplay will cause a link to replay the adds in its latest
	// commitment txn after the link is restarted. This should only be used
	// in testing; it is here to ensure the sphinx replay detection on the
	// receiving node is persistent.
	UnsafeReplay bool

	// MinUpdateTimeout represents the minimum interval in which a link
	// will propose to update its commitment fee rate. A random timeout
	// will be selected between this and MaxUpdateTimeout.
	MinUpdateTimeout time.Duration

	// MaxUpdateTimeout represents the maximum interval in which a link
	// will propose to update its commitment fee rate. A random timeout
	// will be selected between this and MinUpdateTimeout.
	MaxUpdateTimeout time.Duration

	// OutgoingCltvRejectDelta defines the number of blocks before expiry
	// of an htlc where we don't offer an htlc anymore. This should be at
	// least the outgoing broadcast delta, because in any case we don't
	// want to risk offering an htlc that triggers channel closure.
	OutgoingCltvRejectDelta uint32

	// TowerClient is an optional engine that manages the signing,
	// encrypting, and uploading of justice transactions to the daemon's
	// configured set of watchtowers for legacy channels.
	TowerClient TowerClient

	// MaxOutgoingCltvExpiry is the maximum outgoing timelock that the link
	// should accept for a forwarded HTLC. The value is relative to the
	// current block height.
	MaxOutgoingCltvExpiry uint32

	// MaxFeeAllocation is the highest allocation we'll allow a channel's
	// commitment fee to be of its balance. This only applies to the
	// initiator of the channel.
	MaxFeeAllocation float64

	// MaxAnchorsCommitFeeRate is the max commitment fee rate we'll use as
	// the initiator for channels of the anchor type.
	MaxAnchorsCommitFeeRate chainfee.SatPerKWeight

	// NotifyActiveLink allows the link to tell the ChannelNotifier when a
	// link is first started.
	NotifyActiveLink func(wire.OutPoint)

	// NotifyActiveChannel allows the link to tell the ChannelNotifier when
	// a channel becomes active.
	NotifyActiveChannel func(wire.OutPoint)

	// NotifyInactiveChannel allows the switch to tell the ChannelNotifier
	// when channels become inactive.
	NotifyInactiveChannel func(wire.OutPoint)

	// NotifyInactiveLinkEvent allows the switch to tell the
	// ChannelNotifier when a channel link becomes inactive.
	NotifyInactiveLinkEvent func(wire.OutPoint)

	// HtlcNotifier is an instance of a htlcNotifier which we will pipe
	// htlc events through.
	HtlcNotifier htlcNotifier

	// FailAliasUpdate is a function used to fail an HTLC for an
	// option_scid_alias channel.
	FailAliasUpdate func(sid lnwire.ShortChannelID,
		incoming bool) *lnwire.ChannelUpdate1

	// GetAliases is used by the link and switch to fetch the set of
	// aliases for a given link.
	GetAliases func(base lnwire.ShortChannelID) []lnwire.ShortChannelID

	// PreviouslySentShutdown is an optional value that is set if, at the
	// time of the link being started, persisted shutdown info was found
	// for the channel. This value being set means that we previously sent
	// a Shutdown message to our peer, and so we should do so again on
	// re-establish and should not allow any more HTLC adds on the outgoing
	// direction of the link.
	PreviouslySentShutdown fn.Option[lnwire.Shutdown]

	// DisallowRouteBlinding adds the option to disable forwarding payments
	// in blinded routes by failing back any blinding-related payloads as
	// if they were invalid.
	DisallowRouteBlinding bool

	// MaxFeeExposure is the threshold in milli-satoshis after which we'll
	// restrict the flow of HTLCs and fee updates.
	MaxFeeExposure lnwire.MilliSatoshi
}

// channelLink is the service which drives a channel's commitment update
// state-machine. In the event that an HTLC needs to be propagated to another
// link, the forward handler from the config is used, which sends the HTLC to
// the switch. Additionally, the link encapsulates the logic of commitment
// protocol message ordering and updates.
type channelLink struct {
	// The following fields are only meant to be used *atomically*.
	started       int32
	reestablished int32
	shutdown      int32

	// failed should be set to true in case a link error happens, making
	// sure we don't process any more updates.
	failed bool

	// keystoneBatch represents a volatile list of keystones that must be
	// written before attempting to sign the next commitment txn. These
	// represent all the HTLC's forwarded to the link from the switch. Once
	// we lock them into our outgoing commitment, then the circuit has a
	// keystone, and is fully opened.
	keystoneBatch []Keystone

	// openedCircuits is the set of all payment circuits that will be open
	// once we make our next commitment. After making the commitment we'll
	// ACK all these from our mailbox to ensure that they don't get
	// re-delivered if we reconnect.
	openedCircuits []CircuitKey

	// closedCircuits is the set of all payment circuits that will be
	// closed once we make our next commitment. After taking the commitment
	// we'll ACK all these to ensure that they don't get re-delivered if we
	// reconnect.
	closedCircuits []CircuitKey

	// channel is a lightning network channel to which we apply htlc
	// updates.
	channel *lnwallet.LightningChannel

	// cfg is a structure which carries all dependable fields/handlers
	// which may affect behaviour of the service.
	cfg ChannelLinkConfig

	// mailBox is the main interface between the outside world and the
	// link. All incoming messages will be sent over this mailBox. Messages
	// include new updates from our connected peer, and new packets to be
	// forwarded sent by the switch.
	mailBox MailBox

	// upstream is a channel that new messages sent from the remote peer to
	// the local peer will be sent across.
	upstream chan lnwire.Message

	// downstream is a channel in which new multi-hop HTLC's to be
	// forwarded will be sent across. Messages from this channel are sent
	// by the HTLC switch.
	downstream chan *htlcPacket

	// updateFeeTimer is the timer responsible for updating the link's
	// commitment fee every time it fires.
	updateFeeTimer *time.Timer

	// uncommittedPreimages stores a list of all preimages that have been
	// learned since receiving the last CommitSig from the remote peer. The
	// batch will be flushed just before accepting the subsequent CommitSig
	// or on shutdown to avoid doing a write for each preimage received.
	uncommittedPreimages []lntypes.Preimage

	sync.RWMutex

	// hodlQueue is used to receive exit hop htlc resolutions from the
	// invoice registry.
	hodlQueue *queue.ConcurrentQueue

	// hodlMap stores related htlc data for a circuit key. It allows
	// resolving those htlcs when we receive a message on hodlQueue.
	hodlMap map[models.CircuitKey]hodlHtlc

	// log is a link-specific logging instance.
	log btclog.Logger

	// isOutgoingAddBlocked tracks whether the channelLink can send an
	// UpdateAddHTLC.
	isOutgoingAddBlocked atomic.Bool

	// isIncomingAddBlocked tracks whether the channelLink can receive an
	// UpdateAddHTLC.
	isIncomingAddBlocked atomic.Bool

	// flushHooks is a hookMap that is triggered when we reach a channel
	// state with no live HTLCs.
	flushHooks hookMap

	// outgoingCommitHooks is a hookMap that is triggered after we send our
	// next CommitSig.
	outgoingCommitHooks hookMap

	// incomingCommitHooks is a hookMap that is triggered after we receive
	// our next CommitSig.
	incomingCommitHooks hookMap

	// ContextGuard is a helper that encapsulates a wait group and quit
	// channel and allows contexts that either block or cancel on those
	// depending on the use case.
	*fn.ContextGuard
}

// hookMap is a data structure that is used to track the hooks that need to be
// called in various parts of the channelLink's lifecycle.
//
// WARNING: NOT thread-safe.
type hookMap struct {
	// allocIdx keeps track of the next id we haven't yet allocated.
	allocIdx atomic.Uint64

	// transient is a map of hooks that are only called the next time
	// invoke is called. These hooks are deleted during invoke.
	transient map[uint64]func()

	// newTransients is a channel that we use to accept new hooks into the
	// hookMap.
	newTransients chan func()
}

// newHookMap initializes a new empty hookMap.
func newHookMap() hookMap {
	return hookMap{
		allocIdx:      atomic.Uint64{},
		transient:     make(map[uint64]func()),
		newTransients: make(chan func()),
	}
}

// alloc allocates space in the hook map for the supplied hook and returns the
// identifier assigned to it. The hook is stored in the transient portion of
// the hookMap, so it will only be called on the next invoke.
func (m *hookMap) alloc(hook func()) uint64 {
	// We assume we never overflow a uint64. Seems OK.
	hookID := m.allocIdx.Add(1)
	if hookID == 0 {
		panic("hookMap allocIdx overflow")
	}
	m.transient[hookID] = hook

	return hookID
}

// invoke is used on a hook map to call all the registered hooks and then clear
// out the transient hooks so they are not called again.
func (m *hookMap) invoke() {
	for _, hook := range m.transient {
		hook()
	}

	m.transient = make(map[uint64]func())
}
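
// The function below is a hedged, illustrative sketch and not part of the
// production code path: it demonstrates the transient hook lifecycle used by
// the link's flush/commit hooks. A hook registered via alloc runs exactly
// once on the next invoke and is then discarded.
func exampleHookMapLifecycle() {
	hooks := newHookMap()

	fired := 0
	hooks.alloc(func() { fired++ })

	hooks.invoke() // fired == 1, the transient map is cleared.
	hooks.invoke() // No-op, the hook was dropped by the first invoke.

	_ = fired
}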

// hodlHtlc contains htlc data that is required for resolution.
type hodlHtlc struct {
	add        lnwire.UpdateAddHTLC
	sourceRef  channeldb.AddRef
	obfuscator hop.ErrorEncrypter
}

// NewChannelLink creates a new instance of a ChannelLink given a configuration
// and active channel that will be used to verify/apply updates to.
func NewChannelLink(cfg ChannelLinkConfig,
	channel *lnwallet.LightningChannel) ChannelLink {

	logPrefix := fmt.Sprintf("ChannelLink(%v):", channel.ChannelPoint())

	// If the max fee exposure isn't set, use the default.
	if cfg.MaxFeeExposure == 0 {
		cfg.MaxFeeExposure = DefaultMaxFeeExposure
	}

	return &channelLink{
		cfg:                 cfg,
		channel:             channel,
		hodlMap:             make(map[models.CircuitKey]hodlHtlc),
		hodlQueue:           queue.NewConcurrentQueue(10),
		log:                 build.NewPrefixLog(logPrefix, log),
		flushHooks:          newHookMap(),
		outgoingCommitHooks: newHookMap(),
		incomingCommitHooks: newHookMap(),
		ContextGuard:        fn.NewContextGuard(),
	}
}

// A compile time check to ensure channelLink implements the ChannelLink
// interface.
var _ ChannelLink = (*channelLink)(nil)

// Start starts all helper goroutines required for the operation of the channel
// link.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) Start() error {
	if !atomic.CompareAndSwapInt32(&l.started, 0, 1) {
		err := fmt.Errorf("channel link(%v): already started", l)
		l.log.Warn("already started")
		return err
	}

	l.log.Info("starting")

	// If the config supplied a watchtower client, ensure the channel is
	// registered before trying to use it during operation.
	if l.cfg.TowerClient != nil {
		err := l.cfg.TowerClient.RegisterChannel(
			l.ChanID(), l.channel.State().ChanType,
		)
		if err != nil {
			return err
		}
	}

	l.mailBox.ResetMessages()
	l.hodlQueue.Start()

	// Before launching the htlcManager goroutine, revert any circuits that
	// were marked open in the switch's circuit map, but did not make it
	// into a commitment txn. We use the next local htlc index as the cut
	// off point, since all indexes below that are committed. This action
	// is only performed if the link's final short channel ID has been
	// assigned, otherwise we would try to trim the htlcs belonging to the
	// all-zero, hop.Source ID.
	if l.ShortChanID() != hop.Source {
		localHtlcIndex, err := l.channel.NextLocalHtlcIndex()
		if err != nil {
			return fmt.Errorf("unable to retrieve next local "+
				"htlc index: %v", err)
		}

		// NOTE: This is automatically done by the switch when it
		// starts up, but is necessary to prevent inconsistencies in
		// the case that the link flaps. This is a result of a link's
		// life-cycle being shorter than that of the switch.
		chanID := l.ShortChanID()
		err = l.cfg.Circuits.TrimOpenCircuits(chanID, localHtlcIndex)
		if err != nil {
			return fmt.Errorf("unable to trim circuits above "+
				"local htlc index %d: %v", localHtlcIndex, err)
		}

		// Since the link is live, before we start the link we'll
		// update the ChainArbitrator with the set of new channel
		// signals for this channel.
		//
		// TODO(roasbeef): split goroutines within channel arb to avoid
		go func() {
			signals := &contractcourt.ContractSignals{
				ShortChanID: l.channel.ShortChanID(),
			}

			err := l.cfg.UpdateContractSignals(signals)
			if err != nil {
				l.log.Errorf("unable to update signals")
			}
		}()
	}

	l.updateFeeTimer = time.NewTimer(l.randomFeeUpdateTimeout())

	l.Wg.Add(1)
	go l.htlcManager()

	return nil
}

// Stop gracefully stops all active helper goroutines, then waits until they've
// exited.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) Stop() {
	if !atomic.CompareAndSwapInt32(&l.shutdown, 0, 1) {
		l.log.Warn("already stopped")
		return
	}

	l.log.Info("stopping")

	// As the link is stopping, we are no longer interested in htlc
	// resolutions coming from the invoice registry.
	l.cfg.Registry.HodlUnsubscribeAll(l.hodlQueue.ChanIn())

	if l.cfg.ChainEvents.Cancel != nil {
		l.cfg.ChainEvents.Cancel()
	}

	// Ensure the channel for the timer is drained.
	if l.updateFeeTimer != nil {
		if !l.updateFeeTimer.Stop() {
			select {
			case <-l.updateFeeTimer.C:
			default:
			}
		}
	}

	if l.hodlQueue != nil {
		l.hodlQueue.Stop()
	}

	close(l.Quit)
	l.Wg.Wait()

	// Now that the htlcManager has completely exited, reset the packet
	// courier. This allows the mailbox to re-evaluate any lingering Adds
	// that were delivered but didn't make it onto a commitment, so they
	// can be failed back if the link is offline for an extended period of
	// time. The error is ignored since it can only fail when the daemon is
	// exiting.
	_ = l.mailBox.ResetPackets()

	// As a final precaution, we will attempt to flush any uncommitted
	// preimages to the preimage cache. The preimages should be
	// re-delivered after channel reestablishment, however this adds an
	// extra layer of protection in case the peer never returns. Without
	// this, we will be unable to settle any contracts depending on the
	// preimages even though we had learned them at some point.
	err := l.cfg.PreimageCache.AddPreimages(l.uncommittedPreimages...)
	if err != nil {
		l.log.Errorf("unable to add preimages=%v to cache: %v",
			l.uncommittedPreimages, err)
	}
}

// WaitForShutdown blocks until the link finishes shutting down, which includes
// termination of all dependent goroutines.
func (l *channelLink) WaitForShutdown() {
	l.Wg.Wait()
}

// EligibleToForward returns a bool indicating if the channel is able to
// actively accept requests to forward HTLC's. We're able to forward HTLC's if
// we are eligible to update AND the channel isn't currently flushing the
// outgoing half of the channel.
func (l *channelLink) EligibleToForward() bool {
	return l.EligibleToUpdate() &&
		!l.IsFlushing(Outgoing)
}

// EligibleToUpdate returns a bool indicating if the channel is able to update
// channel state. We're able to update channel state if we know the remote
// party's next revocation point. Otherwise, we can't initiate new channel
// state. We also require that the short channel ID not be the all-zero source
// ID, meaning that the channel has had its ID finalized.
func (l *channelLink) EligibleToUpdate() bool {
	return l.channel.RemoteNextRevocation() != nil &&
		l.ShortChanID() != hop.Source &&
		l.isReestablished()
}

// EnableAdds sets the ChannelUpdateHandler state to allow UpdateAddHtlc's in
// the specified direction. It returns true if the state was changed and false
// if the desired state was already set before the method was called.
func (l *channelLink) EnableAdds(linkDirection LinkDirection) bool {
	if linkDirection == Outgoing {
		return l.isOutgoingAddBlocked.Swap(false)
	}

	return l.isIncomingAddBlocked.Swap(false)
}

// DisableAdds sets the ChannelUpdateHandler state to disallow UpdateAddHtlc's
// in the specified direction. It returns true if the state was changed and
// false if the desired state was already set before the method was called.
func (l *channelLink) DisableAdds(linkDirection LinkDirection) bool {
	if linkDirection == Outgoing {
		return !l.isOutgoingAddBlocked.Swap(true)
	}

	return !l.isIncomingAddBlocked.Swap(true)
}

// IsFlushing returns true when UpdateAddHtlc's are disabled in the direction
// of the argument.
func (l *channelLink) IsFlushing(linkDirection LinkDirection) bool {
	if linkDirection == Outgoing {
		return l.isOutgoingAddBlocked.Load()
	}

	return l.isIncomingAddBlocked.Load()
}
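
// The function below is a hedged, illustrative sketch and not part of the
// production code path: it spells out the idempotent semantics documented
// above, where EnableAdds/DisableAdds report whether the call actually
// changed the add-blocking state for the given direction.
func exampleAddFlowControl(l *channelLink) {
	_ = l.DisableAdds(Outgoing) // true, the flag was flipped.
	_ = l.DisableAdds(Outgoing) // false, adds were already disabled.

	_ = l.IsFlushing(Outgoing) // true while outgoing adds are blocked.

	_ = l.EnableAdds(Outgoing) // true, state changed back.
	_ = l.EnableAdds(Outgoing) // false, adds were already enabled.
}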

// OnFlushedOnce adds a hook that will be called the next time the channel
// state reaches zero htlcs. This hook will only ever be called once. If the
// channel state already has zero htlcs, then this will be called immediately.
func (l *channelLink) OnFlushedOnce(hook func()) {
	select {
	case l.flushHooks.newTransients <- hook:
	case <-l.Quit:
	}
}

// OnCommitOnce adds a hook that will be called the next time a CommitSig
// message is sent in the argument's LinkDirection. This hook will only ever be
// called once. If no CommitSig is owed in the argument's LinkDirection, then
// this hook will be run immediately.
func (l *channelLink) OnCommitOnce(direction LinkDirection, hook func()) {
	var queue chan func()

	if direction == Outgoing {
		queue = l.outgoingCommitHooks.newTransients
	} else {
		queue = l.incomingCommitHooks.newTransients
	}

	select {
	case queue <- hook:
	case <-l.Quit:
	}
}

// isReestablished returns true if the link has successfully completed the
// channel reestablishment dance.
func (l *channelLink) isReestablished() bool {
	return atomic.LoadInt32(&l.reestablished) == 1
}

// markReestablished signals that the remote peer has successfully exchanged
// channel reestablish messages and that the channel is ready to process
// subsequent messages.
func (l *channelLink) markReestablished() {
	atomic.StoreInt32(&l.reestablished, 1)
}

// IsUnadvertised returns true if the underlying channel is unadvertised.
func (l *channelLink) IsUnadvertised() bool {
	state := l.channel.State()
	return state.ChannelFlags&lnwire.FFAnnounceChannel == 0
}

// sampleNetworkFee samples the current fee rate on the network to get into the
// chain in a timely manner. The returned value is expressed in fee-per-kw, as
// this is the native rate used when computing the fee for commitment
// transactions, and the second-level HTLC transactions.
func (l *channelLink) sampleNetworkFee() (chainfee.SatPerKWeight, error) {
	// We'll first query for the sat/kw recommended to be confirmed within
	// 3 blocks.
	feePerKw, err := l.cfg.FeeEstimator.EstimateFeePerKW(3)
	if err != nil {
		return 0, err
	}

	l.log.Debugf("sampled fee rate for 3 block conf: %v sat/kw",
		int64(feePerKw))

	return feePerKw, nil
}

// shouldAdjustCommitFee returns true if we should update our commitment fee to
// match that of the network fee. We'll only update our commitment fee if the
// network fee is +/- 10% to our commitment fee or if our current commitment
// fee is below the minimum relay fee.
func shouldAdjustCommitFee(netFee, chanFee,
	minRelayFee chainfee.SatPerKWeight) bool {

	switch {
	// If the network fee is greater than our current commitment fee and
	// our current commitment fee is below the minimum relay fee then
	// we should switch to it no matter if it is less than a 10% increase.
	case netFee > chanFee && chanFee < minRelayFee:
		return true

	// If the network fee is greater than the commitment fee, then we'll
	// switch to it if it's at least 10% greater than the commit fee.
	case netFee > chanFee && netFee >= (chanFee+(chanFee*10)/100):
		return true

	// If the network fee is less than our commitment fee, then we'll
	// switch to it if it's at least 10% less than the commitment fee.
	case netFee < chanFee && netFee <= (chanFee-(chanFee*10)/100):
		return true

	// Otherwise, we won't modify our fee.
	default:
		return false
	}
}
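
// The function below is a hedged, illustrative sketch and not part of the
// production code path: it walks through the +/-10% hysteresis implemented by
// shouldAdjustCommitFee. The fee rates (in sat/kw) are hypothetical.
func exampleShouldAdjustCommitFee() {
	const minRelayFee chainfee.SatPerKWeight = 253

	// A 5% increase is ignored, while a full 10% increase triggers an
	// update.
	_ = shouldAdjustCommitFee(1050, 1000, minRelayFee) // false
	_ = shouldAdjustCommitFee(1100, 1000, minRelayFee) // true

	// If our current commitment fee sits below the relay floor, any higher
	// network fee triggers an update regardless of the 10% threshold.
	_ = shouldAdjustCommitFee(260, 250, minRelayFee) // true
}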

// failCb is used to cut down on the argument verbosity.
type failCb func(update *lnwire.ChannelUpdate1) lnwire.FailureMessage

// createFailureWithUpdate creates a ChannelUpdate when failing an incoming or
// outgoing HTLC. It may return a FailureMessage that references a channel's
// alias. If the channel does not have an alias, then the regular channel
// update from disk will be returned.
func (l *channelLink) createFailureWithUpdate(incoming bool,
	outgoingScid lnwire.ShortChannelID, cb failCb) lnwire.FailureMessage {

	// Determine which SCID to use in case we need to use aliases in the
	// ChannelUpdate.
	scid := outgoingScid
	if incoming {
		scid = l.ShortChanID()
	}

	// Try using the FailAliasUpdate function. If it returns nil, fallback
	// to the non-alias behavior.
	update := l.cfg.FailAliasUpdate(scid, incoming)
	if update == nil {
		// Fallback to the non-alias behavior.
		var err error
		update, err = l.cfg.FetchLastChannelUpdate(l.ShortChanID())
		if err != nil {
			return &lnwire.FailTemporaryNodeFailure{}
		}
	}

	return cb(update)
}
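
// The function below is a hedged, illustrative sketch and not part of the
// production code path. It shows how a failCb is typically supplied to
// createFailureWithUpdate: the callback wraps whatever ChannelUpdate1 the
// link resolves into a concrete wire failure message. It assumes that
// lnwire.NewTemporaryChannelFailure takes a single *lnwire.ChannelUpdate1
// argument.
func exampleFailCb() failCb {
	return func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
		// Assumption: the constructor takes the update by pointer.
		return lnwire.NewTemporaryChannelFailure(upd)
	}
}

// A caller would then pass the callback along with the onion-derived SCID,
// e.g. l.createFailureWithUpdate(false, outgoingScid, exampleFailCb()).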

// syncChanStates attempts to synchronize channel states with the remote party.
// This method is to be called upon reconnection after the initial funding
// flow. We'll compare our commitment chains with the remote party, and re-send
// either a dangling commit signature, a revocation, or both.
func (l *channelLink) syncChanStates() error {
	chanState := l.channel.State()

	l.log.Infof("Attempting to re-synchronize channel: %v", chanState)

	// First, we'll generate our ChanSync message to send to the other
	// side. Based on this message, the remote party will decide if they
	// need to retransmit any data or not.
	localChanSyncMsg, err := chanState.ChanSyncMsg()
	if err != nil {
		return fmt.Errorf("unable to generate chan sync message for "+
			"ChannelPoint(%v)", l.channel.ChannelPoint())
	}
	if err := l.cfg.Peer.SendMessage(true, localChanSyncMsg); err != nil {
		return fmt.Errorf("unable to send chan sync message for "+
			"ChannelPoint(%v): %v", l.channel.ChannelPoint(), err)
	}

	var msgsToReSend []lnwire.Message

	// Next, we'll wait indefinitely to receive the ChanSync message. The
	// first message sent MUST be the ChanSync message.
	select {
	case msg := <-l.upstream:
		l.log.Tracef("Received msg=%v from peer(%x)", msg.MsgType(),
			l.cfg.Peer.PubKey())

		remoteChanSyncMsg, ok := msg.(*lnwire.ChannelReestablish)
		if !ok {
			return fmt.Errorf("first message sent to sync "+
				"should be ChannelReestablish, instead "+
				"received: %T", msg)
		}

		// If the remote party indicates that they think we haven't
		// done any state updates yet, then we'll retransmit the
		// channel_ready message first. We do this, as at this point
		// we can't be sure if they've really received the
		// ChannelReady message.
		if remoteChanSyncMsg.NextLocalCommitHeight == 1 &&
			localChanSyncMsg.NextLocalCommitHeight == 1 &&
			!l.channel.IsPending() {

			l.log.Infof("resending ChannelReady message to peer")

			nextRevocation, err := l.channel.NextRevocationKey()
			if err != nil {
				return fmt.Errorf("unable to create next "+
					"revocation: %v", err)
			}

			channelReadyMsg := lnwire.NewChannelReady(
				l.ChanID(), nextRevocation,
			)
|
|
|
|
2023-07-12 04:04:31 +02:00
|
|
|
// If this is a taproot channel, then we'll send the
|
|
|
|
// very same nonce that we sent above, as they should
|
|
|
|
// take the latest verification nonce we send.
|
|
|
|
if chanState.ChanType.IsTaproot() {
|
2023-01-20 05:26:05 +01:00
|
|
|
//nolint:lll
|
|
|
|
channelReadyMsg.NextLocalNonce = localChanSyncMsg.LocalNonce
|
2023-07-12 04:04:31 +02:00
|
|
|
}
|
|
|
|
|
2022-04-04 22:44:51 +02:00
|
|
|
// For channels that negotiated the option-scid-alias
|
|
|
|
// feature bit, ensure that we send over the alias in
|
2023-03-15 22:36:58 +01:00
|
|
|
// the channel_ready message. We'll send the first
|
2022-04-04 22:44:51 +02:00
|
|
|
// alias we find for the channel since it does not
|
|
|
|
// matter which alias we send. We'll error out if no
|
|
|
|
// aliases are found.
|
|
|
|
if l.negotiatedAliasFeature() {
|
|
|
|
aliases := l.getAliases()
|
|
|
|
if len(aliases) == 0 {
|
|
|
|
// This shouldn't happen since we
|
|
|
|
// always add at least one alias before
|
|
|
|
// the channel reaches the link.
|
|
|
|
return fmt.Errorf("no aliases found")
|
|
|
|
}
|
|
|
|
|
|
|
|
// getAliases returns a copy of the alias slice
|
|
|
|
// so it is ok to use a pointer to the first
|
|
|
|
// entry.
|
2023-03-15 22:00:17 +01:00
|
|
|
channelReadyMsg.AliasScid = &aliases[0]
|
2022-04-04 22:44:51 +02:00
|
|
|
}
|
|
|
|
|
2023-03-15 22:00:17 +01:00
|
|
|
err = l.cfg.Peer.SendMessage(false, channelReadyMsg)
|
2018-01-17 05:15:51 +01:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("unable to re-send "+
|
2023-03-15 22:36:58 +01:00
|
|
|
"ChannelReady: %v", err)
|
2018-01-17 05:15:51 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// In any case, we'll then process their ChanSync message.
|
2019-10-02 15:53:29 +02:00
|
|
|
l.log.Info("received re-establishment message from remote side")
|
2018-01-17 05:15:51 +01:00
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
var (
|
|
|
|
openedCircuits []CircuitKey
|
|
|
|
closedCircuits []CircuitKey
|
|
|
|
)
|
|
|
|
|
2018-03-13 02:52:52 +01:00
|
|
|
// We've just received a ChanSync message from the remote
|
|
|
|
// party, so we'll process the message in order to determine
|
|
|
|
// if we need to re-transmit any messages to the remote party.
|
2024-10-17 13:38:34 +02:00
|
|
|
ctx, cancel := l.WithCtxQuitNoTimeout()
|
|
|
|
defer cancel()
|
2017-11-27 08:20:17 +01:00
|
|
|
msgsToReSend, openedCircuits, closedCircuits, err =
|
2024-10-17 13:38:34 +02:00
|
|
|
l.channel.ProcessChanSyncMsg(ctx, remoteChanSyncMsg)
|
2018-01-17 05:15:51 +01:00
|
|
|
if err != nil {
|
2018-07-12 11:02:54 +02:00
|
|
|
return err
|
2018-01-17 05:15:51 +01:00
|
|
|
}
|
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
// Repopulate any identifiers for circuits that may have been
|
2018-03-13 02:52:52 +01:00
|
|
|
// opened or unclosed. This may happen if we needed to
|
|
|
|
// retransmit a commitment signature message.
|
2017-11-27 08:20:17 +01:00
|
|
|
l.openedCircuits = openedCircuits
|
|
|
|
l.closedCircuits = closedCircuits
|
|
|
|
|
|
|
|
// Ensure that all packets have been removed from the
|
|
|
|
// link's mailbox.
|
2018-08-19 04:35:20 +02:00
|
|
|
if err := l.ackDownStreamPackets(); err != nil {
|
2017-11-27 08:20:17 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-01-17 05:15:51 +01:00
|
|
|
if len(msgsToReSend) > 0 {
|
2019-10-02 15:53:29 +02:00
|
|
|
l.log.Infof("sending %v updates to synchronize the "+
|
2019-10-01 11:16:24 +02:00
|
|
|
"state", len(msgsToReSend))
|
2018-01-17 05:15:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// If we have any messages to retransmit, we'll do so
|
|
|
|
// immediately so we return to a synchronized state as soon as
|
|
|
|
// possible.
|
|
|
|
for _, msg := range msgsToReSend {
|
2018-06-08 05:17:15 +02:00
|
|
|
l.cfg.Peer.SendMessage(false, msg)
|
2018-01-17 05:15:51 +01:00
|
|
|
}
|
|
|
|
|
2024-10-17 13:38:34 +02:00
|
|
|
case <-l.Quit:
|
2018-05-02 01:29:47 +02:00
|
|
|
return ErrLinkShuttingDown
|
2018-01-17 05:15:51 +01:00
|
|
|
}
|
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
return nil
|
|
|
|
}
|
2018-01-17 05:15:51 +01:00
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
// resolveFwdPkgs loads any forwarding packages for this link from disk, and
|
2018-03-13 02:52:52 +01:00
|
|
|
// reprocesses them in order. The primary goal is to make sure that any HTLCs
|
|
|
|
// we previously received are reinstated in memory, and forwarded to the switch
|
|
|
|
// if necessary. After a restart, this will also delete any previously
|
|
|
|
// completed packages.
|
2017-11-27 08:20:17 +01:00
|
|
|
func (l *channelLink) resolveFwdPkgs() error {
|
|
|
|
fwdPkgs, err := l.channel.LoadFwdPkgs()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2018-01-17 05:15:51 +01:00
|
|
|
}
|
|
|
|
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Debugf("loaded %d fwd pks", len(fwdPkgs))
|
2018-01-17 05:18:35 +01:00
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
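// Reprocess each loaded forwarding package in turn, restoring any
// HTLCs still outstanding and removing packages that have completed.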
for _, fwdPkg := range fwdPkgs {
|
2019-04-10 13:10:25 +02:00
|
|
|
if err := l.resolveFwdPkg(fwdPkg); err != nil {
|
2018-01-17 05:18:35 +01:00
|
|
|
return err
|
|
|
|
}
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// If any of our reprocessing steps require an update to the commitment
|
|
|
|
// txn, we initiate a state transition to capture all relevant changes.
|
2023-12-12 04:18:57 +01:00
|
|
|
if l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote) > 0 {
|
2017-11-27 08:20:17 +01:00
|
|
|
return l.updateCommitTx()
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// resolveFwdPkg interprets the FwdState of the provided package, either
|
|
|
|
// reprocesses any outstanding htlcs in the package, or performs garbage
|
|
|
|
// collection on the package.
|
2019-04-10 13:10:25 +02:00
|
|
|
func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) error {
|
2017-11-27 08:20:17 +01:00
|
|
|
// Remove any completed packages to clear up space.
|
|
|
|
if fwdPkg.State == channeldb.FwdStateCompleted {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Debugf("removing completed fwd pkg for height=%d",
|
2017-11-27 08:20:17 +01:00
|
|
|
fwdPkg.Height)
|
|
|
|
|
2020-08-19 16:52:44 +02:00
|
|
|
err := l.channel.RemoveFwdPkgs(fwdPkg.Height)
|
2018-01-17 05:18:35 +01:00
|
|
|
if err != nil {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Errorf("unable to remove fwd pkg for height=%d: "+
|
|
|
|
"%v", fwdPkg.Height, err)
|
2019-04-10 13:10:25 +02:00
|
|
|
return err
|
2018-01-17 05:18:35 +01:00
|
|
|
}
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
2018-01-17 05:18:35 +01:00
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
// Otherwise this is either a new package or one has gone through
|
2018-03-13 02:52:52 +01:00
|
|
|
// processing, but contains htlcs that need to be restored in memory.
|
|
|
|
// We replay this forwarding package to make sure our local mem state
|
|
|
|
// is resurrected, we mimic any original responses back to the remote
|
|
|
|
// party, and re-forward the relevant HTLCs to the switch.
|
2017-11-27 08:20:17 +01:00
|
|
|
|
|
|
|
// If the package is fully acked but not completed, it must still have
|
|
|
|
// settles and fails to propagate.
|
|
|
|
if !fwdPkg.SettleFailFilter.IsFull() {
|
2024-08-16 22:28:44 +02:00
|
|
|
l.processRemoteSettleFails(fwdPkg)
|
2018-01-17 05:18:35 +01:00
|
|
|
}
|
|
|
|
|
2018-03-13 02:52:52 +01:00
|
|
|
// Finally, replay *ALL ADDS* in this forwarding package. The
|
|
|
|
// downstream logic is able to filter out any duplicates, but we must
|
|
|
|
// shove the entire, original set of adds down the pipeline so that the
|
|
|
|
// batch of adds presented to the sphinx router does not ever change.
|
2017-11-27 08:20:17 +01:00
|
|
|
if !fwdPkg.AckFilter.IsFull() {
|
2024-08-16 22:19:36 +02:00
|
|
|
l.processRemoteAdds(fwdPkg)
|
2018-05-23 15:14:46 +02:00
|
|
|
|
|
|
|
// If the link failed during processing the adds, we must
|
|
|
|
// return to ensure we won't attempt to update the state
|
|
|
|
// further.
|
|
|
|
if l.failed {
|
2019-04-10 13:10:25 +02:00
|
|
|
return fmt.Errorf("link failed while " +
|
2018-05-23 15:14:46 +02:00
|
|
|
"processing remote adds")
|
|
|
|
}
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
|
|
|
|
2019-04-10 13:10:25 +02:00
|
|
|
return nil
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// fwdPkgGarbager periodically reads all forwarding packages from disk and
|
|
|
|
// removes those that can be discarded. It is safe to do this entirely in the
|
|
|
|
// background, since all state is coordinated on disk. This also ensures the
|
|
|
|
// link can continue to process messages and interleave database accesses.
|
|
|
|
//
|
|
|
|
// NOTE: This MUST be run as a goroutine.
|
|
|
|
func (l *channelLink) fwdPkgGarbager() {
|
2024-10-17 13:38:34 +02:00
|
|
|
defer l.Wg.Done()
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2018-08-01 21:42:38 +02:00
|
|
|
l.cfg.FwdPkgGCTicker.Resume()
|
2017-11-27 08:20:17 +01:00
|
|
|
defer l.cfg.FwdPkgGCTicker.Stop()
|
|
|
|
|
2020-08-19 17:09:48 +02:00
|
|
|
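// Run an initial garbage collection pass before waiting on the ticker
// so that completed packages left over from before a restart are
// removed promptly.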
if err := l.loadAndRemove(); err != nil {
|
|
|
|
l.log.Warnf("unable to run initial fwd pkgs gc: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
for {
|
|
|
|
select {
|
2018-08-01 21:42:38 +02:00
|
|
|
case <-l.cfg.FwdPkgGCTicker.Ticks():
|
2020-08-19 17:09:48 +02:00
|
|
|
if err := l.loadAndRemove(); err != nil {
|
|
|
|
l.log.Warnf("unable to remove fwd pkgs: %v",
|
2019-10-01 11:06:56 +02:00
|
|
|
err)
|
2017-11-27 08:20:17 +01:00
|
|
|
continue
|
|
|
|
}
|
2024-10-17 13:38:34 +02:00
|
|
|
case <-l.Quit:
|
2017-11-27 08:20:17 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
2018-01-17 05:18:35 +01:00
|
|
|
}
|
|
|
|
|
2020-08-19 17:09:48 +02:00
|
|
|
// loadAndRemove loads all the channel's forwarding packages and determines if
|
|
|
|
// they can be removed. It is called once before the FwdPkgGCTicker ticks so that
|
|
|
|
// a longer tick interval can be used.
|
|
|
|
func (l *channelLink) loadAndRemove() error {
|
|
|
|
fwdPkgs, err := l.channel.LoadFwdPkgs()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
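// Collect the heights of all forwarding packages that have been fully
// processed and are therefore safe to remove.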
var removeHeights []uint64
|
|
|
|
for _, fwdPkg := range fwdPkgs {
|
|
|
|
if fwdPkg.State != channeldb.FwdStateCompleted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
removeHeights = append(removeHeights, fwdPkg.Height)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If removeHeights is empty, return early so we don't use a db
|
|
|
|
// transaction.
|
|
|
|
if len(removeHeights) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return l.channel.RemoveFwdPkgs(removeHeights...)
|
|
|
|
}
|
|
|
|
|
2024-04-06 01:48:27 +02:00
|
|
|
// handleChanSyncErr performs the error handling logic in the case where we
|
|
|
|
// could not successfully syncChanStates with our channel peer.
|
|
|
|
func (l *channelLink) handleChanSyncErr(err error) {
|
|
|
|
l.log.Warnf("error when syncing channel states: %v", err)
|
|
|
|
|
|
|
|
var errDataLoss *lnwallet.ErrCommitSyncLocalDataLoss
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case errors.Is(err, ErrLinkShuttingDown):
|
|
|
|
l.log.Debugf("unable to sync channel states, link is " +
|
|
|
|
"shutting down")
|
|
|
|
return
|
|
|
|
|
|
|
|
// We failed syncing the commit chains, probably because the remote has
|
|
|
|
// lost state. We should force close the channel.
|
|
|
|
case errors.Is(err, lnwallet.ErrCommitSyncRemoteDataLoss):
|
|
|
|
fallthrough
|
|
|
|
|
|
|
|
// The remote sent us an invalid last commit secret, we should force
|
|
|
|
// close the channel.
|
|
|
|
// TODO(halseth): and permanently ban the peer?
|
|
|
|
case errors.Is(err, lnwallet.ErrInvalidLastCommitSecret):
|
|
|
|
fallthrough
|
|
|
|
|
|
|
|
// The remote sent us a commit point different from what they sent us
|
|
|
|
// before.
|
|
|
|
// TODO(halseth): ban peer?
|
|
|
|
case errors.Is(err, lnwallet.ErrInvalidLocalUnrevokedCommitPoint):
|
|
|
|
// We'll fail the link and tell the peer to force close the
|
|
|
|
// channel. Note that the database state is not updated here,
|
|
|
|
// but will be updated when the close transaction is ready to
|
|
|
|
// avoid going down before storing the transaction in the
|
|
|
|
// db.
|
|
|
|
l.failf(
|
|
|
|
LinkFailureError{
|
|
|
|
code: ErrSyncError,
|
|
|
|
FailureAction: LinkFailureForceClose,
|
|
|
|
},
|
|
|
|
"unable to synchronize channel states: %v", err,
|
|
|
|
)
|
|
|
|
|
|
|
|
// We have lost state and cannot safely force close the channel. Fail
|
|
|
|
// the channel and wait for the remote to hopefully force close it. The
|
|
|
|
// remote has sent us its latest unrevoked commitment point, and we'll
|
|
|
|
// store it in the database, such that we can attempt to recover the
|
|
|
|
// funds if the remote force closes the channel.
|
|
|
|
case errors.As(err, &errDataLoss):
|
|
|
|
err := l.channel.MarkDataLoss(
|
|
|
|
errDataLoss.CommitPoint,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
l.log.Errorf("unable to mark channel data loss: %v",
|
|
|
|
err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We determined the commit chains were not possible to sync. We
|
|
|
|
// cautiously fail the channel, but don't force close.
|
|
|
|
// TODO(halseth): can we safely force close in any cases where this
|
|
|
|
// error is returned?
|
|
|
|
case errors.Is(err, lnwallet.ErrCannotSyncCommitChains):
|
|
|
|
if err := l.channel.MarkBorked(); err != nil {
|
|
|
|
l.log.Errorf("unable to mark channel borked: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Other, unspecified error.
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
|
|
|
l.failf(
|
|
|
|
LinkFailureError{
|
|
|
|
code: ErrRecoveryError,
|
|
|
|
FailureAction: LinkFailureForceNone,
|
|
|
|
},
|
|
|
|
"unable to synchronize channel states: %v", err,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2017-06-16 23:58:02 +02:00
|
|
|
// htlcManager is the primary goroutine which drives a channel's commitment
|
2017-05-01 19:03:41 +02:00
|
|
|
// update state-machine in response to messages received via several channels.
|
2017-05-03 23:03:47 +02:00
|
|
|
// This goroutine reads messages from the upstream (remote) peer, and also from
|
|
|
|
// downstream channel managed by the channel link. In the event that an htlc
|
|
|
|
// needs to be forwarded, then a send-only forward handler is used, which sends
|
2022-04-17 00:53:32 +02:00
|
|
|
// htlc packets to the switch. Additionally, this goroutine handles acting upon
|
|
|
|
// all timeouts for any active HTLCs, manages the channel's revocation window,
|
|
|
|
// and also the htlc trickle queue+timer for this active channel.
|
2017-06-01 01:43:37 +02:00
|
|
|
//
|
|
|
|
// NOTE: This MUST be run as a goroutine.
|
2017-06-16 23:58:02 +02:00
|
|
|
func (l *channelLink) htlcManager() {
|
2017-11-11 04:37:47 +01:00
|
|
|
defer func() {
|
2018-07-31 07:25:38 +02:00
|
|
|
l.cfg.BatchTicker.Stop()
|
2024-10-17 13:38:34 +02:00
|
|
|
l.Wg.Done()
|
2019-10-01 11:16:24 +02:00
|
|
|
l.log.Infof("exited")
|
2017-11-11 04:37:47 +01:00
|
|
|
}()
|
2017-05-03 23:03:47 +02:00
|
|
|
|
2019-10-01 11:16:24 +02:00
|
|
|
l.log.Infof("HTLC manager started, bandwidth=%v", l.Bandwidth())
|
2017-05-01 19:03:41 +02:00
|
|
|
|
2020-03-07 04:42:58 +01:00
|
|
|
// Notify any clients that the link is now in the switch via an
|
2022-11-23 23:58:33 +01:00
|
|
|
// ActiveLinkEvent. We'll also defer an inactive link notification for
|
|
|
|
// when the link exits to ensure that every active notification is
|
|
|
|
// matched by an inactive one.
|
2024-01-29 22:55:20 +01:00
|
|
|
l.cfg.NotifyActiveLink(l.ChannelPoint())
|
|
|
|
defer l.cfg.NotifyInactiveLinkEvent(l.ChannelPoint())
|
2020-03-07 04:42:58 +01:00
|
|
|
|
2017-11-10 23:57:59 +01:00
|
|
|
// TODO(roasbeef): need to call wipe chan whenever D/C?
|
|
|
|
|
|
|
|
// If this isn't the first time that this channel link has been
|
|
|
|
// created, then we'll need to check to see if we need to
|
2018-01-17 05:15:51 +01:00
|
|
|
// re-synchronize state with the remote peer. settledHtlcs is a map of
|
|
|
|
// HTLCs that we re-settled as part of the channel state sync.
|
2017-07-09 01:30:20 +02:00
|
|
|
if l.cfg.SyncStates {
|
2018-05-02 01:29:47 +02:00
|
|
|
err := l.syncChanStates()
|
|
|
|
if err != nil {
|
2024-04-06 01:48:27 +02:00
|
|
|
l.handleChanSyncErr(err)
|
2017-07-09 01:30:20 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-02-06 15:14:36 +01:00
|
|
|
// If a shutdown message has previously been sent on this link, then we
|
|
|
|
// need to make sure that we have disabled any HTLC adds on the outgoing
|
|
|
|
// direction of the link and that we re-send the same shutdown message
|
|
|
|
// that we previously sent.
|
|
|
|
l.cfg.PreviouslySentShutdown.WhenSome(func(shutdown lnwire.Shutdown) {
|
|
|
|
// Immediately disallow any new outgoing HTLCs.
|
|
|
|
if !l.DisableAdds(Outgoing) {
|
|
|
|
l.log.Warnf("Outgoing link adds already disabled")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Re-send the shutdown message to the peer. Since syncChanStates
|
|
|
|
// would have sent any outstanding CommitSig, it is fine for us
|
|
|
|
// to immediately queue the shutdown message now.
|
|
|
|
err := l.cfg.Peer.SendMessage(false, &shutdown)
|
|
|
|
if err != nil {
|
|
|
|
l.log.Warnf("Error sending shutdown message: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
2019-09-19 21:46:56 +02:00
|
|
|
// We've successfully reestablished the channel, mark it as such to
|
|
|
|
// allow the switch to forward HTLCs in the outbound direction.
|
|
|
|
l.markReestablished()
|
|
|
|
|
2023-04-27 20:02:34 +02:00
|
|
|
// Now that we've received both channel_ready and channel reestablish,
|
2019-09-19 21:46:56 +02:00
|
|
|
// we can go ahead and send the active channel notification. We'll also
|
|
|
|
// defer the inactive notification for when the link exits to ensure
|
|
|
|
// that every active notification is matched by an inactive one.
|
2024-01-29 22:55:20 +01:00
|
|
|
l.cfg.NotifyActiveChannel(l.ChannelPoint())
|
|
|
|
defer l.cfg.NotifyInactiveChannel(l.ChannelPoint())
|
2019-09-19 21:46:56 +02:00
|
|
|
|
2018-03-13 02:52:52 +01:00
|
|
|
// With the channel states synced, we now reset the mailbox to ensure
|
|
|
|
// we start processing all unacked packets in order. This is done here
|
|
|
|
// to ensure that all acknowledgments that occur during channel
|
2017-11-27 08:20:17 +01:00
|
|
|
// resynchronization have taken effect, causing us only to pull unacked
|
|
|
|
// packets after starting to read from the downstream mailbox.
|
|
|
|
l.mailBox.ResetPackets()
|
|
|
|
|
|
|
|
// After cleaning up any memory pertaining to incoming packets, we now
|
|
|
|
// replay our forwarding packages to handle any htlcs that can be
|
2018-08-22 03:47:52 +02:00
|
|
|
// processed locally, or need to be forwarded out to the switch. We will
|
|
|
|
// only attempt to resolve packages if our short chan id indicates that
|
|
|
|
// the channel is not pending, otherwise we should have no htlcs to
|
|
|
|
// reforward.
|
2019-08-30 23:11:38 +02:00
|
|
|
if l.ShortChanID() != hop.Source {
|
2022-05-09 20:33:45 +02:00
|
|
|
err := l.resolveFwdPkgs()
|
|
|
|
switch err {
|
|
|
|
// No error was encountered, success.
|
|
|
|
case nil:
|
|
|
|
|
|
|
|
// If the duplicate keystone error was encountered, we'll fail
|
|
|
|
// without sending an Error message to the peer.
|
|
|
|
case ErrDuplicateKeystone:
|
2024-08-20 12:46:24 +02:00
|
|
|
l.failf(LinkFailureError{code: ErrCircuitError},
|
2022-05-09 20:33:45 +02:00
|
|
|
"temporary circuit error: %v", err)
|
|
|
|
return
|
|
|
|
|
|
|
|
// A non-nil error was encountered, send an Error message to
|
|
|
|
// the peer.
|
|
|
|
default:
|
2024-08-20 12:46:24 +02:00
|
|
|
l.failf(LinkFailureError{code: ErrInternalError},
|
2018-08-22 03:47:52 +02:00
|
|
|
"unable to resolve fwd pkgs: %v", err)
|
|
|
|
return
|
|
|
|
}
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2018-08-22 03:47:52 +02:00
|
|
|
// With our link's in-memory state fully reconstructed, spawn a
|
|
|
|
// goroutine to manage the reclamation of disk space occupied by
|
|
|
|
// completed forwarding packages.
|
2024-10-17 13:38:34 +02:00
|
|
|
l.Wg.Add(1)
|
2018-08-22 03:47:52 +02:00
|
|
|
go l.fwdPkgGarbager()
|
|
|
|
}
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2017-05-01 19:03:41 +02:00
|
|
|
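// Enter the link's main event loop, reacting to downstream packets
// from the switch, upstream messages from the peer, ticker events and
// hodl resolutions until the link exits.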
for {
|
2018-05-23 15:14:46 +02:00
|
|
|
// We must always check if we failed at some point processing
|
|
|
|
// the last update before processing the next.
|
|
|
|
if l.failed {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Errorf("link failed, exiting htlcManager")
|
2020-04-14 19:50:45 +02:00
|
|
|
return
|
2018-05-23 15:14:46 +02:00
|
|
|
}
|
|
|
|
|
2019-09-24 11:49:32 +02:00
|
|
|
// If the previous event resulted in a non-empty batch, resume
|
|
|
|
// the batch ticker so that it can be cleared. Otherwise pause
|
|
|
|
// the ticker to prevent waking up the htlcManager while the
|
|
|
|
// batch is empty.
|
2023-12-12 04:18:57 +01:00
|
|
|
numUpdates := l.channel.NumPendingUpdates(
|
|
|
|
lntypes.Local, lntypes.Remote,
|
|
|
|
)
|
|
|
|
if numUpdates > 0 {
|
2019-04-09 16:22:50 +02:00
|
|
|
l.cfg.BatchTicker.Resume()
|
2022-01-21 00:35:56 +01:00
|
|
|
l.log.Tracef("BatchTicker resumed, "+
|
2023-12-12 04:18:57 +01:00
|
|
|
"NumPendingUpdates(Local, Remote)=%d",
|
|
|
|
numUpdates,
|
|
|
|
)
|
2019-09-24 11:49:32 +02:00
|
|
|
} else {
|
|
|
|
l.cfg.BatchTicker.Pause()
|
2022-01-21 00:35:56 +01:00
|
|
|
l.log.Trace("BatchTicker paused due to zero " +
|
2023-12-12 04:18:57 +01:00
|
|
|
"NumPendingUpdates(Local, Remote)")
|
2019-04-09 16:22:50 +02:00
|
|
|
}
|
|
|
|
|
2017-05-01 19:03:41 +02:00
|
|
|
select {
|
2023-11-28 05:26:21 +01:00
|
|
|
// We have a new hook that needs to be run when we reach a clean
|
|
|
|
// channel state.
|
|
|
|
case hook := <-l.flushHooks.newTransients:
|
|
|
|
if l.channel.IsChannelClean() {
|
|
|
|
hook()
|
|
|
|
} else {
|
|
|
|
l.flushHooks.alloc(hook)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We have a new hook that needs to be run when we have
|
|
|
|
// committed all of our updates.
|
|
|
|
case hook := <-l.outgoingCommitHooks.newTransients:
|
|
|
|
if !l.channel.OweCommitment() {
|
|
|
|
hook()
|
|
|
|
} else {
|
|
|
|
l.outgoingCommitHooks.alloc(hook)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We have a new hook that needs to be run when our peer has
|
|
|
|
// committed all of their updates.
|
|
|
|
case hook := <-l.incomingCommitHooks.newTransients:
|
|
|
|
if !l.channel.NeedCommitment() {
|
|
|
|
hook()
|
|
|
|
} else {
|
|
|
|
l.incomingCommitHooks.alloc(hook)
|
|
|
|
}
|
|
|
|
|
2018-05-10 23:40:29 +02:00
|
|
|
// Our update fee timer has fired, so we'll check the network
|
|
|
|
// fee to see if we should adjust our commitment fee.
|
|
|
|
case <-l.updateFeeTimer.C:
|
|
|
|
l.updateFeeTimer.Reset(l.randomFeeUpdateTimeout())
|
2017-08-03 06:10:35 +02:00
|
|
|
|
2017-11-24 05:31:45 +01:00
|
|
|
// If we're not the initiator of the channel, we don't
|
|
|
|
// control the fees, so we can ignore this.
|
|
|
|
if !l.channel.IsInitiator() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we are the initiator, then we'll sample the
|
|
|
|
// current fee rate to get into the chain within 3
|
|
|
|
// blocks.
|
2019-08-24 01:04:59 +02:00
|
|
|
netFee, err := l.sampleNetworkFee()
|
2017-11-24 05:31:45 +01:00
|
|
|
if err != nil {
|
2019-10-01 11:16:24 +02:00
|
|
|
l.log.Errorf("unable to sample network fee: %v",
|
|
|
|
err)
|
2017-11-24 05:31:45 +01:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2021-06-23 14:28:25 +02:00
|
|
|
minRelayFee := l.cfg.FeeEstimator.RelayFeePerKW()
|
|
|
|
|
|
|
|
newCommitFee := l.channel.IdealCommitFeeRate(
|
|
|
|
netFee, minRelayFee,
|
2020-12-10 14:16:53 +01:00
|
|
|
l.cfg.MaxAnchorsCommitFeeRate,
|
2021-06-23 14:28:25 +02:00
|
|
|
l.cfg.MaxFeeAllocation,
|
2020-12-10 14:16:53 +01:00
|
|
|
)
|
2021-06-23 14:28:25 +02:00
|
|
|
|
|
|
|
// We determine if we should adjust the commitment fee
|
|
|
|
// based on the current commitment fee, the suggested
|
|
|
|
// new commitment fee and the current minimum relay fee
|
|
|
|
// rate.
|
|
|
|
commitFee := l.channel.CommitFeeRate()
|
|
|
|
if !shouldAdjustCommitFee(
|
|
|
|
newCommitFee, commitFee, minRelayFee,
|
|
|
|
) {
|
2022-02-07 13:58:28 +01:00
|
|
|
|
2017-11-24 05:31:45 +01:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we do, then we'll send a new UpdateFee message to
|
|
|
|
// the remote party, to be locked in with a new update.
|
2019-08-24 01:04:59 +02:00
|
|
|
if err := l.updateChannelFee(newCommitFee); err != nil {
|
2019-10-01 11:16:24 +02:00
|
|
|
l.log.Errorf("unable to update fee rate: %v",
|
|
|
|
err)
|
2017-11-24 05:31:45 +01:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-06-16 23:58:02 +02:00
|
|
|
// The underlying channel has notified us of a unilateral close
|
|
|
|
// carried out by the remote peer. In the case of such an
|
|
|
|
// event, we'll wipe the channel state from the peer, and mark
|
|
|
|
// the contract as fully settled. Afterwards we can exit.
|
2018-02-28 07:19:21 +01:00
|
|
|
//
|
|
|
|
// TODO(roasbeef): add force closure? also breach?
|
2018-03-16 15:19:11 +01:00
|
|
|
case <-l.cfg.ChainEvents.RemoteUnilateralClosure:
|
2019-10-02 15:53:29 +02:00
|
|
|
l.log.Warnf("remote peer has closed on-chain")
|
2017-05-01 19:03:41 +02:00
|
|
|
|
2017-11-10 23:57:59 +01:00
|
|
|
// TODO(roasbeef): remove all together
|
2017-07-09 01:20:56 +02:00
|
|
|
go func() {
|
2017-11-23 08:15:21 +01:00
|
|
|
chanPoint := l.channel.ChannelPoint()
|
2024-01-29 21:59:51 +01:00
|
|
|
l.cfg.Peer.WipeChannel(&chanPoint)
|
2017-07-09 01:20:56 +02:00
|
|
|
}()
|
2017-06-16 23:58:02 +02:00
|
|
|
|
2020-04-14 19:50:45 +02:00
|
|
|
return
|
2017-05-01 19:03:41 +02:00
|
|
|
|
2018-08-01 21:42:38 +02:00
|
|
|
case <-l.cfg.BatchTicker.Ticks():
|
2019-09-24 11:49:32 +02:00
|
|
|
// Attempt to extend the remote commitment chain
|
|
|
|
// including all the currently pending entries. If the
|
|
|
|
// send was unsuccessful, then abandon the update,
|
|
|
|
// waiting for the revocation window to open up.
|
2020-05-13 15:22:56 +02:00
|
|
|
if !l.updateCommitTxOrFail() {
|
2020-04-14 19:50:45 +02:00
|
|
|
return
|
2017-05-01 19:03:41 +02:00
|
|
|
}
|
|
|
|
|
2020-04-14 19:51:30 +02:00
|
|
|
case <-l.cfg.PendingCommitTicker.Ticks():
|
2024-08-20 12:46:24 +02:00
|
|
|
l.failf(
|
2023-05-20 02:18:00 +02:00
|
|
|
LinkFailureError{
|
|
|
|
code: ErrRemoteUnresponsive,
|
|
|
|
FailureAction: LinkFailureDisconnect,
|
|
|
|
},
|
|
|
|
"unable to complete dance",
|
|
|
|
)
|
2020-04-14 19:51:30 +02:00
|
|
|
return
|
|
|
|
|
2017-06-16 23:58:02 +02:00
|
|
|
// A message from the switch was just received. This indicates
|
|
|
|
// that the link is an intermediate hop in a multi-hop HTLC
|
|
|
|
// circuit.
|
2017-05-03 23:03:47 +02:00
|
|
|
case pkt := <-l.downstream:
|
2020-04-07 20:55:25 +02:00
|
|
|
l.handleDownstreamPkt(pkt)
|
2017-05-01 19:03:41 +02:00
|
|
|
|
2017-06-16 23:58:02 +02:00
|
|
|
// A message from the connected peer was just received. This
|
|
|
|
// indicates that we have a new incoming HTLC, either directly
|
|
|
|
// for us, or part of a multi-hop HTLC circuit.
|
2017-05-03 23:03:47 +02:00
|
|
|
case msg := <-l.upstream:
|
|
|
|
l.handleUpstreamMsg(msg)
|
|
|
|
|
2019-12-20 11:25:07 +01:00
|
|
|
// A htlc resolution is received. This means that we now have a
|
2019-02-11 12:01:05 +01:00
|
|
|
// resolution for a previously accepted htlc.
|
|
|
|
case hodlItem := <-l.hodlQueue.ChanOut():
|
2019-12-20 11:25:07 +01:00
|
|
|
htlcResolution := hodlItem.(invoices.HtlcResolution)
|
|
|
|
err := l.processHodlQueue(htlcResolution)
|
2022-05-09 20:33:45 +02:00
|
|
|
switch err {
|
|
|
|
// No error, success.
|
|
|
|
case nil:
|
|
|
|
|
|
|
|
// If the duplicate keystone error was encountered,
|
|
|
|
// fail back gracefully.
|
|
|
|
case ErrDuplicateKeystone:
|
2024-08-20 12:46:24 +02:00
|
|
|
l.failf(LinkFailureError{
|
|
|
|
code: ErrCircuitError,
|
|
|
|
}, "process hodl queue: "+
|
|
|
|
"temporary circuit error: %v",
|
|
|
|
err,
|
2022-05-09 20:33:45 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// Send an Error message to the peer.
|
|
|
|
default:
|
2024-08-20 12:46:24 +02:00
|
|
|
l.failf(LinkFailureError{
|
|
|
|
code: ErrInternalError,
|
|
|
|
}, "process hodl queue: unable to update "+
|
|
|
|
"commitment: %v", err,
|
2019-02-11 12:01:05 +01:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2024-10-17 13:38:34 +02:00
|
|
|
case <-l.Quit:
|
2020-04-14 19:50:45 +02:00
|
|
|
return
|
2017-05-01 19:03:41 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-20 11:25:07 +01:00
|
|
|
// processHodlQueue processes a received htlc resolution and continues reading
|
|
|
|
// from the hodl queue until no more resolutions remain. When this function
|
|
|
|
// returns without an error, the commit tx should be updated.
|
|
|
|
func (l *channelLink) processHodlQueue(
|
|
|
|
firstResolution invoices.HtlcResolution) error {
|
|
|
|
|
2019-02-11 12:01:05 +01:00
|
|
|
// Try to read all waiting resolution messages, so that they can all be
|
|
|
|
// processed in a single commitment tx update.
|
2019-12-20 11:25:07 +01:00
|
|
|
htlcResolution := firstResolution
|
2019-02-11 12:01:05 +01:00
|
|
|
loop:
|
|
|
|
for {
|
2019-08-14 21:11:34 +02:00
|
|
|
// Lookup all hodl htlcs that can be failed or settled with this event.
|
|
|
|
// The hodl htlc must be present in the map.
|
2020-02-06 18:35:10 +01:00
|
|
|
circuitKey := htlcResolution.CircuitKey()
|
2019-08-14 21:11:34 +02:00
|
|
|
hodlHtlc, ok := l.hodlMap[circuitKey]
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("hodl htlc not found: %v", circuitKey)
|
|
|
|
}
|
|
|
|
|
2019-12-20 11:25:07 +01:00
|
|
|
if err := l.processHtlcResolution(htlcResolution, hodlHtlc); err != nil {
|
2019-02-11 12:01:05 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-08-14 21:11:34 +02:00
|
|
|
// Clean up hodl map.
|
|
|
|
delete(l.hodlMap, circuitKey)
|
|
|
|
|
2019-02-11 12:01:05 +01:00
|
|
|
select {
|
|
|
|
case item := <-l.hodlQueue.ChanOut():
|
2019-12-20 11:25:07 +01:00
|
|
|
htlcResolution = item.(invoices.HtlcResolution)
|
2019-02-11 12:01:05 +01:00
|
|
|
default:
|
|
|
|
break loop
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the commitment tx.
|
|
|
|
if err := l.updateCommitTx(); err != nil {
|
2022-05-09 20:33:45 +02:00
|
|
|
return err
|
2019-02-11 12:01:05 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-20 11:25:07 +01:00
|
|
|
// processHtlcResolution applies a received htlc resolution to the provided
|
|
|
|
// htlc. When this function returns without an error, the commit tx should be
|
|
|
|
// updated.
|
|
|
|
func (l *channelLink) processHtlcResolution(resolution invoices.HtlcResolution,
|
2019-08-14 21:11:34 +02:00
|
|
|
htlc hodlHtlc) error {
|
|
|
|
|
2020-02-06 18:35:10 +01:00
|
|
|
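// Determine the incoming circuit that this resolution applies to.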
circuitKey := resolution.CircuitKey()
|
2019-02-20 12:11:15 +01:00
|
|
|
|
2020-02-06 18:35:10 +01:00
|
|
|
// Determine required action for the resolution based on the type of
|
|
|
|
// resolution we have received.
|
|
|
|
switch res := resolution.(type) {
|
|
|
|
// Settle htlcs that returned a settle resolution using the preimage
|
|
|
|
// in the resolution.
|
|
|
|
case *invoices.HtlcSettleResolution:
|
2020-04-14 19:47:47 +02:00
|
|
|
l.log.Debugf("received settle resolution for %v "+
|
2020-02-06 18:35:10 +01:00
|
|
|
"with outcome: %v", circuitKey, res.Outcome)
|
2019-04-16 12:11:20 +02:00
|
|
|
|
2024-08-16 21:48:04 +02:00
|
|
|
return l.settleHTLC(
|
2024-08-16 22:05:54 +02:00
|
|
|
res.Preimage, htlc.add.ID, htlc.sourceRef,
|
2024-08-16 21:48:04 +02:00
|
|
|
)
|
2019-02-20 12:11:15 +01:00
|
|
|
|
2020-02-06 18:35:10 +01:00
|
|
|
// For htlc failures, we get the relevant failure message based
|
|
|
|
// on the failure resolution and then fail the htlc.
|
|
|
|
case *invoices.HtlcFailResolution:
|
|
|
|
l.log.Debugf("received cancel resolution for "+
|
|
|
|
"%v with outcome: %v", circuitKey, res.Outcome)
|
2019-04-09 16:22:50 +02:00
|
|
|
|
2020-02-06 18:35:10 +01:00
|
|
|
// Get the lnwire failure message based on the resolution
|
|
|
|
// result.
|
2024-08-16 22:05:54 +02:00
|
|
|
failure := getResolutionFailure(res, htlc.add.Amount)
|
2019-02-20 12:11:15 +01:00
|
|
|
|
2020-02-06 18:35:10 +01:00
|
|
|
l.sendHTLCError(
|
2024-08-16 22:05:54 +02:00
|
|
|
htlc.add, htlc.sourceRef, failure, htlc.obfuscator,
|
2024-08-16 21:45:35 +02:00
|
|
|
true,
|
2020-02-06 18:35:10 +01:00
|
|
|
)
|
|
|
|
return nil
|
|
|
|
|
|
|
|
// Fail if we do not get a settle or fail resolution, since we
|
|
|
|
// are only expecting to handle settles and fails.
|
|
|
|
default:
|
|
|
|
return fmt.Errorf("unknown htlc resolution type: %T",
|
|
|
|
resolution)
|
|
|
|
}
|
2019-02-20 12:11:15 +01:00
|
|
|
}
|
|
|
|
|
2019-12-20 11:25:08 +01:00
|
|
|
// getResolutionFailure returns the wire message that a htlc resolution should
|
|
|
|
// be failed with.
|
2020-02-06 18:35:10 +01:00
|
|
|
func getResolutionFailure(resolution *invoices.HtlcFailResolution,
|
2020-02-06 18:35:17 +01:00
|
|
|
amount lnwire.MilliSatoshi) *LinkError {
|
2019-12-20 11:25:08 +01:00
|
|
|
|
2020-02-06 18:35:10 +01:00
|
|
|
// If the resolution has been resolved as part of a MPP timeout,
|
|
|
|
// we need to fail the htlc with lnwire.FailMppTimeout.
|
2019-12-20 11:25:08 +01:00
|
|
|
if resolution.Outcome == invoices.ResultMppTimeout {
|
2020-02-06 18:35:17 +01:00
|
|
|
return NewDetailedLinkError(
|
|
|
|
&lnwire.FailMPPTimeout{}, resolution.Outcome,
|
|
|
|
)
|
2019-12-20 11:25:08 +01:00
|
|
|
}
|
|
|
|
|
2020-02-06 18:35:10 +01:00
|
|
|
// If the htlc is not a MPP timeout, we fail it with
|
|
|
|
// FailIncorrectDetails. This error is sent for invoice payment
|
|
|
|
// failures such as underpayment/expiry too soon and hodl invoices
|
|
|
|
// (which return FailIncorrectDetails to avoid leaking information).
|
2020-02-06 18:35:17 +01:00
|
|
|
incorrectDetails := lnwire.NewFailIncorrectDetails(
|
2019-12-20 11:25:08 +01:00
|
|
|
amount, uint32(resolution.AcceptHeight),
|
|
|
|
)
|
2020-02-06 18:35:17 +01:00
|
|
|
|
|
|
|
return NewDetailedLinkError(incorrectDetails, resolution.Outcome)
|
2019-12-20 11:25:08 +01:00
|
|
|
}
|
|
|
|
|
2018-05-10 23:40:29 +02:00
|
|
|
// randomFeeUpdateTimeout returns a random timeout between the bounds defined
|
|
|
|
// within the link's configuration that will be used to determine when the link
|
|
|
|
// should propose an update to its commitment fee rate.
|
|
|
|
func (l *channelLink) randomFeeUpdateTimeout() time.Duration {
|
2024-04-23 09:49:04 +02:00
|
|
|
lower := int64(l.cfg.MinUpdateTimeout)
|
|
|
|
upper := int64(l.cfg.MaxUpdateTimeout)
|
2018-06-30 03:22:08 +02:00
|
|
|
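// Sample uniformly at random from the half-open interval
// [lower, upper).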
return time.Duration(prand.Int63n(upper-lower) + lower)
|
2018-05-10 23:40:29 +02:00
|
|
|
}
|
|
|
|
|
2020-05-13 15:29:31 +02:00
|
|
|
// handleDownstreamUpdateAdd processes an UpdateAddHTLC packet sent from the
|
|
|
|
// downstream HTLC Switch.
|
2020-04-13 17:29:52 +02:00
|
|
|
func (l *channelLink) handleDownstreamUpdateAdd(pkt *htlcPacket) error {
|
|
|
|
htlc, ok := pkt.htlc.(*lnwire.UpdateAddHTLC)
|
|
|
|
if !ok {
|
|
|
|
return errors.New("not an UpdateAddHTLC packet")
|
|
|
|
}
|
2020-05-13 15:29:31 +02:00
|
|
|
|
2023-11-26 22:45:55 +01:00
|
|
|
// If we are flushing the link in the outgoing direction we can't add
|
|
|
|
// new htlcs to the link, and we need to bounce it.
|
|
|
|
if l.IsFlushing(Outgoing) {
|
|
|
|
l.mailBox.FailAdd(pkt)
|
|
|
|
|
|
|
|
return NewDetailedLinkError(
|
|
|
|
&lnwire.FailPermanentChannelFailure{},
|
|
|
|
OutgoingFailureLinkNotEligible,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2020-05-13 15:29:31 +02:00
|
|
|
// If hodl.AddOutgoing mode is active, we exit early to simulate
|
|
|
|
// arbitrary delays between the switch adding an ADD to the
|
|
|
|
// mailbox, and the HTLC being added to the commitment state.
|
|
|
|
if l.cfg.HodlMask.Active(hodl.AddOutgoing) {
|
|
|
|
l.log.Warnf(hodl.AddOutgoing.Warning())
|
|
|
|
l.mailBox.AckPacket(pkt.inKey())
|
2020-04-13 17:29:52 +02:00
|
|
|
return nil
|
2020-05-13 15:29:31 +02:00
|
|
|
}
|
|
|
|
|
2024-06-03 18:43:33 +02:00
|
|
|
// Check if we can add the HTLC here without exceeding the max fee
|
|
|
|
// exposure threshold.
|
|
|
|
if l.isOverexposedWithHtlc(htlc, false) {
|
|
|
|
l.log.Debugf("Unable to handle downstream HTLC - max fee " +
|
|
|
|
"exposure exceeded")
|
|
|
|
|
|
|
|
l.mailBox.FailAdd(pkt)
|
|
|
|
|
|
|
|
return NewDetailedLinkError(
|
|
|
|
lnwire.NewTemporaryChannelFailure(nil),
|
|
|
|
OutgoingFailureDownstreamHtlcAdd,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2020-05-13 15:29:31 +02:00
|
|
|
// A new payment has been initiated via the downstream channel,
|
|
|
|
// so we add the new HTLC to our local log, then update the
|
|
|
|
// commitment chains.
|
|
|
|
htlc.ChanID = l.ChanID()
|
|
|
|
openCircuitRef := pkt.inKey()
|
2023-11-05 11:29:34 +01:00
|
|
|
|
|
|
|
// We enforce the fee buffer for the commitment transaction because
|
|
|
|
// we are in control of adding this htlc. Nothing has locked-in yet so
|
|
|
|
// we can securely enforce the fee buffer which is only relevant if we
|
|
|
|
// are the initiator of the channel.
|
2020-05-13 15:29:31 +02:00
|
|
|
index, err := l.channel.AddHTLC(htlc, &openCircuitRef)
|
|
|
|
if err != nil {
|
|
|
|
// The HTLC was unable to be added to the state machine,
|
|
|
|
// as a result, we'll signal the switch to cancel the
|
|
|
|
// pending payment.
|
|
|
|
l.log.Warnf("Unable to handle downstream add HTLC: %v",
|
|
|
|
err)
|
|
|
|
|
|
|
|
// Remove this packet from the link's mailbox, this
|
|
|
|
// prevents it from being reprocessed if the link
|
|
|
|
// restarts and resets its mailbox. If this response
|
|
|
|
// doesn't make it back to the originating link, it will
|
|
|
|
// be rejected upon attempting to reforward the Add to
|
|
|
|
// the switch, since the circuit was never fully opened,
|
|
|
|
// and the forwarding package shows it as
|
|
|
|
// unacknowledged.
|
|
|
|
l.mailBox.FailAdd(pkt)
|
|
|
|
|
2020-04-13 17:29:52 +02:00
|
|
|
return NewDetailedLinkError(
|
|
|
|
lnwire.NewTemporaryChannelFailure(nil),
|
|
|
|
OutgoingFailureDownstreamHtlcAdd,
|
|
|
|
)
|
2020-05-13 15:29:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
l.log.Tracef("received downstream htlc: payment_hash=%x, "+
|
|
|
|
"local_log_index=%v, pend_updates=%v",
|
|
|
|
htlc.PaymentHash[:], index,
|
2023-12-12 04:18:57 +01:00
|
|
|
l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote))
|
2020-05-13 15:29:31 +02:00
|
|
|
|
|
|
|
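// Record the outgoing channel and the newly assigned HTLC index on
// both the packet and the wire message so the circuit can be tracked.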
pkt.outgoingChanID = l.ShortChanID()
|
|
|
|
pkt.outgoingHTLCID = index
|
|
|
|
htlc.ID = index
|
|
|
|
|
|
|
|
l.log.Debugf("queueing keystone of ADD open circuit: %s->%s",
|
|
|
|
pkt.inKey(), pkt.outKey())
|
|
|
|
|
|
|
|
l.openedCircuits = append(l.openedCircuits, pkt.inKey())
|
|
|
|
l.keystoneBatch = append(l.keystoneBatch, pkt.keystone())
|
|
|
|
|
|
|
|
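// Relay the ADD to the remote peer.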
_ = l.cfg.Peer.SendMessage(false, htlc)
|
|
|
|
|
|
|
|
// Send a forward event notification to htlcNotifier.
|
|
|
|
l.cfg.HtlcNotifier.NotifyForwardingEvent(
|
|
|
|
newHtlcKey(pkt),
|
|
|
|
HtlcInfo{
|
|
|
|
IncomingTimeLock: pkt.incomingTimeout,
|
|
|
|
IncomingAmt: pkt.incomingAmount,
|
|
|
|
OutgoingTimeLock: htlc.Expiry,
|
|
|
|
OutgoingAmt: htlc.Amount,
|
|
|
|
},
|
|
|
|
getEventType(pkt),
|
|
|
|
)
|
|
|
|
|
|
|
|
l.tryBatchUpdateCommitTx()
|
2020-04-13 17:29:52 +02:00
|
|
|
|
|
|
|
return nil
|
2020-05-13 15:29:31 +02:00
|
|
|
}
|
|
|
|
|
2020-04-07 20:55:25 +02:00
|
|
|
// handleDownstreamPkt processes an HTLC packet sent from the downstream HTLC
|
2017-05-01 19:03:41 +02:00
|
|
|
// Switch. Possible messages sent by the switch include requests to forward new
|
|
|
|
// HTLCs, timeout previously cleared HTLCs, and finally to settle currently
|
|
|
|
// cleared HTLCs with the upstream peer.
|
2017-09-26 01:09:48 +02:00
|
|
|
//
|
|
|
|
// TODO(roasbeef): add sync ntfn to ensure switch always has consistent view?
|
2020-04-07 20:55:25 +02:00
|
|
|
func (l *channelLink) handleDownstreamPkt(pkt *htlcPacket) {
|
2017-05-03 23:03:47 +02:00
|
|
|
switch htlc := pkt.htlc.(type) {
|
2017-05-01 19:03:41 +02:00
|
|
|
case *lnwire.UpdateAddHTLC:
|
2020-04-13 17:29:52 +02:00
|
|
|
// Handle add message. The returned error can be ignored,
|
|
|
|
// because it is also sent through the mailbox.
|
|
|
|
_ = l.handleDownstreamUpdateAdd(pkt)
|
2020-05-13 15:26:41 +02:00
|
|
|
|
2018-02-07 04:11:11 +01:00
|
|
|
case *lnwire.UpdateFulfillHTLC:
|
2018-04-27 11:51:13 +02:00
|
|
|
// If hodl.SettleOutgoing mode is active, we exit early to
|
|
|
|
// simulate arbitrary delays between the switch adding the
|
|
|
|
// SETTLE to the mailbox, and the HTLC being added to the
|
|
|
|
// commitment state.
|
2019-08-14 19:57:31 +02:00
|
|
|
if l.cfg.HodlMask.Active(hodl.SettleOutgoing) {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Warnf(hodl.SettleOutgoing.Warning())
|
2018-07-27 12:21:12 +02:00
|
|
|
l.mailBox.AckPacket(pkt.inKey())
|
2018-04-27 11:51:13 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-05-01 19:03:41 +02:00
|
|
|
// An HTLC we forward to the switch has just settled somewhere
|
|
|
|
// upstream. Therefore we settle the HTLC within our local
|
|
|
|
// state machine.
|
2018-07-27 12:21:12 +02:00
|
|
|
inKey := pkt.inKey()
|
|
|
|
err := l.channel.SettleHTLC(
|
2018-02-24 07:40:55 +01:00
|
|
|
htlc.PaymentPreimage,
|
|
|
|
pkt.incomingHTLCID,
|
2017-11-27 08:20:17 +01:00
|
|
|
pkt.sourceRef,
|
|
|
|
pkt.destRef,
|
2018-07-27 12:21:12 +02:00
|
|
|
&inKey,
|
|
|
|
)
|
|
|
|
if err != nil {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Errorf("unable to settle incoming HTLC for "+
|
2018-07-27 12:21:12 +02:00
|
|
|
"circuit-key=%v: %v", inKey, err)
|
|
|
|
|
|
|
|
// If the HTLC index for Settle response was not known
|
|
|
|
// to our commitment state, it has already been
|
|
|
|
// cleaned up by a prior response. We'll thus try to
|
|
|
|
// clean up any lingering state to ensure we don't
|
|
|
|
// continue reforwarding.
|
|
|
|
if _, ok := err.(lnwallet.ErrUnknownHtlcIndex); ok {
|
|
|
|
l.cleanupSpuriousResponse(pkt)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove the packet from the link's mailbox to ensure
|
|
|
|
// it doesn't get replayed after a reconnection.
|
|
|
|
l.mailBox.AckPacket(inKey)
|
|
|
|
|
2017-05-01 19:03:41 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-10-02 15:53:29 +02:00
|
|
|
l.log.Debugf("queueing removal of SETTLE closed circuit: "+
|
2019-10-01 11:06:56 +02:00
|
|
|
"%s->%s", pkt.inKey(), pkt.outKey())
|
2017-11-27 08:20:17 +01:00
|
|
|
|
|
|
|
l.closedCircuits = append(l.closedCircuits, pkt.inKey())
|
|
|
|
|
2017-05-01 19:03:41 +02:00
|
|
|
// With the HTLC settled, we'll need to populate the wire
|
|
|
|
// message to target the specific channel and HTLC to be
|
2019-10-03 17:22:43 +02:00
|
|
|
// canceled.
|
2017-05-03 23:03:47 +02:00
|
|
|
htlc.ChanID = l.ChanID()
|
2017-10-30 19:56:51 +01:00
|
|
|
htlc.ID = pkt.incomingHTLCID
|
2017-05-01 19:03:41 +02:00
|
|
|
|
|
|
|
// Then we send the HTLC settle message to the connected peer
|
|
|
|
// so we can continue the propagation of the settle message.
|
2018-06-08 05:17:15 +02:00
|
|
|
l.cfg.Peer.SendMessage(false, htlc)
|
2017-05-01 19:03:41 +02:00
|
|
|
|
2020-02-19 17:03:22 +01:00
|
|
|
// Send a settle event notification to htlcNotifier.
|
|
|
|
l.cfg.HtlcNotifier.NotifySettleEvent(
|
|
|
|
newHtlcKey(pkt),
|
2021-06-15 21:01:24 +02:00
|
|
|
htlc.PaymentPreimage,
|
2020-02-19 17:03:22 +01:00
|
|
|
getEventType(pkt),
|
|
|
|
)
|
|
|
|
|
2020-05-13 15:26:41 +02:00
|
|
|
// Immediately update the commitment tx to minimize latency.
|
|
|
|
l.updateCommitTxOrFail()
|
|
|
|
|
2017-05-01 19:03:41 +02:00
|
|
|
case *lnwire.UpdateFailHTLC:
|
2018-04-27 11:51:13 +02:00
|
|
|
// If hodl.FailOutgoing mode is active, we exit early to
|
|
|
|
// simulate arbitrary delays between the switch adding a FAIL to
|
|
|
|
// the mailbox, and the HTLC being added to the commitment
|
|
|
|
// state.
|
2019-08-14 19:57:31 +02:00
|
|
|
if l.cfg.HodlMask.Active(hodl.FailOutgoing) {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Warnf(hodl.FailOutgoing.Warning())
|
2018-07-27 12:21:12 +02:00
|
|
|
l.mailBox.AckPacket(pkt.inKey())
|
2018-04-27 11:51:13 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-05-01 19:03:41 +02:00
|
|
|
// An HTLC cancellation has been triggered somewhere upstream,
|
|
|
|
// we'll remove the HTLC from our local state machine.
|
2018-07-27 12:21:12 +02:00
|
|
|
inKey := pkt.inKey()
|
|
|
|
err := l.channel.FailHTLC(
|
2018-02-24 07:40:55 +01:00
|
|
|
pkt.incomingHTLCID,
|
|
|
|
htlc.Reason,
|
2017-11-27 08:20:17 +01:00
|
|
|
pkt.sourceRef,
|
|
|
|
pkt.destRef,
|
2018-07-27 12:21:12 +02:00
|
|
|
&inKey,
|
|
|
|
)
|
|
|
|
if err != nil {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Errorf("unable to cancel incoming HTLC for "+
|
2018-07-27 12:21:12 +02:00
|
|
|
"circuit-key=%v: %v", inKey, err)
|
|
|
|
|
|
|
|
// If the HTLC index for Fail response was not known to
|
|
|
|
// our commitment state, it has already been cleaned up
|
|
|
|
// by a prior response. We'll thus try to clean up any
|
|
|
|
// lingering state to ensure we don't continue
|
|
|
|
// reforwarding.
|
|
|
|
if _, ok := err.(lnwallet.ErrUnknownHtlcIndex); ok {
|
|
|
|
l.cleanupSpuriousResponse(pkt)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove the packet from the link's mailbox to ensure
|
|
|
|
// it doesn't get replayed after a reconnection.
|
|
|
|
l.mailBox.AckPacket(inKey)
|
|
|
|
|
2017-05-01 19:03:41 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-10-02 15:53:29 +02:00
|
|
|
l.log.Debugf("queueing removal of FAIL closed circuit: %s->%s",
|
2017-11-27 08:20:17 +01:00
|
|
|
pkt.inKey(), pkt.outKey())
|
|
|
|
|
|
|
|
l.closedCircuits = append(l.closedCircuits, pkt.inKey())
|
|
|
|
|
2017-05-01 19:03:41 +02:00
|
|
|
// With the HTLC removed, we'll need to populate the wire
|
|
|
|
// message to target the specific channel and HTLC to be
|
2019-10-03 17:22:43 +02:00
|
|
|
// canceled. The "Reason" field will have already been set
|
2017-05-01 19:03:41 +02:00
|
|
|
// within the switch.
|
2017-05-03 23:03:47 +02:00
|
|
|
htlc.ChanID = l.ChanID()
|
2017-10-30 19:56:51 +01:00
|
|
|
htlc.ID = pkt.incomingHTLCID
|
2017-05-01 19:03:41 +02:00
|
|
|
|
2020-02-19 17:03:22 +01:00
|
|
|
// We send the HTLC message to the peer which initially created
|
2024-04-08 21:51:15 +02:00
|
|
|
// the HTLC. If the incoming blinding point is non-nil, we
|
|
|
|
// know that we are a relaying node in a blinded path.
|
|
|
|
// Otherwise, we're either an introduction node or not part of
|
|
|
|
// a blinded path at all.
|
|
|
|
if err := l.sendIncomingHTLCFailureMsg(
|
|
|
|
htlc.ID,
|
|
|
|
pkt.obfuscator,
|
|
|
|
htlc.Reason,
|
|
|
|
); err != nil {
|
|
|
|
l.log.Errorf("unable to send HTLC failure: %v",
|
|
|
|
err)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
2020-02-19 17:03:22 +01:00
|
|
|
|
|
|
|
// If the packet does not have a link failure set, it failed
|
|
|
|
// further down the route so we notify a forwarding failure.
|
|
|
|
// Otherwise, we notify a link failure because it failed at our
|
|
|
|
// node.
|
|
|
|
if pkt.linkFailure != nil {
|
|
|
|
l.cfg.HtlcNotifier.NotifyLinkFailEvent(
|
|
|
|
newHtlcKey(pkt),
|
|
|
|
newHtlcInfo(pkt),
|
|
|
|
getEventType(pkt),
|
|
|
|
pkt.linkFailure,
|
|
|
|
false,
|
|
|
|
)
|
|
|
|
} else {
|
|
|
|
l.cfg.HtlcNotifier.NotifyForwardingFailEvent(
|
|
|
|
newHtlcKey(pkt), getEventType(pkt),
|
|
|
|
)
|
|
|
|
}
|
2017-05-01 19:03:41 +02:00
|
|
|
|
2020-05-13 15:26:41 +02:00
|
|
|
// Immediately update the commitment tx to minimize latency.
|
|
|
|
l.updateCommitTxOrFail()
|
|
|
|
}
|
|
|
|
}
|
2019-04-10 15:28:55 +02:00
|
|
|
|
2020-05-13 15:26:41 +02:00
|
|
|
// tryBatchUpdateCommitTx updates the commitment transaction if the batch is
|
|
|
|
// full.
|
|
|
|
func (l *channelLink) tryBatchUpdateCommitTx() {
|
2023-12-12 04:18:57 +01:00
|
|
|
pending := l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote)
|
|
|
|
if pending < uint64(l.cfg.BatchSize) {
|
2020-05-13 15:26:41 +02:00
|
|
|
return
|
2017-05-01 19:03:41 +02:00
|
|
|
}
|
2020-05-13 15:26:41 +02:00
|
|
|
|
|
|
|
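// The number of pending updates has reached the configured batch
// size, so attempt to extend the remote commitment chain now.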
l.updateCommitTxOrFail()
|
2017-05-01 19:03:41 +02:00
|
|
|
}

// cleanupSpuriousResponse attempts to ack any AddRef or SettleFailRef
// associated with this packet. If successful in doing so, it will also purge
// the open circuit from the circuit map and remove the packet from the link's
// mailbox.
func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) {
	inKey := pkt.inKey()

	l.log.Debugf("cleaning up spurious response for incoming "+
		"circuit-key=%v", inKey)

	// If the htlc packet doesn't have a source reference, it is unsafe to
	// proceed, as skipping this ack may cause the htlc to be reforwarded.
	if pkt.sourceRef == nil {
		l.log.Errorf("unable to cleanup response for incoming "+
			"circuit-key=%v, does not contain source reference",
			inKey)
		return
	}

	// If the source reference is present, we will try to prevent this link
	// from resending the packet to the switch. To do so, we ack the AddRef
	// of the incoming HTLC belonging to this link.
	err := l.channel.AckAddHtlcs(*pkt.sourceRef)
	if err != nil {
		l.log.Errorf("unable to ack AddRef for incoming "+
			"circuit-key=%v: %v", inKey, err)

		// If this operation failed, it is unsafe to attempt removal of
		// the destination reference or circuit, so we exit early. The
		// cleanup may proceed with a different packet in the future
		// that succeeds on this step.
		return
	}

	// Now that we know this link will stop retransmitting Adds to the
	// switch, we can begin to tear down the response reference and circuit
	// map.
	//
	// If the packet includes a destination reference, then a response for
	// this HTLC was locked into the outgoing channel. Attempt to remove
	// this reference, so we stop retransmitting the response internally.
	// Even if this fails, we will proceed in trying to delete the circuit.
	// When retransmitting responses, the destination references will be
	// cleaned up if an open circuit is not found in the circuit map.
	if pkt.destRef != nil {
		err := l.channel.AckSettleFails(*pkt.destRef)
		if err != nil {
			l.log.Errorf("unable to ack SettleFailRef "+
				"for incoming circuit-key=%v: %v",
				inKey, err)
		}
	}

	l.log.Debugf("deleting circuit for incoming circuit-key=%x", inKey)

	// With all known references acked, we can now safely delete the circuit
	// from the switch's circuit map, as the state is no longer needed.
	err = l.cfg.Circuits.DeleteCircuits(inKey)
	if err != nil {
		l.log.Errorf("unable to delete circuit for "+
			"circuit-key=%v: %v", inKey, err)
	}
}

// handleUpstreamMsg processes wire messages related to commitment state
// updates from the upstream peer. The upstream peer is the peer with whom we
// have a direct channel, updating our respective commitment chains.
func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
	switch msg := msg.(type) {
	case *lnwire.UpdateAddHTLC:
		if l.IsFlushing(Incoming) {
			// This is forbidden by the protocol specification.
			// The best chance we have to deal with this is to drop
			// the connection. This should roll back the channel
			// state to the last CommitSig. If the remote has
			// already sent a CommitSig we haven't received yet,
			// channel state will be re-synchronized with a
			// ChannelReestablish message upon reconnection and the
			// protocol state that caused us to flush the link will
			// be rolled back. In the event that there was some
			// non-deterministic behavior in the remote that caused
			// them to violate the protocol, we have a decent shot
			// at correcting it this way, since reconnecting will
			// put us in the cleanest possible state to try again.
			//
			// In addition to the above, it is possible for us to
			// hit this case in situations where we improperly
			// handle message ordering due to concurrency choices.
			// An issue has been filed to address this here:
			// https://github.com/lightningnetwork/lnd/issues/8393
			l.failf(
				LinkFailureError{
					code:             ErrInvalidUpdate,
					FailureAction:    LinkFailureDisconnect,
					PermanentFailure: false,
					Warning:          true,
				},
				"received add while link is flushing",
			)

			return
		}

		// Disallow htlcs with blinding points set if we haven't
		// enabled the feature. This saves us from having to process
		// the onion at all, but will only catch blinded payments
		// where we are a relaying node (as the blinding point will
		// be in the payload when we're the introduction node).
		if msg.BlindingPoint.IsSome() && l.cfg.DisallowRouteBlinding {
			l.failf(LinkFailureError{code: ErrInvalidUpdate},
				"blinding point included when route blinding "+
					"is disabled")

			return
		}

		// We have to check the limit here rather than later in the
		// switch because the counterparty can keep sending HTLCs
		// without sending a revoke. This would mean that the switch
		// check would only occur later.
		if l.isOverexposedWithHtlc(msg, true) {
			l.failf(LinkFailureError{code: ErrInternalError},
				"peer sent us an HTLC that exceeded our max "+
					"fee exposure")

			return
		}

		// We just received an add request from an upstream peer, so we
		// add it to our state machine, then add the HTLC to our
		// "settle" list in the event that we know the preimage.
		index, err := l.channel.ReceiveHTLC(msg)
		if err != nil {
			l.failf(LinkFailureError{code: ErrInvalidUpdate},
				"unable to handle upstream add HTLC: %v", err)
			return
		}

		l.log.Tracef("receive upstream htlc with payment hash(%x), "+
			"assigning index: %v", msg.PaymentHash[:], index)

	case *lnwire.UpdateFulfillHTLC:
		pre := msg.PaymentPreimage
		idx := msg.ID

		// Before we pipeline the settle, we'll check the set of
		// active HTLCs to see if the related UpdateAddHTLC has been
		// fully locked-in.
		var lockedin bool
		htlcs := l.channel.ActiveHtlcs()
		for _, add := range htlcs {
			// The HTLC will be outgoing and match idx.
			if !add.Incoming && add.HtlcIndex == idx {
				lockedin = true
				break
			}
		}

		if !lockedin {
			l.failf(
				LinkFailureError{code: ErrInvalidUpdate},
				"unable to handle upstream settle",
			)
			return
		}

		if err := l.channel.ReceiveHTLCSettle(pre, idx); err != nil {
			l.failf(
				LinkFailureError{
					code:          ErrInvalidUpdate,
					FailureAction: LinkFailureForceClose,
				},
				"unable to handle upstream settle HTLC: %v", err,
			)
			return
		}

		settlePacket := &htlcPacket{
			outgoingChanID: l.ShortChanID(),
			outgoingHTLCID: idx,
			htlc: &lnwire.UpdateFulfillHTLC{
				PaymentPreimage: pre,
			},
		}

		// Add the newly discovered preimage to our growing list of
		// uncommitted preimages. These will be written to the witness
		// cache just before accepting the next commitment signature
		// from the remote peer.
		l.uncommittedPreimages = append(l.uncommittedPreimages, pre)

		// Pipeline this settle, send it to the switch.
		go l.forwardBatch(false, settlePacket)

	case *lnwire.UpdateFailMalformedHTLC:
		// Convert the failure type encoded within the HTLC fail
		// message to the proper generic lnwire error code.
		var failure lnwire.FailureMessage
		switch msg.FailureCode {
		case lnwire.CodeInvalidOnionVersion:
			failure = &lnwire.FailInvalidOnionVersion{
				OnionSHA256: msg.ShaOnionBlob,
			}
		case lnwire.CodeInvalidOnionHmac:
			failure = &lnwire.FailInvalidOnionHmac{
				OnionSHA256: msg.ShaOnionBlob,
			}

		case lnwire.CodeInvalidOnionKey:
			failure = &lnwire.FailInvalidOnionKey{
				OnionSHA256: msg.ShaOnionBlob,
			}

		// Handle malformed errors that are part of a blinded route.
		// This case is slightly different, because we expect every
		// relaying node in the blinded portion of the route to send
		// malformed errors. If we're also a relaying node, we're
		// likely going to switch this error out anyway for our own
		// malformed error, but we handle the case here for
		// completeness.
		case lnwire.CodeInvalidBlinding:
			failure = &lnwire.FailInvalidBlinding{
				OnionSHA256: msg.ShaOnionBlob,
			}

		default:
			l.log.Warnf("unexpected failure code received in "+
				"UpdateFailMalformedHTLC: %v", msg.FailureCode)

			// We don't just pass back the error we received from
			// our successor. Otherwise we might report a failure
			// that penalizes us more than needed. If the onion that
			// we forwarded was correct, the node should have been
			// able to send back its own failure. The node did not
			// send back its own failure, so we assume there was a
			// problem with the onion and report that back. We reuse
			// the invalid onion key failure because there is no
			// specific error for this case.
			failure = &lnwire.FailInvalidOnionKey{
				OnionSHA256: msg.ShaOnionBlob,
			}
		}

		// With the error parsed, we'll convert it into its opaque
		// form.
		var b bytes.Buffer
		if err := lnwire.EncodeFailure(&b, failure, 0); err != nil {
			l.log.Errorf("unable to encode malformed error: %v", err)
			return
		}

		// If the remote side was unable to parse the onion blob we
		// sent to it, then we should transform the malformed HTLC
		// message into the usual HTLC fail message.
		err := l.channel.ReceiveFailHTLC(msg.ID, b.Bytes())
		if err != nil {
			l.failf(LinkFailureError{code: ErrInvalidUpdate},
				"unable to handle upstream fail HTLC: %v", err)
			return
		}

	case *lnwire.UpdateFailHTLC:
		// Verify that the failure reason is at least 256 bytes plus
		// overhead.
		const minimumFailReasonLength = lnwire.FailureMessageLength +
			2 + 2 + 32
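
		// The 2 + 2 + 32 overhead above reflects the failure packet
		// framing described in BOLT #4: a 32-byte HMAC plus a 2-byte
		// failure_len and a 2-byte pad_len, while
		// lnwire.FailureMessageLength accounts for the 256 bytes that
		// failure_len and pad_len are expected to sum to.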

		if len(msg.Reason) < minimumFailReasonLength {
			// We've received a reason with a non-compliant length.
			// Older nodes happily relay back these failures that
			// may originate from a node further downstream.
			// Therefore we can't just fail the channel.
			//
			// We want to be compliant ourselves, so we also can't
			// pass back the reason unmodified. And we must make
			// sure that we don't hit the magic length check of 260
			// bytes in processRemoteSettleFails either.
			//
			// Because the reason is unreadable for the payer
			// anyway, we just replace it by a compliant-length
			// series of random bytes.
			msg.Reason = make([]byte, minimumFailReasonLength)
			_, err := crand.Read(msg.Reason[:])
			if err != nil {
				l.log.Errorf("Random generation error: %v", err)

				return
			}
		}

		// Add fail to the update log.
		idx := msg.ID
		err := l.channel.ReceiveFailHTLC(idx, msg.Reason[:])
		if err != nil {
			l.failf(LinkFailureError{code: ErrInvalidUpdate},
				"unable to handle upstream fail HTLC: %v", err)
			return
		}

	case *lnwire.CommitSig:
		// Since we may have learned new preimages for the first time,
		// we'll add them to our preimage cache. By doing this, we
		// ensure any contested contracts watched by any on-chain
		// arbitrators can now sweep this HTLC on-chain. We delay
		// committing the preimages until just before accepting the new
		// remote commitment, as afterwards the peer won't resend the
		// Settle messages on the next channel reestablishment. Doing so
		// allows us to more effectively batch this operation, instead
		// of doing a single write per preimage.
		err := l.cfg.PreimageCache.AddPreimages(
			l.uncommittedPreimages...,
		)
		if err != nil {
			l.failf(
				LinkFailureError{code: ErrInternalError},
				"unable to add preimages=%v to cache: %v",
				l.uncommittedPreimages, err,
			)
			return
		}

		// Instead of truncating the slice to conserve memory
		// allocations, we simply set the uncommitted preimage slice to
		// nil so that a new one will be initialized if any more
		// witnesses are discovered. We do this because the maximum size
		// that the slice can occupy is 15KB, and we want to ensure we
		// release that memory back to the runtime.
		l.uncommittedPreimages = nil

		// We just received a new update to our local commitment
		// chain. Validate this new commitment, closing the link if
		// it is invalid.
		auxSigBlob, err := msg.CustomRecords.Serialize()
		if err != nil {
			l.failf(
				LinkFailureError{code: ErrInvalidCommitment},
				"unable to serialize custom records: %v", err,
			)

			return
		}
		err = l.channel.ReceiveNewCommitment(&lnwallet.CommitSigs{
			CommitSig:  msg.CommitSig,
			HtlcSigs:   msg.HtlcSigs,
			PartialSig: msg.PartialSig,
			AuxSigBlob: auxSigBlob,
		})
		if err != nil {
			// If we were unable to reconstruct their proposed
			// commitment, then we'll examine the type of error. If
			// it's an InvalidCommitSigError, then we'll send a
			// direct error.
			var sendData []byte
			switch err.(type) {
			case *lnwallet.InvalidCommitSigError:
				sendData = []byte(err.Error())
			case *lnwallet.InvalidHtlcSigError:
				sendData = []byte(err.Error())
			}
			l.failf(
				LinkFailureError{
					code:          ErrInvalidCommitment,
					FailureAction: LinkFailureForceClose,
					SendData:      sendData,
				},
				"ChannelPoint(%v): unable to accept new "+
					"commitment: %v",
				l.channel.ChannelPoint(), err,
			)
			return
		}

		// As we've just accepted a new state, we'll now
		// immediately send the remote peer a revocation for our prior
		// state.
		nextRevocation, currentHtlcs, finalHTLCs, err :=
			l.channel.RevokeCurrentCommitment()
		if err != nil {
			l.log.Errorf("unable to revoke commitment: %v", err)

			// We need to fail the channel in case revoking our
			// local commitment does not succeed. We might have
			// already advanced our channel state which would lead
			// us to proceed with an unclean state.
			//
			// NOTE: We do not trigger a force close because this
			// could resolve itself in case our db was just busy
			// not accepting new transactions.
			l.failf(
				LinkFailureError{
					code:          ErrInternalError,
					Warning:       true,
					FailureAction: LinkFailureDisconnect,
				},
				"ChannelPoint(%v): unable to accept new "+
					"commitment: %v",
				l.channel.ChannelPoint(), err,
			)
			return
		}

		// As soon as we are ready to send our next revocation, we can
		// invoke the incoming commit hooks.
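		// These hooks are one-shot callbacks registered elsewhere on
		// the link (for example by logic that needs to wait for a
		// commitment exchange) that should run once an incoming
		// CommitSig has been fully processed; holding the lock keeps
		// registration and invocation from racing.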
		l.RWMutex.Lock()
		l.incomingCommitHooks.invoke()
		l.RWMutex.Unlock()
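
		// The first argument to SendMessage indicates whether the call
		// should block until the message has been written out to the
		// wire; passing false simply enqueues the revocation for
		// delivery.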
		l.cfg.Peer.SendMessage(false, nextRevocation)

		// Notify the incoming HTLCs whose resolutions were just
		// locked in.
		for id, settled := range finalHTLCs {
			l.cfg.HtlcNotifier.NotifyFinalHtlcEvent(
				models.CircuitKey{
					ChanID: l.ShortChanID(),
					HtlcID: id,
				},
				channeldb.FinalHtlcInfo{
					Settled:  settled,
					Offchain: true,
				},
			)
		}

		// Since we just revoked our commitment, we may have a new set
		// of HTLCs on our commitment, so we'll send them using our
		// function closure NotifyContractUpdate.
		newUpdate := &contractcourt.ContractUpdate{
			HtlcKey: contractcourt.LocalHtlcSet,
			Htlcs:   currentHtlcs,
		}
		err = l.cfg.NotifyContractUpdate(newUpdate)
		if err != nil {
			l.log.Errorf("unable to notify contract update: %v",
				err)
			return
		}

		select {
		case <-l.Quit:
			return
		default:
		}

		// If the remote party initiated the state transition, we'll
		// reply with a signature to provide them with their version of
		// the latest commitment. Otherwise, if both commitment chains
		// are fully synced from our PoV, we don't need to reply with a
		// signature, as both sides already have a commitment with the
		// latest state accepted.
		if l.channel.OweCommitment() {
			if !l.updateCommitTxOrFail() {
				return
			}
		}

		// Now that we have finished processing the incoming CommitSig
		// and sent out our RevokeAndAck, we invoke the flushHooks if
		// the channel state is clean.
		l.RWMutex.Lock()
		if l.channel.IsChannelClean() {
			l.flushHooks.invoke()
		}
		l.RWMutex.Unlock()

	case *lnwire.RevokeAndAck:
		// We've received a revocation from the remote chain. If valid,
		// this moves the remote chain forward and expands our
		// revocation window.

		// We now process the message and advance our remote commit
		// chain.
		fwdPkg, remoteHTLCs, err := l.channel.ReceiveRevocation(msg)
		if err != nil {
			// TODO(halseth): force close?
			l.failf(
				LinkFailureError{
					code:          ErrInvalidRevocation,
					FailureAction: LinkFailureDisconnect,
				},
				"unable to accept revocation: %v", err,
			)
			return
		}

		// The remote party now has a new primary commitment, so we'll
		// update the contract court to be aware of this new set (the
		// prior old remote pending).
		newUpdate := &contractcourt.ContractUpdate{
			HtlcKey: contractcourt.RemoteHtlcSet,
			Htlcs:   remoteHTLCs,
		}
		err = l.cfg.NotifyContractUpdate(newUpdate)
		if err != nil {
			l.log.Errorf("unable to notify contract update: %v",
				err)
			return
		}

		select {
		case <-l.Quit:
			return
		default:
		}

		// If we have a tower client for this channel type, we'll
		// create a backup for the current state.
		if l.cfg.TowerClient != nil {
			state := l.channel.State()
			chanID := l.ChanID()
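
			// The backed-up height is CommitHeight-1: the remote
			// party has just revoked that prior commitment, so it
			// is the state the watchtower must be able to punish
			// if it is ever broadcast.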
			err = l.cfg.TowerClient.BackupState(
				&chanID, state.RemoteCommitment.CommitHeight-1,
			)
			if err != nil {
				l.failf(LinkFailureError{
					code: ErrInternalError,
				}, "unable to queue breach backup: %v", err)
				return
			}
		}
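
		// The forwarding package returned by ReceiveRevocation holds
		// the updates that this revocation irrevocably locked in, so
		// it is now safe to resolve the settles/fails and hand the
		// adds off for forwarding to the switch.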
		l.processRemoteSettleFails(fwdPkg)
		l.processRemoteAdds(fwdPkg)

		// If the link failed during processing the adds, we must
		// return to ensure we won't attempt to update the state
		// further.
		if l.failed {
			return
		}

		// The revocation window opened up. If there are pending local
		// updates, try to update the commit tx. Pending updates could
		// already have been present because of a previously failed
		// update to the commit tx or freshly added in by
		// processRemoteAdds. Also, in case there are no local updates
		// but there are still remote updates that are not in the
		// remote commit tx yet, send out an update.
		if l.channel.OweCommitment() {
			if !l.updateCommitTxOrFail() {
				return
			}
		}

		// Now that we have finished processing the RevokeAndAck, we
		// can invoke the flushHooks if the channel state is clean.
		l.RWMutex.Lock()
		if l.channel.IsChannelClean() {
			l.flushHooks.invoke()
		}
		l.RWMutex.Unlock()

	case *lnwire.UpdateFee:
		// Check and see if their proposed fee-rate would make us
		// exceed the fee threshold.
		fee := chainfee.SatPerKWeight(msg.FeePerKw)

		isDust, err := l.exceedsFeeExposureLimit(fee)
		if err != nil {
			// This shouldn't typically happen. If it does, it
			// indicates something is wrong with our channel state.
			l.log.Errorf("Unable to determine if fee threshold " +
				"exceeded")
			l.failf(LinkFailureError{code: ErrInternalError},
				"error calculating fee exposure: %v", err)

			return
		}

		if isDust {
			// The proposed fee-rate makes us exceed the fee
			// threshold.
			l.failf(LinkFailureError{code: ErrInternalError},
				"fee threshold exceeded: %v", err)
			return
		}

		// We received a fee update from the peer. If we are the
		// initiator we will fail the channel; if not, we will apply
		// the update.
		if err := l.channel.ReceiveUpdateFee(fee); err != nil {
			l.failf(LinkFailureError{code: ErrInvalidUpdate},
				"error receiving fee update: %v", err)
			return
		}

		// Update the mailbox's feerate as well.
		l.mailBox.SetFeeRate(fee)

	// In the case where we receive a warning message from our peer, just
	// log it and move on. We choose not to disconnect from our peer,
	// although we "MAY" do so according to the specification.
	case *lnwire.Warning:
		l.log.Warnf("received warning message from peer: %v",
			msg.Warning())

	case *lnwire.Error:
		// Error received from remote, MUST fail channel, but should
		// only print the contents of the error message if all
		// characters are printable ASCII.
		l.failf(
			LinkFailureError{
				code: ErrRemoteError,

				// TODO(halseth): we currently don't fail the
				// channel permanently, as there are some sync
				// issues with other implementations that will
				// lead to them sending an error message, but
				// we can recover from on next connection. See
				// https://github.com/ElementsProject/lightning/issues/4212
				PermanentFailure: false,
			},
			"ChannelPoint(%v): received error from peer: %v",
			l.channel.ChannelPoint(), msg.Error(),
		)

	default:
		l.log.Warnf("received unknown message of type %T", msg)
	}
}

// ackDownStreamPackets is responsible for removing htlcs from a link's mailbox
// for packets delivered from the server, and cleaning up any circuits closed
// by signing a previous commitment txn. This method ensures that the circuits
// are removed from the circuit map before removing them from the link's
// mailbox, otherwise it could be possible for some circuit to be missed if
// this link flaps.
func (l *channelLink) ackDownStreamPackets() error {
	// First, remove the downstream Add packets that were included in the
	// previous commitment signature. This will prevent the Adds from being
	// replayed if this link disconnects.
	for _, inKey := range l.openedCircuits {
		// In order to test the sphinx replay logic of the remote
		// party, unsafe replay does not acknowledge the packets from
		// the mailbox. We can then force a replay of any Add packets
		// held in memory by disconnecting and reconnecting the link.
		if l.cfg.UnsafeReplay {
			continue
		}

		l.log.Debugf("removing Add packet %s from mailbox", inKey)
		l.mailBox.AckPacket(inKey)
	}

	// Now, we will delete all circuits closed by the previous commitment
	// signature, which is the result of downstream Settle/Fail packets. We
	// batch them here to ensure circuits are closed atomically and for
	// performance.
	err := l.cfg.Circuits.DeleteCircuits(l.closedCircuits...)
	switch err {
	case nil:
		// Successful deletion.

	default:
		l.log.Errorf("unable to delete %d circuits: %v",
			len(l.closedCircuits), err)
		return err
	}

	// With the circuits removed from memory and disk, we now ack any
	// Settle/Fails in the mailbox to ensure they do not get redelivered
	// after startup. If forgive is enabled and we've reached this point,
	// the circuits must have been removed at some point, so it is now safe
	// to un-queue the corresponding Settle/Fails.
	for _, inKey := range l.closedCircuits {
		l.log.Debugf("removing Fail/Settle packet %s from mailbox",
			inKey)
		l.mailBox.AckPacket(inKey)
	}

	// Lastly, reset our buffers to be empty while keeping any acquired
	// growth in the backing array.
	l.openedCircuits = l.openedCircuits[:0]
	l.closedCircuits = l.closedCircuits[:0]

	return nil
}

// updateCommitTxOrFail updates the commitment tx and if that fails, it fails
// the link.
func (l *channelLink) updateCommitTxOrFail() bool {
	err := l.updateCommitTx()
	switch err {
	// No error encountered, success.
	case nil:

	// A duplicate keystone error should be resolved and is not fatal, so
	// we won't send an Error message to the peer.
	case ErrDuplicateKeystone:
		l.failf(LinkFailureError{code: ErrCircuitError},
			"temporary circuit error: %v", err)
		return false

	// Any other error results in an Error message being sent to the peer.
	default:
		l.failf(LinkFailureError{code: ErrInternalError},
			"unable to update commitment: %v", err)
		return false
	}

	return true
}

// updateCommitTx signs, then sends an update to the remote peer adding a new
// commitment to their commitment chain which includes all the latest updates
// we've received+processed up to this point.
func (l *channelLink) updateCommitTx() error {
	// Preemptively write all pending keystones to disk, just in case the
	// HTLCs we have in memory are included in the subsequent attempt to
	// sign a commitment state.
	err := l.cfg.Circuits.OpenCircuits(l.keystoneBatch...)
	if err != nil {
		// If ErrDuplicateKeystone is returned, the caller will catch
		// it.
		return err
	}

	// Reset the batch, but keep the backing buffer to avoid reallocating.
	l.keystoneBatch = l.keystoneBatch[:0]

	// If hodl.Commit mode is active, we will refrain from attempting to
	// commit any in-memory modifications to the channel state. Exiting here
	// permits testing of either the switch or link's ability to trim
	// circuits that have been opened, but unsuccessfully committed.
	if l.cfg.HodlMask.Active(hodl.Commit) {
		l.log.Warnf(hodl.Commit.Warning())
		return nil
	}

	ctx, done := l.WithCtxQuitNoTimeout()
	defer done()

	newCommit, err := l.channel.SignNextCommitment(ctx)
	if err == lnwallet.ErrNoWindow {
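		// Resume the ticker so that the link can eventually detect a
		// peer that never extends our revocation window and treat it
		// as unresponsive.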
		l.cfg.PendingCommitTicker.Resume()
		l.log.Trace("PendingCommitTicker resumed")

		n := l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote)
		l.log.Tracef("revocation window exhausted, unable to send: "+
			"%v, pend_updates=%v, dangling_closes=%v", n,
			lnutils.SpewLogClosure(l.openedCircuits),
			lnutils.SpewLogClosure(l.closedCircuits))

		return nil
	} else if err != nil {
		return err
	}

	if err := l.ackDownStreamPackets(); err != nil {
		return err
	}

	l.cfg.PendingCommitTicker.Pause()
	l.log.Trace("PendingCommitTicker paused after ackDownStreamPackets")

	// The remote party now has a new pending commitment, so we'll update
	// the contract court to be aware of this new set (the prior old remote
	// pending).
	newUpdate := &contractcourt.ContractUpdate{
		HtlcKey: contractcourt.RemotePendingHtlcSet,
		Htlcs:   newCommit.PendingHTLCs,
	}
	err = l.cfg.NotifyContractUpdate(newUpdate)
	if err != nil {
		l.log.Errorf("unable to notify contract update: %v", err)
		return err
	}
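
	// Before constructing and sending the new CommitSig, do a non-blocking
	// check of the quit channel so we bail out early if the link is
	// shutting down.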
	select {
	case <-l.Quit:
		return ErrLinkShuttingDown
	default:
	}

	auxBlobRecords, err := lnwire.ParseCustomRecords(newCommit.AuxSigBlob)
	if err != nil {
		return fmt.Errorf("error parsing aux sigs: %w", err)
	}

	commitSig := &lnwire.CommitSig{
		ChanID:        l.ChanID(),
		CommitSig:     newCommit.CommitSig,
		HtlcSigs:      newCommit.HtlcSigs,
		PartialSig:    newCommit.PartialSig,
		CustomRecords: auxBlobRecords,
	}
	l.cfg.Peer.SendMessage(false, commitSig)

	// Now that we have sent out a new CommitSig, we invoke the outgoing set
	// of commit hooks.
	l.RWMutex.Lock()
	l.outgoingCommitHooks.invoke()
	l.RWMutex.Unlock()

	return nil
}

// PeerPubKey returns the public key of the remote peer with which we have the
// channel link opened.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) PeerPubKey() [33]byte {
	return l.cfg.Peer.PubKey()
}

// ChannelPoint returns the channel outpoint for the channel link.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) ChannelPoint() wire.OutPoint {
	return l.channel.ChannelPoint()
}

// ShortChanID returns the short channel ID for the channel link. The short
// channel ID encodes the exact location in the main chain that the original
// funding output can be found.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) ShortChanID() lnwire.ShortChannelID {
	l.RLock()
	defer l.RUnlock()

	return l.channel.ShortChanID()
}

// UpdateShortChanID updates the short channel ID for a link. This may be
// required in the event that a link is created before the short chan ID for it
// is known, or a re-org occurs, and the funding transaction changes location
// within the chain.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) UpdateShortChanID() (lnwire.ShortChannelID, error) {
	chanID := l.ChanID()

	// Refresh the channel state's short channel ID by loading it from disk.
	// This ensures that the channel state accurately reflects the updated
	// short channel ID.
	err := l.channel.State().Refresh()
	if err != nil {
		l.log.Errorf("unable to refresh short_chan_id for chan_id=%v: "+
			"%v", chanID, err)
		return hop.Source, err
	}

	return hop.Source, nil
}
|
|
|
// ChanID returns the channel ID for the channel link. The channel ID is a more
// compact representation of a channel's full outpoint.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) ChanID() lnwire.ChannelID {
	return lnwire.NewChanIDFromOutPoint(l.channel.ChannelPoint())
}

// Bandwidth returns the total amount that can flow through the channel link at
// this given instance. The value returned is expressed in millisatoshi and can
// be used by callers when making forwarding decisions to determine if a link
// can accept an HTLC.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) Bandwidth() lnwire.MilliSatoshi {
	// Get the balance available on the channel for new HTLCs. This takes
	// the channel reserve into account so HTLCs up to this value won't
	// violate it.
	return l.channel.AvailableBalance()
}

// MayAddOutgoingHtlc indicates whether we can add an outgoing htlc with the
// amount provided to the link. This check does not reserve a space, since
// forwards or other payments may use the available slot, so it should be
// considered best-effort.
func (l *channelLink) MayAddOutgoingHtlc(amt lnwire.MilliSatoshi) error {
	return l.channel.MayAddOutgoingHtlc(amt)
}

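// Illustrative usage sketch (not part of the original source; the amount below
// is hypothetical): a nil error is only a best-effort signal, since another
// forward may consume the slot before the HTLC is actually committed.
//
//	if err := link.MayAddOutgoingHtlc(lnwire.MilliSatoshi(10_000)); err != nil {
//		// The HTLC would currently not fit; fail or try another link.
//	}
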
// getDustSum is a wrapper method that calls the underlying channel's dust sum
// method.
//
// NOTE: Part of the dustHandler interface.
func (l *channelLink) getDustSum(whoseCommit lntypes.ChannelParty,
	dryRunFee fn.Option[chainfee.SatPerKWeight]) lnwire.MilliSatoshi {

	return l.channel.GetDustSum(whoseCommit, dryRunFee)
}

// getFeeRate is a wrapper method that retrieves the underlying channel's
// feerate.
//
// NOTE: Part of the dustHandler interface.
func (l *channelLink) getFeeRate() chainfee.SatPerKWeight {
	return l.channel.CommitFeeRate()
}

// getDustClosure returns a closure that can be used by the switch or mailbox
// to evaluate whether a given HTLC is dust.
//
// NOTE: Part of the dustHandler interface.
func (l *channelLink) getDustClosure() dustClosure {
	localDustLimit := l.channel.State().LocalChanCfg.DustLimit
	remoteDustLimit := l.channel.State().RemoteChanCfg.DustLimit
	chanType := l.channel.State().ChanType

	return dustHelper(chanType, localDustLimit, remoteDustLimit)
}

// getCommitFee returns either the local or remote CommitFee in satoshis. This
// is used so that the Switch can have access to the commitment fee without
// needing to have a *LightningChannel. This doesn't include dust.
//
// NOTE: Part of the dustHandler interface.
func (l *channelLink) getCommitFee(remote bool) btcutil.Amount {
	if remote {
		return l.channel.State().RemoteCommitment.CommitFee
	}

	return l.channel.State().LocalCommitment.CommitFee
}

// exceedsFeeExposureLimit returns whether or not the new proposed fee-rate
// increases the total dust and fees within the channel past the configured
// fee threshold. It first calculates the dust sum over every update in the
// update log, using the proposed fee-rate and taking into account both the
// local and remote dust limits. It uses every update in the update log
// instead of what is actually on the local and remote commitments because it
// is assumed that in a worst-case scenario, every update in the update log
// could theoretically be on either commitment transaction and this needs to
// be accounted for with this fee-rate. It then calculates the local and
// remote commitment fees given the proposed fee-rate. Finally, it tallies the
// results and determines if the fee threshold has been exceeded.
func (l *channelLink) exceedsFeeExposureLimit(
	feePerKw chainfee.SatPerKWeight) (bool, error) {

	dryRunFee := fn.Some[chainfee.SatPerKWeight](feePerKw)

	// Get the sum of dust for both the local and remote commitments using
	// this "dry-run" fee.
	localDustSum := l.getDustSum(lntypes.Local, dryRunFee)
	remoteDustSum := l.getDustSum(lntypes.Remote, dryRunFee)

	// Calculate the local and remote commitment fees using this dry-run
	// fee.
	localFee, remoteFee, err := l.channel.CommitFeeTotalAt(feePerKw)
	if err != nil {
		return false, err
	}

	// Finally, check whether the max fee exposure was exceeded on either
	// future commitment transaction with the fee-rate.
	totalLocalDust := localDustSum + lnwire.NewMSatFromSatoshis(localFee)
	if totalLocalDust > l.cfg.MaxFeeExposure {
		l.log.Debugf("ChannelLink(%v): exceeds fee exposure limit: "+
			"local dust: %v, local fee: %v", l.ShortChanID(),
			totalLocalDust, localFee)

		return true, nil
	}

	totalRemoteDust := remoteDustSum + lnwire.NewMSatFromSatoshis(
		remoteFee,
	)

	if totalRemoteDust > l.cfg.MaxFeeExposure {
		l.log.Debugf("ChannelLink(%v): exceeds fee exposure limit: "+
			"remote dust: %v, remote fee: %v", l.ShortChanID(),
			totalRemoteDust, remoteFee)

		return true, nil
	}

	return false, nil
}

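// exampleFeeExposureTally is an illustrative, self-contained sketch (it is not
// part of the original source and is not referenced by the package) of the
// tally performed in exceedsFeeExposureLimit: the dust sum computed at a
// dry-run fee-rate plus the commitment fee at that same rate is compared
// against the configured threshold. All argument values are assumed to be
// supplied by the caller.
func exampleFeeExposureTally(dustSum lnwire.MilliSatoshi,
	commitFee btcutil.Amount, maxFeeExposure lnwire.MilliSatoshi) bool {

	// Express the commitment fee in millisatoshi and add it to the dust
	// sum, mirroring totalLocalDust/totalRemoteDust above.
	totalExposure := dustSum + lnwire.NewMSatFromSatoshis(commitFee)

	return totalExposure > maxFeeExposure
}
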
// isOverexposedWithHtlc calculates whether the proposed HTLC will make the
// channel exceed the fee threshold. It first fetches the largest fee-rate
// that may be on any unrevoked commitment transaction. Then, using this
// fee-rate, it determines if the to-be-added HTLC is dust. If the HTLC is
// dust, it adds to the overall dust sum. If it is not dust, it contributes to
// weight, which also adds to the overall dust sum by an increase in fees. If
// the dust sum on either commitment exceeds the configured fee threshold,
// this function returns true.
func (l *channelLink) isOverexposedWithHtlc(htlc *lnwire.UpdateAddHTLC,
	incoming bool) bool {

	dustClosure := l.getDustClosure()

	feeRate := l.channel.WorstCaseFeeRate()

	amount := htlc.Amount.ToSatoshis()

	// See if this HTLC is dust on both the local and remote commitments.
	isLocalDust := dustClosure(feeRate, incoming, lntypes.Local, amount)
	isRemoteDust := dustClosure(feeRate, incoming, lntypes.Remote, amount)

	// Calculate the dust sum for the local and remote commitments.
	localDustSum := l.getDustSum(
		lntypes.Local, fn.None[chainfee.SatPerKWeight](),
	)
	remoteDustSum := l.getDustSum(
		lntypes.Remote, fn.None[chainfee.SatPerKWeight](),
	)

	// Grab the larger of the local and remote commitment fees w/o dust.
	commitFee := l.getCommitFee(false)

	if l.getCommitFee(true) > commitFee {
		commitFee = l.getCommitFee(true)
	}

	commitFeeMSat := lnwire.NewMSatFromSatoshis(commitFee)

	localDustSum += commitFeeMSat
	remoteDustSum += commitFeeMSat

	// Calculate the additional fee increase if this is a non-dust HTLC.
	weight := lntypes.WeightUnit(input.HTLCWeight)
	additional := lnwire.NewMSatFromSatoshis(
		feeRate.FeeForWeight(weight),
	)

	if isLocalDust {
		// If this is dust, it doesn't contribute to weight but does
		// contribute to the overall dust sum.
		localDustSum += lnwire.NewMSatFromSatoshis(amount)
	} else {
		// Account for the fee increase that comes with an increase in
		// weight.
		localDustSum += additional
	}

	if localDustSum > l.cfg.MaxFeeExposure {
		// The max fee exposure was exceeded.
		l.log.Debugf("ChannelLink(%v): HTLC %v makes the channel "+
			"overexposed, total local dust: %v (current commit "+
			"fee: %v)", l.ShortChanID(), htlc, localDustSum,
			commitFeeMSat)

		return true
	}

	if isRemoteDust {
		// If this is dust, it doesn't contribute to weight but does
		// contribute to the overall dust sum.
		remoteDustSum += lnwire.NewMSatFromSatoshis(amount)
	} else {
		// Account for the fee increase that comes with an increase in
		// weight.
		remoteDustSum += additional
	}

	if remoteDustSum > l.cfg.MaxFeeExposure {
		// The max fee exposure was exceeded.
		l.log.Debugf("ChannelLink(%v): HTLC %v makes the channel "+
			"overexposed, total remote dust: %v (current commit "+
			"fee: %v)", l.ShortChanID(), htlc, remoteDustSum,
			commitFeeMSat)

		return true
	}

	return false
}

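// exampleHtlcExposureDelta is an illustrative sketch (not part of the original
// source and not referenced by the package) of the rule applied in
// isOverexposedWithHtlc: a dust HTLC adds its full amount to the dust sum,
// while a non-dust HTLC only adds the fee for the extra HTLC weight it puts
// on the commitment transaction. The fee-rate is assumed to already be the
// worst-case rate across unrevoked commitments.
func exampleHtlcExposureDelta(isDust bool, amt btcutil.Amount,
	feeRate chainfee.SatPerKWeight) lnwire.MilliSatoshi {

	if isDust {
		// Dust HTLCs are unrecoverable on-chain, so their full value
		// counts toward the fee/dust exposure.
		return lnwire.NewMSatFromSatoshis(amt)
	}

	// Non-dust HTLCs add input.HTLCWeight to the commitment weight, which
	// raises the commitment fee at the given fee-rate.
	weight := lntypes.WeightUnit(input.HTLCWeight)

	return lnwire.NewMSatFromSatoshis(feeRate.FeeForWeight(weight))
}
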
// dustClosure is a function that evaluates whether an HTLC is dust. It returns
// true if the HTLC is dust. It takes in a feerate, a boolean denoting whether
// the HTLC is incoming (i.e. one that the remote sent), a ChannelParty
// denoting whether to evaluate on the local or remote commit, and finally an
// HTLC amount to test.
type dustClosure func(feerate chainfee.SatPerKWeight, incoming bool,
	whoseCommit lntypes.ChannelParty, amt btcutil.Amount) bool

// dustHelper is used to construct the dustClosure.
func dustHelper(chantype channeldb.ChannelType, localDustLimit,
	remoteDustLimit btcutil.Amount) dustClosure {

	isDust := func(feerate chainfee.SatPerKWeight, incoming bool,
		whoseCommit lntypes.ChannelParty, amt btcutil.Amount) bool {

		var dustLimit btcutil.Amount
		if whoseCommit.IsLocal() {
			dustLimit = localDustLimit
		} else {
			dustLimit = remoteDustLimit
		}

		return lnwallet.HtlcIsDust(
			chantype, incoming, whoseCommit, feerate, amt,
			dustLimit,
		)
	}

	return isDust
}

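// exampleDustEvaluation is an illustrative sketch (not part of the original
// source and not referenced by the package) showing how a dustClosure built
// by dustHelper is evaluated. The fee-rate and amount below are hypothetical.
func exampleDustEvaluation(chanType channeldb.ChannelType, localDustLimit,
	remoteDustLimit btcutil.Amount) bool {

	isDust := dustHelper(chanType, localDustLimit, remoteDustLimit)

	// Evaluate a hypothetical 400 sat incoming HTLC against the local
	// commitment at a fee-rate of 1,000 sat/kw.
	return isDust(chainfee.SatPerKWeight(1000), true, lntypes.Local, 400)
}
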
// zeroConfConfirmed returns whether or not the zero-conf channel has
// confirmed on-chain.
//
// Part of the scidAliasHandler interface.
func (l *channelLink) zeroConfConfirmed() bool {
	return l.channel.State().ZeroConfConfirmed()
}

// confirmedScid returns the confirmed SCID for a zero-conf channel. This
// should not be called for non-zero-conf channels.
//
// Part of the scidAliasHandler interface.
func (l *channelLink) confirmedScid() lnwire.ShortChannelID {
	return l.channel.State().ZeroConfRealScid()
}

// isZeroConf returns whether or not the underlying channel is a zero-conf
// channel.
//
// Part of the scidAliasHandler interface.
func (l *channelLink) isZeroConf() bool {
	return l.channel.State().IsZeroConf()
}

// negotiatedAliasFeature returns whether or not the underlying channel has
// negotiated the option-scid-alias feature bit. This will be true for both
// option-scid-alias and zero-conf channel-types. It will also be true for
// channels with the feature bit but without the above channel-types.
//
// Part of the scidAliasHandler interface.
func (l *channelLink) negotiatedAliasFeature() bool {
	return l.channel.State().NegotiatedAliasFeature()
}

// getAliases returns the set of aliases for the underlying channel.
//
// Part of the scidAliasHandler interface.
func (l *channelLink) getAliases() []lnwire.ShortChannelID {
	return l.cfg.GetAliases(l.ShortChanID())
}

// attachFailAliasUpdate sets the link's FailAliasUpdate function.
//
// Part of the scidAliasHandler interface.
func (l *channelLink) attachFailAliasUpdate(closure func(
	sid lnwire.ShortChannelID, incoming bool) *lnwire.ChannelUpdate1) {

	l.Lock()
	l.cfg.FailAliasUpdate = closure
	l.Unlock()
}

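// Illustrative wiring sketch (hypothetical; the failAliasUpdate helper named
// below is assumed, not taken from the original source): the switch/server
// side attaches a closure that builds a ChannelUpdate referencing only the
// SCID that was in the onion, so the confirmed SCID of an alias channel is
// never leaked in failure messages.
//
//	link.attachFailAliasUpdate(func(sid lnwire.ShortChannelID,
//		incoming bool) *lnwire.ChannelUpdate1 {
//
//		return failAliasUpdate(sid, incoming)
//	})
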
// AttachMailBox updates the current mailbox used by this link, and hooks up
// the mailbox's message and packet outboxes to the link's upstream and
// downstream chans, respectively.
func (l *channelLink) AttachMailBox(mailbox MailBox) {
	l.Lock()
	l.mailBox = mailbox
	l.upstream = mailbox.MessageOutBox()
	l.downstream = mailbox.PacketOutBox()
	l.Unlock()

	// Set the mailbox's fee rate. This may be refreshing a feerate that
	// was never committed.
	l.mailBox.SetFeeRate(l.getFeeRate())

	// Also set the mailbox's dust closure so that it can query whether
	// HTLCs are dust given the current feerate.
	l.mailBox.SetDustClosure(l.getDustClosure())
}

// UpdateForwardingPolicy updates the forwarding policy for the target
// ChannelLink. Once updated, the link will use the new forwarding policy to
// govern if an incoming HTLC should be forwarded or not. We assume that
// fields that are zero are intentionally set to zero, so we'll use newPolicy
// to update all of the link's FwrdingPolicy's values.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) UpdateForwardingPolicy(
	newPolicy models.ForwardingPolicy) {

	l.Lock()
	defer l.Unlock()

	l.cfg.FwrdingPolicy = newPolicy
}

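// Illustrative usage sketch (field values are hypothetical): because zero
// values are applied verbatim rather than treated as "unchanged", callers
// construct a fully populated policy instead of a partial patch.
//
//	link.UpdateForwardingPolicy(models.ForwardingPolicy{
//		MinHTLCOut:    1_000,
//		MaxHTLC:       100_000_000,
//		TimeLockDelta: 80,
//		// Fee-related fields are set explicitly as well.
//	})
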
// CheckHtlcForward should return a nil error if the passed HTLC details
// satisfy the current forwarding policy of the target link. Otherwise, a
// LinkError with a valid protocol failure message should be returned in order
// to signal the policy consistency issue to the source of the HTLC.
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) CheckHtlcForward(payHash [32]byte,
	incomingHtlcAmt, amtToForward lnwire.MilliSatoshi,
	incomingTimeout, outgoingTimeout uint32,
	inboundFee models.InboundFee,
	heightNow uint32, originalScid lnwire.ShortChannelID) *LinkError {

	l.RLock()
	policy := l.cfg.FwrdingPolicy
	l.RUnlock()

	// Using the outgoing HTLC amount, we'll calculate the outgoing fee
	// this incoming HTLC must carry in order to satisfy the constraints
	// of the outgoing link.
	outFee := ExpectedFee(policy, amtToForward)

	// Then calculate the inbound fee that we charge based on the sum of
	// outgoing HTLC amount and outgoing fee.
	inFee := inboundFee.CalcFee(amtToForward + outFee)

	// Add up both fee components. It is important to calculate both fees
	// separately. An alternative way of calculating is to first determine
	// an aggregate fee and apply that to the outgoing HTLC amount.
	// However, rounding may cause the result to be slightly higher than in
	// the case of separately rounded fee components. This potentially
	// causes failed forwards for senders and is something to be avoided.
	// A worked example follows this function.
	expectedFee := inFee + int64(outFee)

	// If the actual fee is less than our expected fee, then we'll reject
	// this HTLC as it didn't provide a sufficient amount of fees, or the
	// values have been tampered with, or the send used incorrect/dated
	// information to construct the forwarding information for this hop. In
	// any case, we'll cancel this HTLC.
	actualFee := int64(incomingHtlcAmt) - int64(amtToForward)
	if incomingHtlcAmt < amtToForward || actualFee < expectedFee {
		l.log.Warnf("outgoing htlc(%x) has insufficient fee: "+
			"expected %v, got %v: incoming=%v, outgoing=%v, "+
			"inboundFee=%v",
			payHash[:], expectedFee, actualFee,
			incomingHtlcAmt, amtToForward, inboundFee,
		)

		// As part of the returned error, we'll send our latest routing
		// policy so the sending node obtains the most up to date data.
		cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
			return lnwire.NewFeeInsufficient(amtToForward, *upd)
		}
		failure := l.createFailureWithUpdate(false, originalScid, cb)
		return NewLinkError(failure)
	}

	// Check whether the outgoing htlc satisfies the channel policy.
	err := l.canSendHtlc(
		policy, payHash, amtToForward, outgoingTimeout, heightNow,
		originalScid,
	)
	if err != nil {
		return err
	}

	// Finally, we'll ensure that the time-lock on the outgoing HTLC meets
	// the following constraint: the incoming time-lock minus our time-lock
	// delta should equal the outgoing time lock. Otherwise, either the
	// sender messed up, or an intermediate node tampered with the HTLC.
	timeDelta := policy.TimeLockDelta
	if incomingTimeout < outgoingTimeout+timeDelta {
		l.log.Warnf("incoming htlc(%x) has incorrect time-lock value: "+
			"expected at least %v block delta, got %v block delta",
			payHash[:], timeDelta, incomingTimeout-outgoingTimeout)

		// Grab the latest routing policy so the sending node is up to
		// date with our current policy.
		cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
			return lnwire.NewIncorrectCltvExpiry(
				incomingTimeout, *upd,
			)
		}
		failure := l.createFailureWithUpdate(false, originalScid, cb)
		return NewLinkError(failure)
	}

	return nil
}

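// Worked example for the fee calculation above (all numbers are hypothetical
// and ignore base fees): suppose the outbound fee component comes to 1000.6
// msat before truncation and the inbound component to 500.7 msat. Rounding
// each component separately gives
//
//	floor(1000.6) + floor(500.7) = 1000 + 500 = 1500 msat
//
// while first aggregating and then rounding gives
//
//	floor(1000.6 + 500.7) = floor(1501.3) = 1501 msat
//
// The aggregate approach can therefore demand up to 1 msat more than the sum
// of the separately rounded components, which is enough to fail a borderline
// forward constructed by a sender that rounded each hop's fee individually.
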
// CheckHtlcTransit should return a nil error if the passed HTLC details
// satisfy the current channel policy. Otherwise, a LinkError with a
// valid protocol failure message should be returned in order to signal
// the violation. This call is intended to be used for locally initiated
// payments for which there is no corresponding incoming htlc.
func (l *channelLink) CheckHtlcTransit(payHash [32]byte,
	amt lnwire.MilliSatoshi, timeout uint32,
	heightNow uint32) *LinkError {

	l.RLock()
	policy := l.cfg.FwrdingPolicy
	l.RUnlock()

	// We pass in hop.Source here as this is only used in the Switch when
	// trying to send over a local link. This causes the fallback mechanism
	// to occur.
	return l.canSendHtlc(
		policy, payHash, amt, timeout, heightNow, hop.Source,
	)
}

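// Illustrative usage sketch (variable names are hypothetical): for a locally
// initiated payment there is no incoming HTLC, so the switch only validates
// the outgoing amount and timeout against this link's policy.
//
//	if linkErr := link.CheckHtlcTransit(
//		payHash, amt, timeout, heightNow,
//	); linkErr != nil {
//		// Fail the payment locally; there is nothing to cancel back.
//	}
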
// canSendHtlc checks whether the given htlc parameters satisfy
|
2019-04-19 11:11:16 +02:00
|
|
|
// the channel's amount and time lock constraints.
|
2023-07-17 12:53:24 +02:00
|
|
|
func (l *channelLink) canSendHtlc(policy models.ForwardingPolicy,
|
2019-04-19 11:11:16 +02:00
|
|
|
payHash [32]byte, amt lnwire.MilliSatoshi, timeout uint32,
|
server+htlcswitch: prevent privacy leaks, allow alias routing
This intent of this change is to prevent privacy leaks when routing
with aliases and also to allow routing when using an alias. The
aliases are our aliases.
Introduces are two maps:
* aliasToReal:
This is an N->1 mapping for a channel. The keys are the set of
aliases and the value is the confirmed, on-chain SCID.
* baseIndex:
This is also an N->1 mapping for a channel. The keys are the set
of aliases and the value is the "base" SCID (whatever is in the
OpenChannel.ShortChannelID field). There is also a base->base
mapping, so not all keys are aliases.
The above maps are populated when a link is added to the switch and
when the channel has confirmed on-chain. The maps are not removed
from if the link is removed, but this is fine since forwarding won't
occur.
* getLinkByMapping
This function is introduced to adhere to the spec requirements that
using the confirmed SCID of a private, scid-alias-feature-bit
channel does not work. Lnd implements a stricter version of the spec
and disallows this behavior if the feature-bit was negotiated, rather
than just the channel type. The old, privacy-leak behavior is
preserved.
The spec also requires that if we must fail back an HTLC, the
ChannelUpdate must use the SCID of whatever was in the onion, to avoid
a privacy leak. This is also done by passing in the relevant SCID to
the mailbox and link. Lnd will also cancel back on the "incoming" side
if the InterceptableSwitch was used or if the link failed to decrypt
the onion. In this case, we are cautious and replace the SCID if an
alias exists.
2022-04-04 22:44:51 +02:00
|
|
|
heightNow uint32, originalScid lnwire.ShortChannelID) *LinkError {
|
2019-04-19 11:11:16 +02:00
|
|
|
|
htlcswitch: perform fee related checks at forwarding time
In this commit, we fix a very old, lingering bug within the link. When
accepting an HTLC we are meant to validate the fee against the
constraints of the *outgoing* link. This is due to the fact that we're
offering a payment transit service on our outgoing link. Before this
commit, we would use the policies of the *incoming* link. This would at
times lead to odd routing errors as we would go to route, get an error
update and then route again, repeating the process.
With this commit, we'll properly use the incoming link for timelock
related constraints, and the outgoing link for fee related constraints.
We do this by introducing a new HtlcSatisfiesPolicy method in the link.
This method should return a non-nil error if the link can carry the HTLC
as it satisfies its current forwarding policy. We'll use this method now
at *forwarding* time to ensure that we only forward to links that
actually accept the policy. This fixes a number of bugs that existed
before that could result in a link accepting an HTLC that actually
violated its policy. In the case that the policy is violated for *all*
links, we take care to return the error returned by the *target* link so
the caller can update their sending accordingly.
In this commit, we also remove the prior linkControl channel in the
channelLink. Instead, of sending a message to update the internal link
policy, we'll use a mutex in place. This simplifies the code, and also
adds some necessary refactoring in anticipation of the next follow up
commit.
2018-04-04 04:51:40 +02:00
|
|
|
// As our first sanity check, we'll ensure that the passed HTLC isn't
|
|
|
|
// too small for the next hop. If so, then we'll cancel the HTLC
|
|
|
|
// directly.
|
2019-11-15 10:09:27 +01:00
|
|
|
if amt < policy.MinHTLCOut {
|
2021-05-17 12:13:59 +02:00
|
|
|
l.log.Warnf("outgoing htlc(%x) is too small: min_htlc=%v, "+
|
2019-11-15 10:09:27 +01:00
|
|
|
"htlc_value=%v", payHash[:], policy.MinHTLCOut,
|
2019-04-19 11:11:16 +02:00
|
|
|
amt)
|
htlcswitch: perform fee related checks at forwarding time
In this commit, we fix a very old, lingering bug within the link. When
accepting an HTLC we are meant to validate the fee against the
constraints of the *outgoing* link. This is due to the fact that we're
offering a payment transit service on our outgoing link. Before this
commit, we would use the policies of the *incoming* link. This would at
times lead to odd routing errors as we would go to route, get an error
update and then route again, repeating the process.
With this commit, we'll properly use the incoming link for timelock
related constraints, and the outgoing link for fee related constraints.
We do this by introducing a new HtlcSatisfiesPolicy method in the link.
This method should return a non-nil error if the link can carry the HTLC
as it satisfies its current forwarding policy. We'll use this method now
at *forwarding* time to ensure that we only forward to links that
actually accept the policy. This fixes a number of bugs that existed
before that could result in a link accepting an HTLC that actually
violated its policy. In the case that the policy is violated for *all*
links, we take care to return the error returned by the *target* link so
the caller can update their sending accordingly.
In this commit, we also remove the prior linkControl channel in the
channelLink. Instead, of sending a message to update the internal link
policy, we'll use a mutex in place. This simplifies the code, and also
adds some necessary refactoring in anticipation of the next follow up
commit.
2018-04-04 04:51:40 +02:00
|
|
|
|
|
|
|
// As part of the returned error, we'll send our latest routing
|
|
|
|
// policy so the sending node obtains the most up to date data.
|
2024-08-21 08:39:37 +02:00
|
|
|
cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
|
server+htlcswitch: prevent privacy leaks, allow alias routing
This intent of this change is to prevent privacy leaks when routing
with aliases and also to allow routing when using an alias. The
aliases are our aliases.
Introduces are two maps:
* aliasToReal:
This is an N->1 mapping for a channel. The keys are the set of
aliases and the value is the confirmed, on-chain SCID.
* baseIndex:
This is also an N->1 mapping for a channel. The keys are the set
of aliases and the value is the "base" SCID (whatever is in the
OpenChannel.ShortChannelID field). There is also a base->base
mapping, so not all keys are aliases.
The above maps are populated when a link is added to the switch and
when the channel has confirmed on-chain. The maps are not removed
from if the link is removed, but this is fine since forwarding won't
occur.
* getLinkByMapping
This function is introduced to adhere to the spec requirements that
using the confirmed SCID of a private, scid-alias-feature-bit
channel does not work. Lnd implements a stricter version of the spec
and disallows this behavior if the feature-bit was negotiated, rather
than just the channel type. The old, privacy-leak behavior is
preserved.
The spec also requires that if we must fail back an HTLC, the
ChannelUpdate must use the SCID of whatever was in the onion, to avoid
a privacy leak. This is also done by passing in the relevant SCID to
the mailbox and link. Lnd will also cancel back on the "incoming" side
if the InterceptableSwitch was used or if the link failed to decrypt
the onion. In this case, we are cautious and replace the SCID if an
alias exists.
2022-04-04 22:44:51 +02:00
|
|
|
return lnwire.NewAmountBelowMinimum(amt, *upd)
|
|
|
|
}
|
|
|
|
failure := l.createFailureWithUpdate(false, originalScid, cb)
|
2020-01-14 14:07:42 +01:00
|
|
|
return NewLinkError(failure)
|
2017-06-17 00:01:00 +02:00
|
|
|
}
|
|
|
|
|
2019-09-27 16:01:18 +02:00
|
|
|
// Next, ensure that the passed HTLC isn't too large. If so, we'll
|
|
|
|
// cancel the HTLC directly.
|
2019-04-19 11:11:16 +02:00
|
|
|
if policy.MaxHTLC != 0 && amt > policy.MaxHTLC {
|
2021-05-17 12:13:59 +02:00
|
|
|
l.log.Warnf("outgoing htlc(%x) is too large: max_htlc=%v, "+
|
2019-04-19 11:11:16 +02:00
|
|
|
"htlc_value=%v", payHash[:], policy.MaxHTLC, amt)
|
2018-12-09 03:21:18 +01:00
|
|
|
|
2019-09-27 16:01:18 +02:00
|
|
|
// As part of the returned error, we'll send our latest routing
|
|
|
|
// policy so the sending node obtains the most up-to-date data.
|
2024-08-21 08:39:37 +02:00
|
|
|
cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
|
server+htlcswitch: prevent privacy leaks, allow alias routing
This intent of this change is to prevent privacy leaks when routing
with aliases and also to allow routing when using an alias. The
aliases are our aliases.
Introduces are two maps:
* aliasToReal:
This is an N->1 mapping for a channel. The keys are the set of
aliases and the value is the confirmed, on-chain SCID.
* baseIndex:
This is also an N->1 mapping for a channel. The keys are the set
of aliases and the value is the "base" SCID (whatever is in the
OpenChannel.ShortChannelID field). There is also a base->base
mapping, so not all keys are aliases.
The above maps are populated when a link is added to the switch and
when the channel has confirmed on-chain. The maps are not removed
from if the link is removed, but this is fine since forwarding won't
occur.
* getLinkByMapping
This function is introduced to adhere to the spec requirements that
using the confirmed SCID of a private, scid-alias-feature-bit
channel does not work. Lnd implements a stricter version of the spec
and disallows this behavior if the feature-bit was negotiated, rather
than just the channel type. The old, privacy-leak behavior is
preserved.
The spec also requires that if we must fail back an HTLC, the
ChannelUpdate must use the SCID of whatever was in the onion, to avoid
a privacy leak. This is also done by passing in the relevant SCID to
the mailbox and link. Lnd will also cancel back on the "incoming" side
if the InterceptableSwitch was used or if the link failed to decrypt
the onion. In this case, we are cautious and replace the SCID if an
alias exists.
2022-04-04 22:44:51 +02:00
|
|
|
return lnwire.NewTemporaryChannelFailure(upd)
|
|
|
|
}
|
|
|
|
failure := l.createFailureWithUpdate(false, originalScid, cb)
|
2020-02-06 18:35:16 +01:00
|
|
|
return NewDetailedLinkError(failure, OutgoingFailureHTLCExceedsMax)
|
2018-12-09 03:21:18 +01:00
|
|
|
}
|
|
|
|
|
2019-03-26 11:54:08 +01:00
|
|
|
// We want to avoid offering an HTLC which will expire in the near
|
2019-04-05 10:50:00 +02:00
|
|
|
// future, so we'll reject an HTLC if the outgoing expiration time is
|
|
|
|
// too close to the current height.
|
2019-04-19 11:11:16 +02:00
|
|
|
if timeout <= heightNow+l.cfg.OutgoingCltvRejectDelta {
|
2021-05-17 12:13:59 +02:00
|
|
|
l.log.Warnf("htlc(%x) has an expiry that's too soon: "+
|
2018-06-26 05:23:10 +02:00
|
|
|
"outgoing_expiry=%v, best_height=%v", payHash[:],
|
2019-04-19 11:11:16 +02:00
|
|
|
timeout, heightNow)
|
server+htlcswitch: prevent privacy leaks, allow alias routing
This intent of this change is to prevent privacy leaks when routing
with aliases and also to allow routing when using an alias. The
aliases are our aliases.
Introduces are two maps:
* aliasToReal:
This is an N->1 mapping for a channel. The keys are the set of
aliases and the value is the confirmed, on-chain SCID.
* baseIndex:
This is also an N->1 mapping for a channel. The keys are the set
of aliases and the value is the "base" SCID (whatever is in the
OpenChannel.ShortChannelID field). There is also a base->base
mapping, so not all keys are aliases.
The above maps are populated when a link is added to the switch and
when the channel has confirmed on-chain. The maps are not removed
from if the link is removed, but this is fine since forwarding won't
occur.
* getLinkByMapping
This function is introduced to adhere to the spec requirements that
using the confirmed SCID of a private, scid-alias-feature-bit
channel does not work. Lnd implements a stricter version of the spec
and disallows this behavior if the feature-bit was negotiated, rather
than just the channel type. The old, privacy-leak behavior is
preserved.
The spec also requires that if we must fail back an HTLC, the
ChannelUpdate must use the SCID of whatever was in the onion, to avoid
a privacy leak. This is also done by passing in the relevant SCID to
the mailbox and link. Lnd will also cancel back on the "incoming" side
if the InterceptableSwitch was used or if the link failed to decrypt
the onion. In this case, we are cautious and replace the SCID if an
alias exists.
2022-04-04 22:44:51 +02:00
|
|
|
|
2024-08-21 08:39:37 +02:00
|
|
|
cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
|
2022-04-04 22:44:51 +02:00
|
|
|
return lnwire.NewExpiryTooSoon(*upd)
|
|
|
|
}
|
|
|
|
failure := l.createFailureWithUpdate(false, originalScid, cb)
|
2020-01-14 14:07:42 +01:00
|
|
|
return NewLinkError(failure)
|
2018-06-26 05:23:10 +02:00
|
|
|
}
|
|
|
|
|
2019-04-05 10:50:00 +02:00
|
|
|
// Check absolute max delta.
|
2019-07-27 03:05:58 +02:00
|
|
|
if timeout > l.cfg.MaxOutgoingCltvExpiry+heightNow {
|
2021-05-17 12:13:59 +02:00
|
|
|
l.log.Warnf("outgoing htlc(%x) has a time lock too far in "+
|
2019-10-01 11:06:56 +02:00
|
|
|
"the future: got %v, but maximum is %v", payHash[:],
|
2019-07-27 03:05:58 +02:00
|
|
|
timeout-heightNow, l.cfg.MaxOutgoingCltvExpiry)
|
2018-10-15 08:41:56 +02:00
|
|
|
|
2020-01-14 14:07:42 +01:00
|
|
|
return NewLinkError(&lnwire.FailExpiryTooFar{})
|
2018-10-15 08:41:56 +02:00
|
|
|
}
|
|
|
|
|
2019-09-27 16:49:16 +02:00
|
|
|
// Check to see if there is enough balance in this channel.
|
|
|
|
if amt > l.Bandwidth() {
|
2021-05-17 12:13:59 +02:00
|
|
|
l.log.Warnf("insufficient bandwidth to route htlc: %v is "+
|
2021-05-15 12:07:51 +02:00
|
|
|
"larger than %v", amt, l.Bandwidth())
|
2024-08-21 08:39:37 +02:00
|
|
|
cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage {
|
2022-04-04 22:44:51 +02:00
|
|
|
return lnwire.NewTemporaryChannelFailure(upd)
|
|
|
|
}
|
|
|
|
failure := l.createFailureWithUpdate(false, originalScid, cb)
|
2020-01-14 14:07:42 +01:00
|
|
|
return NewDetailedLinkError(
|
2020-02-06 18:35:16 +01:00
|
|
|
failure, OutgoingFailureInsufficientBalance,
|
2020-01-14 14:07:42 +01:00
|
|
|
)
|
2019-09-27 16:49:16 +02:00
|
|
|
}
|
|
|
|
|
htlcswitch: perform fee related checks at forwarding time
In this commit, we fix a very old, lingering bug within the link. When
accepting an HTLC we are meant to validate the fee against the
constraints of the *outgoing* link. This is due to the fact that we're
offering a payment transit service on our outgoing link. Before this
commit, we would use the policies of the *incoming* link. This would at
times lead to odd routing errors as we would go to route, get an error
update and then route again, repeating the process.
With this commit, we'll properly use the incoming link for timelock
related constraints, and the outgoing link for fee related constraints.
We do this by introducing a new HtlcSatisfiesPolicy method in the link.
This method should return a non-nil error if the link can't carry the
HTLC because it would violate its forwarding policy. We'll use this method now
at *forwarding* time to ensure that we only forward to links that
actually accept the policy. This fixes a number of bugs that existed
before that could result in a link accepting an HTLC that actually
violated its policy. In the case that the policy is violated for *all*
links, we take care to return the error returned by the *target* link so
the caller can update their sending accordingly.
In this commit, we also remove the prior linkControl channel in the
channelLink. Instead of sending a message to update the internal link
policy, we'll guard it with a mutex. This simplifies the code, and also
adds some necessary refactoring in anticipation of the next follow up
commit.
2018-04-04 04:51:40 +02:00
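// Illustrative sketch only (a hypothetical helper, not the actual
// HtlcSatisfiesPolicy implementation): the kind of fee check the commit
// message above says must be run against the *outgoing* link's policy. The
// fee formula (base fee plus proportional millionths of the forwarded
// amount) follows BOLT #7; the parameter names are assumptions.
func checkOutgoingFeeSketch(baseFee, feeRate, incomingAmt,
	outgoingAmt lnwire.MilliSatoshi) error {

	// The fee we actually earn is the difference between the amount
	// offered to us and the amount we're asked to forward onwards.
	actualFee := int64(incomingAmt) - int64(outgoingAmt)

	// The fee our advertised policy requires for this outgoing amount.
	expectedFee := int64(baseFee) +
		int64(feeRate)*int64(outgoingAmt)/1000000

	if actualFee < expectedFee {
		return fmt.Errorf("insufficient fee: got %v msat, need at "+
			"least %v msat", actualFee, expectedFee)
	}

	return nil
}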
|
|
|
return nil
|
2017-06-17 00:01:00 +02:00
|
|
|
}
|
|
|
|
|
2017-06-01 01:43:37 +02:00
|
|
|
// Stats returns the statistics of the channel link.
|
|
|
|
//
|
2017-05-03 16:07:55 +02:00
|
|
|
// NOTE: Part of the ChannelLink interface.
|
2017-08-22 08:36:43 +02:00
|
|
|
func (l *channelLink) Stats() (uint64, lnwire.MilliSatoshi, lnwire.MilliSatoshi) {
|
2017-05-03 16:07:55 +02:00
|
|
|
snapshot := l.channel.StateSnapshot()
|
2017-08-22 08:50:12 +02:00
|
|
|
|
2017-11-10 23:58:28 +01:00
|
|
|
return snapshot.ChannelCommitment.CommitHeight,
|
|
|
|
snapshot.TotalMSatSent,
|
|
|
|
snapshot.TotalMSatReceived
|
2017-05-03 16:07:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// String returns the string representation of the channel link.
|
2017-06-01 01:43:37 +02:00
|
|
|
//
|
2017-05-03 16:07:55 +02:00
|
|
|
// NOTE: Part of the ChannelLink interface.
|
|
|
|
func (l *channelLink) String() string {
|
|
|
|
return l.channel.ChannelPoint().String()
|
|
|
|
}
|
|
|
|
|
2021-08-03 20:59:15 +02:00
|
|
|
// handleSwitchPacket handles the switch packets. These packets might be
|
2017-06-01 01:43:37 +02:00
|
|
|
// forwarded to us from another channel link in case the htlc update came from
|
|
|
|
// another peer, or the update might have been created by the user.
|
|
|
|
//
|
2021-08-03 20:59:15 +02:00
|
|
|
// NOTE: Part of the packetHandler interface.
|
|
|
|
func (l *channelLink) handleSwitchPacket(pkt *htlcPacket) error {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Tracef("received switch packet inkey=%v, outkey=%v",
|
2017-11-27 08:20:17 +01:00
|
|
|
pkt.inKey(), pkt.outKey())
|
2018-03-13 02:52:52 +01:00
|
|
|
|
2020-04-15 07:33:53 +02:00
|
|
|
return l.mailBox.AddPacket(pkt)
|
2017-05-03 16:07:55 +02:00
|
|
|
}
|
|
|
|
|
2017-06-01 01:43:37 +02:00
|
|
|
// HandleChannelUpdate handles the htlc requests (settle/add/fail) which are
|
|
|
|
// sent to us from the remote peer we have a channel with.
|
|
|
|
//
|
2017-05-03 16:07:55 +02:00
|
|
|
// NOTE: Part of the ChannelLink interface.
|
|
|
|
func (l *channelLink) HandleChannelUpdate(message lnwire.Message) {
|
2023-02-16 06:25:53 +01:00
|
|
|
select {
|
2024-10-17 13:38:34 +02:00
|
|
|
case <-l.Quit:
|
2023-02-16 06:25:53 +01:00
|
|
|
// Return early if the link is already in the process of
|
|
|
|
// quitting. It doesn't make sense to hand the message to the
|
|
|
|
// mailbox here.
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
2024-01-12 01:03:03 +01:00
|
|
|
err := l.mailBox.AddMessage(message)
|
|
|
|
if err != nil {
|
|
|
|
l.log.Errorf("failed to add Message to mailbox: %v", err)
|
|
|
|
}
|
2017-05-03 16:07:55 +02:00
|
|
|
}
|
2017-05-03 17:57:13 +02:00
|
|
|
|
2017-07-14 20:40:42 +02:00
|
|
|
// updateChannelFee updates the commitment fee-per-kw on this channel by
|
|
|
|
// committing to an update_fee message.
|
2019-10-31 03:43:05 +01:00
|
|
|
func (l *channelLink) updateChannelFee(feePerKw chainfee.SatPerKWeight) error {
|
2023-05-04 15:38:51 +02:00
|
|
|
l.log.Infof("updating commit fee to %v", feePerKw)
|
2017-11-24 05:31:45 +01:00
|
|
|
|
2017-12-11 01:19:13 +01:00
|
|
|
// We skip sending the UpdateFee message if the channel is not
|
2018-02-07 04:11:11 +01:00
|
|
|
// currently eligible to forward messages.
|
2023-11-26 21:43:21 +01:00
|
|
|
if !l.EligibleToUpdate() {
|
2019-10-01 11:16:24 +02:00
|
|
|
l.log.Debugf("skipping fee update for inactive channel")
|
2017-12-11 01:19:13 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2024-06-03 18:43:33 +02:00
|
|
|
// Check and see if our proposed fee-rate would make us exceed the fee
|
|
|
|
// threshold.
|
|
|
|
thresholdExceeded, err := l.exceedsFeeExposureLimit(feePerKw)
|
|
|
|
if err != nil {
|
|
|
|
// This shouldn't typically happen. If it does, it indicates
|
|
|
|
// something is wrong with our channel state.
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if thresholdExceeded {
|
|
|
|
return fmt.Errorf("link fee threshold exceeded")
|
|
|
|
}
|
|
|
|
|
2017-11-24 05:31:45 +01:00
|
|
|
// First, we'll update the local fee on our commitment.
|
2017-07-14 20:40:42 +02:00
|
|
|
if err := l.channel.UpdateFee(feePerKw); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-09-28 17:43:51 +02:00
|
|
|
// The fee passed the channel's validation checks, so we update the
|
|
|
|
// mailbox feerate.
|
|
|
|
l.mailBox.SetFeeRate(feePerKw)
|
|
|
|
|
2017-11-24 05:31:45 +01:00
|
|
|
// We'll then attempt to send a new UpdateFee message, and also lock it
|
|
|
|
// in immediately by triggering a commitment update.
|
2017-12-01 07:17:48 +01:00
|
|
|
msg := lnwire.NewUpdateFee(l.ChanID(), uint32(feePerKw))
|
2018-06-08 05:17:15 +02:00
|
|
|
if err := l.cfg.Peer.SendMessage(false, msg); err != nil {
|
2017-11-24 05:31:45 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return l.updateCommitTx()
|
2017-07-14 20:40:42 +02:00
|
|
|
}
|
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
// processRemoteSettleFails accepts a batch of settle/fail payment descriptors
|
|
|
|
// after receiving a revocation from the remote party, and reprocesses them in
|
|
|
|
// the context of the provided forwarding package. Any settles or fails that
|
|
|
|
// have already been acknowledged in the forwarding package will not be sent to
|
|
|
|
// the switch.
|
2024-08-16 22:28:44 +02:00
|
|
|
func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg) {
|
|
|
|
if len(fwdPkg.SettleFails) == 0 {
|
2017-11-27 08:20:17 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-11-14 20:58:54 +01:00
|
|
|
l.log.Debugf("settle-fail-filter: %v", fwdPkg.SettleFailFilter)
|
2017-11-27 08:20:17 +01:00
|
|
|
|
|
|
|
var switchPackets []*htlcPacket
|
2024-08-16 22:28:44 +02:00
|
|
|
for i, update := range fwdPkg.SettleFails {
|
|
|
|
destRef := fwdPkg.DestRef(uint16(i))
|
|
|
|
|
2018-03-13 02:52:52 +01:00
|
|
|
// Skip any settles or fails that have already been
|
|
|
|
// acknowledged by the incoming link that originated the
|
|
|
|
// forwarded Add.
|
2017-11-27 08:20:17 +01:00
|
|
|
if fwdPkg.SettleFailFilter.Contains(uint16(i)) {
|
|
|
|
continue
|
|
|
|
}
|
2017-05-03 17:57:13 +02:00
|
|
|
|
2018-03-13 02:52:52 +01:00
|
|
|
// TODO(roasbeef): rework log entries to a shared
|
|
|
|
// interface.
|
|
|
|
|
2024-08-16 22:28:44 +02:00
|
|
|
switch msg := update.UpdateMsg.(type) {
|
2017-06-16 23:58:02 +02:00
|
|
|
// A settle for an HTLC we previously forwarded has been
|
2018-02-28 07:14:44 +01:00
|
|
|
// received. So we'll forward the HTLC to the switch which will
|
|
|
|
// handle propagating the settle to the prior hop.
|
2024-08-16 22:28:44 +02:00
|
|
|
case *lnwire.UpdateFulfillHTLC:
|
2018-04-27 11:51:13 +02:00
|
|
|
// If hodl.SettleIncoming is requested, we will not
|
|
|
|
// forward the SETTLE to the switch and will not signal
|
|
|
|
// a free slot on the commitment transaction.
|
2019-08-14 19:57:31 +02:00
|
|
|
if l.cfg.HodlMask.Active(hodl.SettleIncoming) {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Warnf(hodl.SettleIncoming.Warning())
|
2018-04-27 11:51:13 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-10-24 08:18:26 +02:00
|
|
|
settlePacket := &htlcPacket{
|
2017-10-30 19:56:51 +01:00
|
|
|
outgoingChanID: l.ShortChanID(),
|
2024-08-16 22:28:44 +02:00
|
|
|
outgoingHTLCID: msg.ID,
|
|
|
|
destRef: &destRef,
|
|
|
|
htlc: msg,
|
2017-06-16 23:58:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add the packet to the batch to be forwarded, and
|
|
|
|
// notify the overflow queue that a spare spot has been
|
|
|
|
// freed up within the commitment state.
|
2017-11-27 08:20:17 +01:00
|
|
|
switchPackets = append(switchPackets, settlePacket)
|
2017-05-03 17:57:13 +02:00
|
|
|
|
2018-02-28 07:14:44 +01:00
|
|
|
// A failureCode message for a previously forwarded HTLC has
|
|
|
|
// been received. As a result a new slot will be freed up in
|
|
|
|
// our commitment state, so we'll forward this to the switch so
|
|
|
|
// the backwards undo can continue.
|
2024-08-16 22:28:44 +02:00
|
|
|
case *lnwire.UpdateFailHTLC:
|
2018-04-27 11:51:13 +02:00
|
|
|
// If hodl.FailIncoming is requested, we will not
|
|
|
|
// forward the FAIL to the switch and will not signal a
|
|
|
|
// free slot on the commitment transaction.
|
2019-08-14 19:57:31 +02:00
|
|
|
if l.cfg.HodlMask.Active(hodl.FailIncoming) {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Warnf(hodl.FailIncoming.Warning())
|
2018-04-27 11:51:13 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-10-03 17:22:43 +02:00
|
|
|
// Fetch the reason the HTLC was canceled so we can
|
2020-02-06 18:35:17 +01:00
|
|
|
// continue to propagate it. This failure originated
|
|
|
|
// from another node, so the linkFailure field is not
|
|
|
|
// set on the packet.
|
2017-10-24 08:18:26 +02:00
|
|
|
failPacket := &htlcPacket{
|
2017-10-30 19:56:51 +01:00
|
|
|
outgoingChanID: l.ShortChanID(),
|
2024-08-16 22:28:44 +02:00
|
|
|
outgoingHTLCID: msg.ID,
|
|
|
|
destRef: &destRef,
|
|
|
|
htlc: msg,
|
2017-06-16 23:58:02 +02:00
|
|
|
}
|
|
|
|
|
2024-08-16 22:28:44 +02:00
|
|
|
l.log.Debugf("Failed to send HTLC with ID=%d", msg.ID)
|
2021-07-08 20:32:42 +02:00
|
|
|
|
2019-05-01 03:20:23 +02:00
|
|
|
// If the failure message lacks an HMAC (but includes
|
|
|
|
// the 4 bytes for encoding the message and padding
|
|
|
|
// lengths), then this means that we received it as an
|
|
|
|
// UpdateFailMalformedHTLC. As a result, we'll signal
|
|
|
|
// that we need to convert this error within the switch
|
|
|
|
// to an actual error, by encrypting it as if we were
|
|
|
|
// the originating hop.
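// As a worked example (assuming the BOLT #4 value of 256 bytes for
// lnwire.FailureMessageLength): a converted reason is 256 + 2 + 2 = 260
// bytes, i.e. the padded failure message plus its two 2-byte length
// prefixes, whereas a normally encrypted reason would additionally carry
// a 32-byte HMAC.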
|
|
|
|
convertedErrorSize := lnwire.FailureMessageLength + 4
|
2024-08-16 22:28:44 +02:00
|
|
|
if len(msg.Reason) == convertedErrorSize {
|
2019-05-01 03:20:23 +02:00
|
|
|
failPacket.convertedError = true
|
|
|
|
}
|
|
|
|
|
2017-06-16 23:58:02 +02:00
|
|
|
// Add the packet to the batch to be forwarded, and
|
|
|
|
// notify the overflow queue that a spare spot has been
|
|
|
|
// freed up within the commitment state.
|
2017-11-27 08:20:17 +01:00
|
|
|
switchPackets = append(switchPackets, failPacket)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-08 05:55:07 +02:00
|
|
|
// Only spawn the task to forward packets if we have a non-zero number.
|
|
|
|
if len(switchPackets) > 0 {
|
2022-02-03 15:34:25 +01:00
|
|
|
go l.forwardBatch(false, switchPackets...)
|
2018-05-08 05:55:07 +02:00
|
|
|
}
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// processRemoteAdds serially processes each of the Add payment descriptors
|
|
|
|
// which have been "locked-in" by receiving a revocation from the remote party.
|
|
|
|
// The forwarding package provided instructs how to process this batch,
|
|
|
|
// indicating whether this is the first time these Adds are being processed, or
|
|
|
|
// whether we are reprocessing as a result of a failure or restart. Adds that
|
|
|
|
// have already been acknowledged in the forwarding package will be ignored.
|
2024-08-16 22:19:36 +02:00
|
|
|
//
|
|
|
|
//nolint:funlen
|
|
|
|
func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg) {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Tracef("processing %d remote adds for height %d",
|
2024-08-16 22:19:36 +02:00
|
|
|
len(fwdPkg.Adds), fwdPkg.Height)
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2019-09-05 13:35:39 +02:00
|
|
|
decodeReqs := make(
|
2024-08-16 22:19:36 +02:00
|
|
|
[]hop.DecodeHopIteratorRequest, 0, len(fwdPkg.Adds),
|
2019-09-05 13:35:39 +02:00
|
|
|
)
|
2024-08-16 22:19:36 +02:00
|
|
|
for _, update := range fwdPkg.Adds {
|
|
|
|
if msg, ok := update.UpdateMsg.(*lnwire.UpdateAddHTLC); ok {
|
2017-11-27 08:20:17 +01:00
|
|
|
// Before adding the new htlc to the state machine,
|
|
|
|
// parse the onion object in order to obtain the
|
|
|
|
// routing information with the DecodeHopIterator function,
|
|
|
|
// which processes the Sphinx packet.
|
2024-08-16 22:19:36 +02:00
|
|
|
onionReader := bytes.NewReader(msg.OnionBlob[:])
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2019-09-05 13:35:39 +02:00
|
|
|
req := hop.DecodeHopIteratorRequest{
|
2023-01-27 20:19:56 +01:00
|
|
|
OnionReader: onionReader,
|
2024-08-16 22:19:36 +02:00
|
|
|
RHash: msg.PaymentHash[:],
|
|
|
|
IncomingCltv: msg.Expiry,
|
|
|
|
IncomingAmount: msg.Amount,
|
|
|
|
BlindingPoint: msg.BlindingPoint,
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
decodeReqs = append(decodeReqs, req)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Atomically decode the incoming htlcs, simultaneously checking for
|
|
|
|
// replay attempts. A particular index in the returned, spare list of
|
2018-03-13 02:52:52 +01:00
|
|
|
// channel iterators should only be used if the failure code at the
|
|
|
|
// same index is lnwire.CodeNone.
|
2017-11-27 08:20:17 +01:00
|
|
|
decodeResps, sphinxErr := l.cfg.DecodeHopIterators(
|
|
|
|
fwdPkg.ID(), decodeReqs,
|
|
|
|
)
|
|
|
|
if sphinxErr != nil {
|
2024-08-20 12:46:24 +02:00
|
|
|
l.failf(LinkFailureError{code: ErrInternalError},
|
2018-05-09 15:49:58 +02:00
|
|
|
"unable to decode hop iterators: %v", sphinxErr)
|
2019-04-10 13:10:25 +02:00
|
|
|
return
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
|
|
|
|
2019-04-10 13:10:25 +02:00
|
|
|
var switchPackets []*htlcPacket
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2024-08-16 22:19:36 +02:00
|
|
|
for i, update := range fwdPkg.Adds {
|
2017-11-27 08:20:17 +01:00
|
|
|
idx := uint16(i)
|
|
|
|
|
2024-08-16 22:19:36 +02:00
|
|
|
//nolint:forcetypeassert
|
|
|
|
add := *update.UpdateMsg.(*lnwire.UpdateAddHTLC)
|
|
|
|
sourceRef := fwdPkg.SourceRef(idx)
|
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
if fwdPkg.State == channeldb.FwdStateProcessed &&
|
|
|
|
fwdPkg.AckFilter.Contains(idx) {
|
|
|
|
|
2018-03-13 02:52:52 +01:00
|
|
|
// If this index is already found in the ack filter,
|
|
|
|
// the response to this forwarding decision has already
|
|
|
|
// been committed by one of our commitment txns. ADDs
|
|
|
|
// in this state are waiting for the rest of the fwding
|
|
|
|
// package to get acked before being garbage collected.
|
2017-11-27 08:20:17 +01:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-06-16 23:58:02 +02:00
|
|
|
// An incoming HTLC add has been fully locked in. As a result we
|
2017-10-19 02:36:28 +02:00
|
|
|
// can now examine the forwarding details of the HTLC, and the
|
2017-07-15 05:08:29 +02:00
|
|
|
// HTLC itself to decide whether we should forward it, cancel it,
|
2017-06-16 23:58:02 +02:00
|
|
|
// or settle it (provided it adheres to our fee related
|
|
|
|
// constraints).
|
|
|
|
|
2018-03-13 02:55:04 +01:00
|
|
|
// Before adding the new htlc to the state machine, parse the
|
|
|
|
// onion object in order to obtain the routing information with
|
|
|
|
// the DecodeHopIterator function, which processes the Sphinx packet.
|
|
|
|
chanIterator, failureCode := decodeResps[i].Result()
|
|
|
|
if failureCode != lnwire.CodeNone {
|
2024-04-23 18:33:04 +02:00
|
|
|
// If we're unable to process the onion blob then we
|
2018-03-13 02:55:04 +01:00
|
|
|
// should send the malformed htlc error to payment
|
|
|
|
// sender.
|
2024-08-16 22:19:36 +02:00
|
|
|
l.sendMalformedHTLCError(
|
|
|
|
add.ID, failureCode, add.OnionBlob, &sourceRef,
|
|
|
|
)
|
2018-03-13 02:55:04 +01:00
|
|
|
|
2019-10-01 11:16:24 +02:00
|
|
|
l.log.Errorf("unable to decode onion hop "+
|
2018-03-13 02:55:04 +01:00
|
|
|
"iterator: %v", failureCode)
|
|
|
|
continue
|
|
|
|
}
|
2017-07-15 05:08:29 +02:00
|
|
|
|
2021-08-03 20:49:17 +02:00
|
|
|
heightNow := l.cfg.BestHeight()
|
2018-03-13 02:55:04 +01:00
|
|
|
|
2024-04-23 18:33:04 +02:00
|
|
|
pld, routeRole, pldErr := chanIterator.HopPayload()
|
2024-04-25 15:46:31 +02:00
|
|
|
if pldErr != nil {
|
2019-07-31 06:52:17 +02:00
|
|
|
// If we're unable to process the onion payload, or we
|
2019-10-31 05:19:53 +01:00
|
|
|
// received an invalid onion payload failure, then we
|
|
|
|
// should send an error back to the caller so the HTLC
|
|
|
|
// can be canceled.
|
|
|
|
var failedType uint64
|
2024-04-25 15:46:31 +02:00
|
|
|
|
|
|
|
// We need to get the underlying error value, so we
|
|
|
|
// can't use errors.As as suggested by the linter.
|
|
|
|
//nolint:errorlint
|
|
|
|
if e, ok := pldErr.(hop.ErrInvalidPayload); ok {
|
2019-10-31 05:19:53 +01:00
|
|
|
failedType = uint64(e.Type)
|
|
|
|
}
|
|
|
|
|
2024-04-23 18:33:04 +02:00
|
|
|
// If we couldn't parse the payload, make our best
|
|
|
|
// effort at creating an error encrypter that knows
|
|
|
|
// what blinding type we were, but if we couldn't
|
|
|
|
// parse the payload we have no way of knowing whether
|
|
|
|
// we were the introduction node or not.
|
|
|
|
//
|
|
|
|
//nolint:lll
|
|
|
|
obfuscator, failCode := chanIterator.ExtractErrorEncrypter(
|
|
|
|
l.cfg.ExtractErrorEncrypter,
|
|
|
|
// We need our route role here because we
|
|
|
|
// couldn't parse or validate the payload.
|
|
|
|
routeRole == hop.RouteRoleIntroduction,
|
|
|
|
)
|
|
|
|
if failCode != lnwire.CodeNone {
|
|
|
|
l.log.Errorf("could not extract error "+
|
|
|
|
"encrypter: %v", pldErr)
|
|
|
|
|
|
|
|
// We can't process this htlc, send back
|
|
|
|
// malformed.
|
|
|
|
l.sendMalformedHTLCError(
|
2024-08-16 22:19:36 +02:00
|
|
|
add.ID, failureCode, add.OnionBlob,
|
|
|
|
&sourceRef,
|
2024-04-23 18:33:04 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-10-31 05:19:53 +01:00
|
|
|
// TODO: currently none of the unit test infrastructure
|
|
|
|
// is set up to handle TLV payloads, so testing this
|
|
|
|
// would require implementing a separate mock iterator
|
|
|
|
// for TLV payloads that also supports injecting invalid
|
|
|
|
// payloads. Deferring this non-trivial effort till a
|
|
|
|
// later date
|
2020-02-06 18:35:17 +01:00
|
|
|
failure := lnwire.NewInvalidOnionPayload(failedType, 0)
|
2024-08-16 21:45:35 +02:00
|
|
|
|
2019-07-31 06:52:17 +02:00
|
|
|
l.sendHTLCError(
|
2024-08-16 22:19:36 +02:00
|
|
|
add, sourceRef, NewLinkError(failure),
|
2024-08-16 21:45:35 +02:00
|
|
|
obfuscator, false,
|
2019-07-31 06:52:17 +02:00
|
|
|
)
|
|
|
|
|
2019-10-02 15:53:29 +02:00
|
|
|
l.log.Errorf("unable to decode forwarding "+
|
2024-04-25 15:46:31 +02:00
|
|
|
"instructions: %v", pldErr)
|
|
|
|
|
2019-07-31 06:52:17 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2024-04-23 18:33:04 +02:00
|
|
|
// Retrieve the onion obfuscator from the onion blob in order to
|
|
|
|
// produce initial obfuscation of the onion failureCode.
|
|
|
|
obfuscator, failureCode := chanIterator.ExtractErrorEncrypter(
|
|
|
|
l.cfg.ExtractErrorEncrypter,
|
|
|
|
routeRole == hop.RouteRoleIntroduction,
|
|
|
|
)
|
|
|
|
if failureCode != lnwire.CodeNone {
|
|
|
|
// If we're unable to process the onion blob then we
|
|
|
|
// should send the malformed htlc error to payment
|
|
|
|
// sender.
|
|
|
|
l.sendMalformedHTLCError(
|
2024-08-16 22:19:36 +02:00
|
|
|
add.ID, failureCode, add.OnionBlob,
|
|
|
|
&sourceRef,
|
2024-04-23 18:33:04 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
l.log.Errorf("unable to decode onion "+
|
|
|
|
"obfuscator: %v", failureCode)
|
|
|
|
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-11-05 00:10:15 +01:00
|
|
|
fwdInfo := pld.ForwardingInfo()
|
|
|
|
|
2024-04-03 14:58:53 +02:00
|
|
|
// Check whether the payload we've just processed uses our
|
|
|
|
// node as the introduction point (gave us a blinding key in
|
|
|
|
// the payload itself) and fail it back if we don't support
|
|
|
|
// route blinding.
|
|
|
|
if fwdInfo.NextBlinding.IsSome() &&
|
|
|
|
l.cfg.DisallowRouteBlinding {
|
|
|
|
|
|
|
|
failure := lnwire.NewInvalidBlinding(
|
2024-08-16 22:19:36 +02:00
|
|
|
fn.Some(add.OnionBlob),
|
2024-04-03 14:58:53 +02:00
|
|
|
)
|
2024-08-16 21:45:35 +02:00
|
|
|
|
2024-04-03 14:58:53 +02:00
|
|
|
l.sendHTLCError(
|
2024-08-16 22:19:36 +02:00
|
|
|
add, sourceRef, NewLinkError(failure),
|
2024-08-16 21:45:35 +02:00
|
|
|
obfuscator, false,
|
2024-04-03 14:58:53 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
l.log.Error("rejected htlc that uses use as an " +
|
|
|
|
"introduction point when we do not support " +
|
|
|
|
"route blinding")
|
|
|
|
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-03-13 02:55:04 +01:00
|
|
|
switch fwdInfo.NextHop {
|
2019-08-30 23:11:38 +02:00
|
|
|
case hop.Exit:
|
2019-04-10 13:10:25 +02:00
|
|
|
err := l.processExitHop(
|
2024-08-16 22:19:36 +02:00
|
|
|
add, sourceRef, obfuscator, fwdInfo,
|
2024-08-16 22:02:51 +02:00
|
|
|
heightNow, pld,
|
2018-04-25 05:43:55 +02:00
|
|
|
)
|
2018-03-13 02:55:04 +01:00
|
|
|
if err != nil {
|
2024-08-20 12:46:24 +02:00
|
|
|
l.failf(LinkFailureError{
|
|
|
|
code: ErrInternalError,
|
|
|
|
}, err.Error()) //nolint
|
2017-05-03 17:57:13 +02:00
|
|
|
|
2019-04-10 13:10:25 +02:00
|
|
|
return
|
2018-05-18 14:01:01 +02:00
|
|
|
}
|
|
|
|
|
2018-03-13 02:55:04 +01:00
|
|
|
// There are additional channels left within this route. So
|
2018-06-26 05:23:10 +02:00
|
|
|
// we'll simply do some forwarding package book-keeping.
|
2018-03-13 02:55:04 +01:00
|
|
|
default:
|
2018-04-27 11:51:13 +02:00
|
|
|
// If hodl.AddIncoming is requested, we will not
|
|
|
|
// validate the forwarded ADD, nor will we send the
|
|
|
|
// packet to the htlc switch.
|
2019-08-14 19:57:31 +02:00
|
|
|
if l.cfg.HodlMask.Active(hodl.AddIncoming) {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Warnf(hodl.AddIncoming.Warning())
|
2018-04-27 11:51:13 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-03-13 02:55:04 +01:00
|
|
|
switch fwdPkg.State {
|
|
|
|
case channeldb.FwdStateProcessed:
|
|
|
|
// This add was not forwarded in the previous
|
|
|
|
// processing phase, run it through our
|
|
|
|
// validation pipeline to reproduce an error.
|
|
|
|
// This may trigger a different error due to
|
|
|
|
// expiring timelocks, but we expect that an
|
|
|
|
// error will be reproduced.
|
|
|
|
if !fwdPkg.FwdFilter.Contains(idx) {
|
|
|
|
break
|
|
|
|
}
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2018-03-13 02:55:04 +01:00
|
|
|
// Otherwise, it was already processed, we can
|
2024-08-16 22:19:36 +02:00
|
|
|
// collect it and continue.
|
|
|
|
outgoingAdd := &lnwire.UpdateAddHTLC{
|
2024-04-02 15:50:13 +02:00
|
|
|
Expiry: fwdInfo.OutgoingCTLV,
|
|
|
|
Amount: fwdInfo.AmountToForward,
|
2024-08-16 22:19:36 +02:00
|
|
|
PaymentHash: add.PaymentHash,
|
2024-04-02 15:50:13 +02:00
|
|
|
BlindingPoint: fwdInfo.NextBlinding,
|
2018-03-13 02:55:04 +01:00
|
|
|
}
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2018-03-13 02:55:04 +01:00
|
|
|
// Finally, we'll encode the onion packet for
|
|
|
|
// the _next_ hop using the hop iterator
|
|
|
|
// decoded for the current hop.
|
2024-08-16 22:19:36 +02:00
|
|
|
buf := bytes.NewBuffer(
|
|
|
|
outgoingAdd.OnionBlob[0:0],
|
|
|
|
)
|
2018-03-13 02:55:04 +01:00
|
|
|
|
|
|
|
// We know this cannot fail, as this ADD
|
|
|
|
// was marked forwarded in a previous
|
|
|
|
// round of processing.
|
|
|
|
chanIterator.EncodeNextHop(buf)
|
|
|
|
|
2022-09-19 12:06:34 +02:00
|
|
|
inboundFee := l.cfg.FwrdingPolicy.InboundFee
|
|
|
|
|
2024-04-16 12:29:15 +02:00
|
|
|
//nolint:lll
|
2018-03-13 02:55:04 +01:00
|
|
|
updatePacket := &htlcPacket{
|
2024-04-16 12:29:15 +02:00
|
|
|
incomingChanID: l.ShortChanID(),
|
2024-08-16 22:19:36 +02:00
|
|
|
incomingHTLCID: add.ID,
|
2024-04-16 12:29:15 +02:00
|
|
|
outgoingChanID: fwdInfo.NextHop,
|
2024-08-16 22:19:36 +02:00
|
|
|
sourceRef: &sourceRef,
|
|
|
|
incomingAmount: add.Amount,
|
|
|
|
amount: outgoingAdd.Amount,
|
|
|
|
htlc: outgoingAdd,
|
2024-04-16 12:29:15 +02:00
|
|
|
obfuscator: obfuscator,
|
2024-08-16 22:19:36 +02:00
|
|
|
incomingTimeout: add.Expiry,
|
2024-04-16 12:29:15 +02:00
|
|
|
outgoingTimeout: fwdInfo.OutgoingCTLV,
|
|
|
|
inOnionCustomRecords: pld.CustomRecords(),
|
|
|
|
inboundFee: inboundFee,
|
2024-08-16 22:19:36 +02:00
|
|
|
inWireCustomRecords: add.CustomRecords.Copy(),
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
2018-04-04 04:51:40 +02:00
|
|
|
switchPackets = append(
|
|
|
|
switchPackets, updatePacket,
|
|
|
|
)
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2018-03-13 02:55:04 +01:00
|
|
|
continue
|
|
|
|
}
|
2017-08-03 06:10:35 +02:00
|
|
|
|
2018-06-26 05:23:10 +02:00
|
|
|
// TODO(roasbeef): ensure don't accept outrageous
|
|
|
|
// timeout for htlc
|
2017-06-29 15:40:45 +02:00
|
|
|
|
2018-03-13 02:55:04 +01:00
|
|
|
// With all our forwarding constraints met, we'll
|
|
|
|
// create the outgoing HTLC using the parameters as
|
|
|
|
// specified in the forwarding info.
|
|
|
|
addMsg := &lnwire.UpdateAddHTLC{
|
2024-04-02 15:50:13 +02:00
|
|
|
Expiry: fwdInfo.OutgoingCTLV,
|
|
|
|
Amount: fwdInfo.AmountToForward,
|
2024-08-16 22:19:36 +02:00
|
|
|
PaymentHash: add.PaymentHash,
|
2024-04-02 15:50:13 +02:00
|
|
|
BlindingPoint: fwdInfo.NextBlinding,
|
2018-03-13 02:55:04 +01:00
|
|
|
}
|
2017-06-16 23:58:02 +02:00
|
|
|
|
2018-03-13 02:55:04 +01:00
|
|
|
// Finally, we'll encode the onion packet for the
|
|
|
|
// _next_ hop using the hop iterator decoded for the
|
|
|
|
// current hop.
|
|
|
|
buf := bytes.NewBuffer(addMsg.OnionBlob[0:0])
|
|
|
|
err := chanIterator.EncodeNextHop(buf)
|
|
|
|
if err != nil {
|
2019-10-01 11:16:24 +02:00
|
|
|
l.log.Errorf("unable to encode the "+
|
2018-03-13 02:55:04 +01:00
|
|
|
"remaining route %v", err)
|
|
|
|
|
2024-08-21 08:39:37 +02:00
|
|
|
cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage { //nolint:lll
|
2022-04-04 22:44:51 +02:00
|
|
|
return lnwire.NewTemporaryChannelFailure(upd)
|
|
|
|
}
|
|
|
|
|
2019-09-27 16:01:18 +02:00
|
|
|
failure := l.createFailureWithUpdate(
|
2022-04-04 22:44:51 +02:00
|
|
|
true, hop.Source, cb,
|
2018-05-08 05:00:32 +02:00
|
|
|
)
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2018-03-13 02:55:04 +01:00
|
|
|
l.sendHTLCError(
|
2024-08-16 22:19:36 +02:00
|
|
|
add, sourceRef, NewLinkError(failure),
|
|
|
|
obfuscator, false,
|
2018-03-13 02:55:04 +01:00
|
|
|
)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now that this add has been reprocessed, only append
|
|
|
|
// it to our list of packets to forward to the switch
|
|
|
|
// if this is the first time processing the add. If the
|
|
|
|
// fwd pkg has already been processed, then we entered
|
|
|
|
// the above section to recreate a previous error. If
|
|
|
|
// the packet had previously been forwarded, it would
|
|
|
|
// have been added to switchPackets at the top of this
|
|
|
|
// section.
|
|
|
|
if fwdPkg.State == channeldb.FwdStateLockedIn {
|
2022-09-19 12:06:34 +02:00
|
|
|
inboundFee := l.cfg.FwrdingPolicy.InboundFee
|
|
|
|
|
2024-04-16 12:29:15 +02:00
|
|
|
//nolint:lll
|
2018-03-13 02:55:04 +01:00
|
|
|
updatePacket := &htlcPacket{
|
2024-04-16 12:29:15 +02:00
|
|
|
incomingChanID: l.ShortChanID(),
|
2024-08-16 22:19:36 +02:00
|
|
|
incomingHTLCID: add.ID,
|
2024-04-16 12:29:15 +02:00
|
|
|
outgoingChanID: fwdInfo.NextHop,
|
2024-08-16 22:19:36 +02:00
|
|
|
sourceRef: &sourceRef,
|
|
|
|
incomingAmount: add.Amount,
|
2024-04-16 12:29:15 +02:00
|
|
|
amount: addMsg.Amount,
|
|
|
|
htlc: addMsg,
|
|
|
|
obfuscator: obfuscator,
|
2024-08-16 22:19:36 +02:00
|
|
|
incomingTimeout: add.Expiry,
|
2024-04-16 12:29:15 +02:00
|
|
|
outgoingTimeout: fwdInfo.OutgoingCTLV,
|
|
|
|
inOnionCustomRecords: pld.CustomRecords(),
|
|
|
|
inboundFee: inboundFee,
|
2024-08-16 22:19:36 +02:00
|
|
|
inWireCustomRecords: add.CustomRecords.Copy(),
|
2017-10-24 08:18:26 +02:00
|
|
|
}
|
2018-03-13 02:55:04 +01:00
|
|
|
|
|
|
|
fwdPkg.FwdFilter.Set(idx)
|
|
|
|
switchPackets = append(switchPackets,
|
|
|
|
updatePacket)
|
2017-05-03 17:57:13 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
// Commit the htlcs we are intending to forward if this package has not
|
|
|
|
// been fully processed.
|
|
|
|
if fwdPkg.State == channeldb.FwdStateLockedIn {
|
|
|
|
err := l.channel.SetFwdFilter(fwdPkg.Height, fwdPkg.FwdFilter)
|
|
|
|
if err != nil {
|
2024-08-20 12:46:24 +02:00
|
|
|
l.failf(LinkFailureError{code: ErrInternalError},
|
2018-05-09 15:49:58 +02:00
|
|
|
"unable to set fwd filter: %v", err)
|
2019-04-10 13:10:25 +02:00
|
|
|
return
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(switchPackets) == 0 {
|
2019-04-10 13:10:25 +02:00
|
|
|
return
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
|
|
|
|
2022-02-03 15:34:25 +01:00
|
|
|
replay := fwdPkg.State != channeldb.FwdStateLockedIn
|
|
|
|
|
|
|
|
l.log.Debugf("forwarding %d packets to switch: replay=%v",
|
|
|
|
len(switchPackets), replay)
|
2017-11-27 08:20:17 +01:00
|
|
|
|
2018-05-08 05:55:07 +02:00
|
|
|
// NOTE: This call is made synchronous so that we ensure all circuits
|
|
|
|
// are committed in the exact order that they are processed in the link.
|
|
|
|
// Failing to do this could cause reorderings/gaps in the range of
|
|
|
|
// opened circuits, which violates assumptions made by the circuit
|
|
|
|
// trimming.
|
2022-02-03 15:34:25 +01:00
|
|
|
l.forwardBatch(replay, switchPackets...)
|
2017-11-27 08:20:17 +01:00
|
|
|
}
|
|
|
|
|
2019-02-08 10:01:54 +01:00
|
|
|
// processExitHop handles an htlc for which this link is the exit hop. It
|
|
|
|
// returns an error if the htlc cannot be processed.
|
2024-08-16 22:02:51 +02:00
|
|
|
func (l *channelLink) processExitHop(add lnwire.UpdateAddHTLC,
|
|
|
|
sourceRef channeldb.AddRef, obfuscator hop.ErrorEncrypter,
|
|
|
|
fwdInfo hop.ForwardingInfo, heightNow uint32,
|
|
|
|
payload invoices.Payload) error {
|
2019-02-08 10:01:54 +01:00
|
|
|
|
|
|
|
// If hodl.ExitSettle is requested, we will not validate the final hop's
|
|
|
|
// ADD, nor will we settle the corresponding invoice or respond with the
|
|
|
|
// preimage.
|
2019-08-14 19:57:31 +02:00
|
|
|
if l.cfg.HodlMask.Active(hodl.ExitSettle) {
|
2024-10-31 17:35:27 +01:00
|
|
|
l.log.Warnf("%s for htlc(rhash=%x,htlcIndex=%v)",
|
|
|
|
hodl.ExitSettle.Warning(), add.PaymentHash, add.ID)
|
2019-02-08 10:01:54 +01:00
|
|
|
|
2019-04-10 13:10:25 +02:00
|
|
|
return nil
|
2019-02-08 10:01:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// As we're the exit hop, we'll double check the hop-payload included in
|
|
|
|
// the HTLC to ensure that it was crafted correctly by the sender and
|
2023-06-14 22:02:34 +02:00
|
|
|
// is compatible with the HTLC we were extended.
|
2024-05-23 13:26:32 +02:00
|
|
|
//
|
|
|
|
// As a special case, if the fwdInfo doesn't have any blinded path
|
|
|
|
// information, and the incoming HTLC had special extra data, then
|
|
|
|
// we'll skip this amount check. The invoice acceptor will make sure we
|
|
|
|
// reject the HTLC if it's not containing the correct amount after
|
|
|
|
// examining the custom data.
|
|
|
|
hasBlindedPath := fwdInfo.NextBlinding.IsSome()
|
|
|
|
customHTLC := len(add.CustomRecords) > 0 && !hasBlindedPath
|
|
|
|
log.Tracef("Exit hop has_blinded_path=%v custom_htlc_bypass=%v",
|
|
|
|
hasBlindedPath, customHTLC)
|
|
|
|
|
|
|
|
if !customHTLC && add.Amount < fwdInfo.AmountToForward {
|
2023-06-14 22:02:34 +02:00
|
|
|
l.log.Errorf("onion payload of incoming htlc(%x) has "+
|
2024-08-16 22:02:51 +02:00
|
|
|
"incompatible value: expected <=%v, got %v",
|
|
|
|
add.PaymentHash, add.Amount, fwdInfo.AmountToForward)
|
2019-02-08 10:01:54 +01:00
|
|
|
|
2020-02-06 18:35:17 +01:00
|
|
|
failure := NewLinkError(
|
2024-08-16 22:02:51 +02:00
|
|
|
lnwire.NewFinalIncorrectHtlcAmount(add.Amount),
|
2020-02-06 18:35:17 +01:00
|
|
|
)
|
2024-08-16 22:02:51 +02:00
|
|
|
l.sendHTLCError(add, sourceRef, failure, obfuscator, true)
|
2019-02-08 10:01:54 +01:00
|
|
|
|
2019-04-10 13:10:25 +02:00
|
|
|
return nil
|
2019-02-08 10:01:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// We'll also ensure that our time-lock value has been computed
|
2019-04-16 12:11:20 +02:00
|
|
|
// correctly.
|
2024-08-16 22:02:51 +02:00
|
|
|
if add.Expiry < fwdInfo.OutgoingCTLV {
|
2023-06-14 22:02:34 +02:00
|
|
|
l.log.Errorf("onion payload of incoming htlc(%x) has "+
|
|
|
|
"incompatible time-lock: expected <=%v, got %v",
|
2024-08-16 22:02:51 +02:00
|
|
|
add.PaymentHash, add.Expiry, fwdInfo.OutgoingCTLV)
|
2019-02-08 10:01:54 +01:00
|
|
|
|
2020-02-06 18:35:17 +01:00
|
|
|
failure := NewLinkError(
|
2024-08-16 22:02:51 +02:00
|
|
|
lnwire.NewFinalIncorrectCltvExpiry(add.Expiry),
|
2020-02-06 18:35:17 +01:00
|
|
|
)
|
2024-08-16 21:45:35 +02:00
|
|
|
|
2024-08-16 22:02:51 +02:00
|
|
|
l.sendHTLCError(add, sourceRef, failure, obfuscator, true)
|
2019-02-08 10:01:54 +01:00
|
|
|
|
2019-04-10 13:10:25 +02:00
|
|
|
return nil
|
2019-02-08 10:01:54 +01:00
|
|
|
}
|
|
|
|
|
2019-02-20 12:11:15 +01:00
|
|
|
// Notify the invoiceRegistry of the exit hop htlc. If we crash right
|
|
|
|
// after this, this code will be re-executed after restart. We will
|
|
|
|
// receive back a resolution event.
|
2024-08-16 22:02:51 +02:00
|
|
|
invoiceHash := lntypes.Hash(add.PaymentHash)
|
2019-04-16 12:11:20 +02:00
|
|
|
|
2022-11-18 12:15:22 +01:00
|
|
|
circuitKey := models.CircuitKey{
|
2019-08-08 15:48:31 +02:00
|
|
|
ChanID: l.ShortChanID(),
|
2024-08-16 22:02:51 +02:00
|
|
|
HtlcID: add.ID,
|
2019-08-08 15:48:31 +02:00
|
|
|
}
|
|
|
|
|
2019-02-20 12:11:15 +01:00
|
|
|
event, err := l.cfg.Registry.NotifyExitHopHtlc(
|
2024-08-16 22:02:51 +02:00
|
|
|
invoiceHash, add.Amount, add.Expiry, int32(heightNow),
|
2024-05-08 19:17:05 +02:00
|
|
|
circuitKey, l.hodlQueue.ChanIn(), add.CustomRecords, payload,
|
2019-02-20 12:11:15 +01:00
|
|
|
)
|
2019-12-20 11:25:08 +01:00
|
|
|
if err != nil {
|
2019-04-10 13:10:25 +02:00
|
|
|
return err
|
2019-02-08 10:01:54 +01:00
|
|
|
}
|
|
|
|
|
2019-02-11 12:01:05 +01:00
|
|
|
// Create a hodlHtlc struct and decide whether to resolve it now or later.
|
2019-02-20 12:11:15 +01:00
|
|
|
htlc := hodlHtlc{
|
2024-08-16 22:05:54 +02:00
|
|
|
add: add,
|
|
|
|
sourceRef: sourceRef,
|
2019-02-20 12:11:15 +01:00
|
|
|
obfuscator: obfuscator,
|
|
|
|
}
|
2019-02-11 12:01:05 +01:00
|
|
|
|
2019-12-20 11:25:07 +01:00
|
|
|
// If the event is nil, the invoice is being held, so we save the payment
|
|
|
|
// descriptor for future reference.
|
2019-02-11 12:01:05 +01:00
|
|
|
if event == nil {
|
2019-08-14 21:11:34 +02:00
|
|
|
l.hodlMap[circuitKey] = htlc
|
2019-04-10 13:10:25 +02:00
|
|
|
return nil
|
2019-02-11 12:01:05 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Process the received resolution.
|
2020-02-06 18:35:10 +01:00
|
|
|
return l.processHtlcResolution(event, htlc)
|
2019-02-08 10:57:50 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// settleHTLC settles the HTLC on the channel.
|
2020-02-19 16:34:48 +01:00
|
|
|
func (l *channelLink) settleHTLC(preimage lntypes.Preimage,
|
2024-08-16 21:48:04 +02:00
|
|
|
htlcIndex uint64, sourceRef channeldb.AddRef) error {
|
2019-02-08 10:57:50 +01:00
|
|
|
|
|
|
|
hash := preimage.Hash()
|
|
|
|
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Infof("settling htlc %v as exit hop", hash)
|
2019-02-08 10:57:50 +01:00
|
|
|
|
|
|
|
err := l.channel.SettleHTLC(
|
2024-08-16 21:48:04 +02:00
|
|
|
preimage, htlcIndex, &sourceRef, nil, nil,
|
2019-02-18 21:27:54 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
2024-02-26 12:19:38 +01:00
|
|
|
return fmt.Errorf("unable to settle htlc: %w", err)
|
2019-02-18 21:27:54 +01:00
|
|
|
}
|
|
|
|
|
2019-02-08 10:01:54 +01:00
|
|
|
// If the link is in hodl.BogusSettle mode, replace the preimage with a
|
|
|
|
// fake one before sending it to the peer.
|
2019-08-14 19:57:31 +02:00
|
|
|
if l.cfg.HodlMask.Active(hodl.BogusSettle) {
|
2019-10-01 11:06:56 +02:00
|
|
|
l.log.Warnf(hodl.BogusSettle.Warning())
|
2019-02-08 10:01:54 +01:00
|
|
|
preimage = [32]byte{}
|
|
|
|
copy(preimage[:], bytes.Repeat([]byte{2}, 32))
|
|
|
|
}
|
|
|
|
|
|
|
|
// The HTLC was successfully settled locally, so send a notification about
|
|
|
|
// it to the remote peer.
|
|
|
|
l.cfg.Peer.SendMessage(false, &lnwire.UpdateFulfillHTLC{
|
|
|
|
ChanID: l.ChanID(),
|
2024-08-16 21:48:04 +02:00
|
|
|
ID: htlcIndex,
|
2019-02-08 10:01:54 +01:00
|
|
|
PaymentPreimage: preimage,
|
|
|
|
})
|
|
|
|
|
2020-02-19 16:34:48 +01:00
|
|
|
// Once we have successfully settled the htlc, notify a settle event.
|
|
|
|
l.cfg.HtlcNotifier.NotifySettleEvent(
|
|
|
|
HtlcKey{
|
2022-11-18 12:15:22 +01:00
|
|
|
IncomingCircuit: models.CircuitKey{
|
2020-02-19 16:34:48 +01:00
|
|
|
ChanID: l.ShortChanID(),
|
2024-08-16 21:48:04 +02:00
|
|
|
HtlcID: htlcIndex,
|
2020-02-19 16:34:48 +01:00
|
|
|
},
|
|
|
|
},
|
2021-06-15 21:01:24 +02:00
|
|
|
preimage,
|
2020-02-19 16:34:48 +01:00
|
|
|
HtlcEventTypeReceive,
|
|
|
|
)
|
|
|
|
|
2019-02-08 10:57:50 +01:00
|
|
|
return nil
|
2019-02-08 10:01:54 +01:00
|
|
|
}
|
|
|
|
|
2017-11-27 08:20:17 +01:00
|
|
|
// forwardBatch forwards the given htlcPackets to the switch, and waits on the
|
|
|
|
// err chan for the individual responses. This method is intended to be spawned
|
|
|
|
// as a goroutine so the responses can be handled in the background.
|
2022-02-03 15:34:25 +01:00
|
|
|
func (l *channelLink) forwardBatch(replay bool, packets ...*htlcPacket) {
|
2017-11-27 08:20:17 +01:00
|
|
|
// Don't forward packets for which we already have a response in our
|
|
|
|
// mailbox. This could happen if a packet fails and is buffered in the
|
|
|
|
// mailbox, and the incoming link flaps.
|
|
|
|
var filteredPkts = make([]*htlcPacket, 0, len(packets))
|
|
|
|
for _, pkt := range packets {
|
|
|
|
if l.mailBox.HasPacket(pkt.inKey()) {
|
|
|
|
continue
|
2017-05-03 17:57:13 +02:00
|
|
|
}
|
2017-11-27 08:20:17 +01:00
|
|
|
|
|
|
|
filteredPkts = append(filteredPkts, pkt)
|
2017-05-03 17:57:13 +02:00
|
|
|
}
|
|
|
|
|
2024-10-17 13:38:34 +02:00
|
|
|
err := l.cfg.ForwardPackets(l.Quit, replay, filteredPkts...)
|
2022-02-03 15:34:25 +01:00
|
|
|
if err != nil {
|
2020-05-19 11:13:02 +02:00
|
|
|
log.Errorf("Unhandled error while reforwarding htlc "+
|
|
|
|
"settle/fail over htlcswitch: %v", err)
|
|
|
|
}
|
2017-05-03 17:57:13 +02:00
|
|
|
}
|
|
|
|
|
2017-07-15 05:08:29 +02:00
|
|
|
// sendHTLCError cancels the HTLC and sends a cancel message back to the
|
|
|
|
// peer from which the HTLC was received.
|
2024-08-16 21:45:35 +02:00
|
|
|
func (l *channelLink) sendHTLCError(add lnwire.UpdateAddHTLC,
|
|
|
|
sourceRef channeldb.AddRef, failure *LinkError,
|
|
|
|
e hop.ErrorEncrypter, isReceive bool) {
|
2017-10-11 04:36:52 +02:00
|
|
|
|
2020-02-06 18:35:17 +01:00
|
|
|
reason, err := e.EncryptFirstHop(failure.WireMessage())
|
2017-06-29 15:40:45 +02:00
|
|
|
if err != nil {
|
2019-10-01 11:16:24 +02:00
|
|
|
l.log.Errorf("unable to obfuscate error: %v", err)
|
2017-06-29 15:40:45 +02:00
|
|
|
return
|
|
|
|
}
|
2017-05-03 17:57:13 +02:00
|
|
|
|
2024-08-16 21:45:35 +02:00
|
|
|
err = l.channel.FailHTLC(add.ID, reason, &sourceRef, nil, nil)
|
2017-05-03 17:57:13 +02:00
|
|
|
if err != nil {
|
2019-10-01 11:16:24 +02:00
|
|
|
l.log.Errorf("unable cancel htlc: %v", err)
|
2017-05-03 17:57:13 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2024-04-08 21:51:15 +02:00
|
|
|
// Send the appropriate failure message depending on whether we're
|
|
|
|
// in a blinded route or not.
|
|
|
|
if err := l.sendIncomingHTLCFailureMsg(
|
2024-08-16 21:45:35 +02:00
|
|
|
add.ID, e, reason,
|
2024-04-08 21:51:15 +02:00
|
|
|
); err != nil {
|
|
|
|
l.log.Errorf("unable to send HTLC failure: %v", err)
|
|
|
|
return
|
|
|
|
}
|
2020-02-19 16:34:47 +01:00
|
|
|
|
|
|
|
// Notify a link failure on our incoming link. Outgoing htlc information
|
|
|
|
// is not available at this point, because we have not decrypted the
|
|
|
|
// onion, so it is excluded.
|
|
|
|
var eventType HtlcEventType
|
|
|
|
if isReceive {
|
|
|
|
eventType = HtlcEventTypeReceive
|
|
|
|
} else {
|
|
|
|
eventType = HtlcEventTypeForward
|
|
|
|
}
|
|
|
|
|
|
|
|
l.cfg.HtlcNotifier.NotifyLinkFailEvent(
|
|
|
|
HtlcKey{
|
2022-11-18 12:15:22 +01:00
|
|
|
IncomingCircuit: models.CircuitKey{
|
2020-02-19 16:34:47 +01:00
|
|
|
ChanID: l.ShortChanID(),
|
2024-08-16 21:45:35 +02:00
|
|
|
HtlcID: add.ID,
|
2020-02-19 16:34:47 +01:00
|
|
|
},
|
|
|
|
},
|
|
|
|
HtlcInfo{
|
2024-08-16 21:45:35 +02:00
|
|
|
IncomingTimeLock: add.Expiry,
|
|
|
|
IncomingAmt: add.Amount,
|
2020-02-19 16:34:47 +01:00
|
|
|
},
|
|
|
|
eventType,
|
|
|
|
failure,
|
|
|
|
true,
|
|
|
|
)
|
2017-05-03 17:57:13 +02:00
|
|
|
}
|
2017-07-12 15:44:17 +02:00
|
|
|
|
2024-04-08 21:51:15 +02:00
|
|
|
// sendIncomingHTLCFailureMsg sends an HTLC failure message back to the
|
|
|
|
// peer from which the HTLC was received. This function is primarily used to
|
|
|
|
// handle the special requirements of route blinding, specifically:
|
|
|
|
// - Forwarding nodes must switch out any errors with MalformedFailHTLC
|
|
|
|
// - Introduction nodes should return regular HTLC failure messages.
|
|
|
|
//
|
|
|
|
// It accepts the original opaque failure, which will be used in the case
|
|
|
|
// that we're not part of a blinded route and an error encrypter that'll be
|
|
|
|
// used if we are the introduction node and need to present an error as if
|
|
|
|
// we're the failing party.
|
|
|
|
func (l *channelLink) sendIncomingHTLCFailureMsg(htlcIndex uint64,
|
|
|
|
e hop.ErrorEncrypter,
|
|
|
|
originalFailure lnwire.OpaqueReason) error {
|
|
|
|
|
|
|
|
var msg lnwire.Message
|
|
|
|
switch {
|
|
|
|
// Our circuit's error encrypter will be nil if this was a locally
|
|
|
|
// initiated payment. We can only hit a blinded error for a locally
|
|
|
|
// initiated payment if we allow ourselves to be picked as the
|
|
|
|
// introduction node for our own payments and in that case we
|
|
|
|
// shouldn't reach this code. To prevent the HTLC getting stuck,
|
|
|
|
// we fail it back and log an error.
|
|
|
|
|
|
|
|
case e == nil:
|
|
|
|
msg = &lnwire.UpdateFailHTLC{
|
|
|
|
ChanID: l.ChanID(),
|
|
|
|
ID: htlcIndex,
|
|
|
|
Reason: originalFailure,
|
|
|
|
}
|
|
|
|
|
|
|
|
l.log.Errorf("Unexpected blinded failure when "+
|
|
|
|
"we are the sending node, incoming htlc: %v(%v)",
|
|
|
|
l.ShortChanID(), htlcIndex)
|
|
|
|
|
|
|
|
// For cleartext hops (ie, non-blinded/normal) we don't need any
|
|
|
|
// transformation on the error message and can just send the original.
|
|
|
|
case !e.Type().IsBlinded():
|
|
|
|
msg = &lnwire.UpdateFailHTLC{
|
|
|
|
ChanID: l.ChanID(),
|
|
|
|
ID: htlcIndex,
|
|
|
|
Reason: originalFailure,
|
|
|
|
}
|
|
|
|
|
|
|
|
// When we're the introduction node, we need to convert the error to
|
|
|
|
// a UpdateFailHTLC.
|
|
|
|
case e.Type() == hop.EncrypterTypeIntroduction:
|
|
|
|
l.log.Debugf("Introduction blinded node switching out failure "+
|
|
|
|
"error: %v", htlcIndex)
|
|
|
|
|
|
|
|
// The specification does not require that we set the onion
|
|
|
|
// blob.
|
2024-08-20 19:28:37 +02:00
|
|
|
failureMsg := lnwire.NewInvalidBlinding(
|
|
|
|
fn.None[[lnwire.OnionPacketSize]byte](),
|
|
|
|
)
|
2024-04-08 21:51:15 +02:00
|
|
|
reason, err := e.EncryptFirstHop(failureMsg)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
msg = &lnwire.UpdateFailHTLC{
|
|
|
|
ChanID: l.ChanID(),
|
|
|
|
ID: htlcIndex,
|
|
|
|
Reason: reason,
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we are a relaying node, we need to switch out any error that
|
|
|
|
// we've received to a malformed HTLC error.
|
|
|
|
case e.Type() == hop.EncrypterTypeRelaying:
|
|
|
|
l.log.Debugf("Relaying blinded node switching out malformed "+
|
|
|
|
"error: %v", htlcIndex)
|
|
|
|
|
|
|
|
msg = &lnwire.UpdateFailMalformedHTLC{
|
|
|
|
ChanID: l.ChanID(),
|
|
|
|
ID: htlcIndex,
|
|
|
|
FailureCode: lnwire.CodeInvalidBlinding,
|
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
|
|
|
return fmt.Errorf("unexpected encrypter: %d", e)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := l.cfg.Peer.SendMessage(false, msg); err != nil {
|
|
|
|
l.log.Warnf("Send update fail failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-07-15 05:08:29 +02:00
|
|
|
// sendMalformedHTLCError is a helper that sends a malformed HTLC update
|
2017-06-29 15:40:45 +02:00
|
|
|
// to the payment sender.
|
2017-10-24 09:48:52 +02:00
|
|
|
func (l *channelLink) sendMalformedHTLCError(htlcIndex uint64,
|
2024-08-20 19:01:34 +02:00
|
|
|
code lnwire.FailCode, onionBlob [lnwire.OnionPacketSize]byte,
|
|
|
|
sourceRef *channeldb.AddRef) {
|
2017-08-14 13:21:57 +02:00
|
|
|
|
2024-08-20 19:01:34 +02:00
|
|
|
shaOnionBlob := sha256.Sum256(onionBlob[:])
|
2017-11-27 08:20:17 +01:00
|
|
|
err := l.channel.MalformedFailHTLC(htlcIndex, code, shaOnionBlob, sourceRef)
|
2017-06-29 15:40:45 +02:00
|
|
|
if err != nil {
|
2019-10-01 11:16:24 +02:00
|
|
|
l.log.Errorf("unable cancel htlc: %v", err)
|
2017-06-29 15:40:45 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-06-08 05:17:15 +02:00
|
|
|
l.cfg.Peer.SendMessage(false, &lnwire.UpdateFailMalformedHTLC{
|
2017-06-29 15:40:45 +02:00
|
|
|
ChanID: l.ChanID(),
|
2017-10-24 09:48:52 +02:00
|
|
|
ID: htlcIndex,
|
2017-08-14 13:21:57 +02:00
|
|
|
ShaOnionBlob: shaOnionBlob,
|
2017-06-29 15:40:45 +02:00
|
|
|
FailureCode: code,
|
2018-06-08 05:17:15 +02:00
|
|
|
})
|
2017-06-29 15:40:45 +02:00
|
|
|
}
|
|
|
|
|
2024-08-20 12:46:24 +02:00
|
|
|
// failf is a function which is used to encapsulate the action necessary for
|
2018-05-09 15:49:58 +02:00
|
|
|
// properly failing the link. It takes a LinkFailureError, which will be passed
|
|
|
|
// to the OnChannelFailure closure, in order for it to determine if we should
|
|
|
|
// force close the channel, and if we should send an error message to the
|
|
|
|
// remote peer.
|
2024-08-20 12:46:24 +02:00
|
|
|
func (l *channelLink) failf(linkErr LinkFailureError, format string,
|
|
|
|
a ...interface{}) {
|
|
|
|
|
2024-04-06 02:08:38 +02:00
|
|
|
reason := fmt.Errorf(format, a...)
|
2018-05-23 15:14:46 +02:00
|
|
|
|
|
|
|
// Return if we have already notified about a failure.
|
|
|
|
if l.failed {
|
2019-10-02 15:53:29 +02:00
|
|
|
l.log.Warnf("ignoring link failure (%v), as link already "+
|
2019-10-01 11:06:56 +02:00
|
|
|
"failed", reason)
|
2018-05-23 15:14:46 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-03-05 15:57:28 +01:00
|
|
|
l.log.Errorf("failing link: %s with error: %v", reason, linkErr)
|
2018-05-23 15:14:46 +02:00
|
|
|
|
|
|
|
// Set failed, such that we won't process any more updates, and notify
|
|
|
|
// the peer about the failure.
|
|
|
|
l.failed = true
|
2018-05-09 15:49:58 +02:00
|
|
|
l.cfg.OnChannelFailure(l.ChanID(), l.ShortChanID(), linkErr)
|
2017-06-29 15:40:45 +02:00
|
|
|
}
|
2024-05-02 16:52:47 +02:00
|
|
|
|
|
|
|
// FundingCustomBlob returns the custom funding blob of the channel that this
|
|
|
|
// link is associated with. The funding blob represents static information about
|
|
|
|
// the channel that was created at channel funding time.
|
|
|
|
func (l *channelLink) FundingCustomBlob() fn.Option[tlv.Blob] {
|
|
|
|
if l.channel == nil {
|
|
|
|
return fn.None[tlv.Blob]()
|
|
|
|
}
|
|
|
|
|
|
|
|
if l.channel.State() == nil {
|
|
|
|
return fn.None[tlv.Blob]()
|
|
|
|
}
|
|
|
|
|
|
|
|
return l.channel.State().CustomBlob
|
|
|
|
}
|
|
|
|
|
|
|
|
// CommitmentCustomBlob returns the custom blob of the current local commitment
|
|
|
|
// of the channel that this link is associated with.
|
|
|
|
func (l *channelLink) CommitmentCustomBlob() fn.Option[tlv.Blob] {
|
|
|
|
if l.channel == nil {
|
|
|
|
return fn.None[tlv.Blob]()
|
|
|
|
}
|
|
|
|
|
|
|
|
return l.channel.LocalCommitmentBlob()
|
|
|
|
}
|