package peer

import (
	"bytes"
	"container/list"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/connmgr"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btclog"
	"github.com/btcsuite/btcwallet/waddrmgr"
	"github.com/davecgh/go-spew/spew"
	"github.com/lightningnetwork/lnd/buffer"
	"github.com/lightningnetwork/lnd/build"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/channeldb/models"
	"github.com/lightningnetwork/lnd/channelnotifier"
	"github.com/lightningnetwork/lnd/contractcourt"
	"github.com/lightningnetwork/lnd/discovery"
	"github.com/lightningnetwork/lnd/feature"
	"github.com/lightningnetwork/lnd/fn"
	"github.com/lightningnetwork/lnd/funding"
	"github.com/lightningnetwork/lnd/htlcswitch"
	"github.com/lightningnetwork/lnd/htlcswitch/hodl"
	"github.com/lightningnetwork/lnd/htlcswitch/hop"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/invoices"
	"github.com/lightningnetwork/lnd/lnpeer"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnutils"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
	"github.com/lightningnetwork/lnd/lnwallet/chancloser"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/msgmux"
	"github.com/lightningnetwork/lnd/netann"
	"github.com/lightningnetwork/lnd/pool"
	"github.com/lightningnetwork/lnd/queue"
	"github.com/lightningnetwork/lnd/subscribe"
	"github.com/lightningnetwork/lnd/ticker"
	"github.com/lightningnetwork/lnd/tlv"
	"github.com/lightningnetwork/lnd/watchtower/wtclient"
)

const (
	// pingInterval is the interval at which ping messages are sent.
	pingInterval = 1 * time.Minute

	// pingTimeout is the amount of time we will wait for a pong response
	// before considering the peer to be unresponsive.
	//
	// This MUST be a smaller value than the pingInterval.
	pingTimeout = 30 * time.Second

	// idleTimeout is the duration of inactivity before we time out a peer.
	idleTimeout = 5 * time.Minute

	// writeMessageTimeout is the timeout used when writing a message to
	// the peer.
	writeMessageTimeout = 5 * time.Second

	// readMessageTimeout is the timeout used when reading a message from a
	// peer.
	readMessageTimeout = 5 * time.Second

	// handshakeTimeout is the timeout used when waiting for the peer's
	// init message.
	handshakeTimeout = 15 * time.Second

	// ErrorBufferSize is the number of historic peer errors that we store.
	ErrorBufferSize = 10

	// pongSizeCeiling is the upper bound on a uniformly distributed random
	// variable that we use for requesting pong responses. We don't use the
	// MaxPongBytes (upper bound accepted by the protocol) because it is
	// needlessly wasteful of precious Tor bandwidth for little to no gain.
	pongSizeCeiling = 4096

	// torTimeoutMultiplier is the scaling factor we use on network
	// timeouts for Tor peers.
	torTimeoutMultiplier = 3
)
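
// The torTimeoutMultiplier above is applied via the Brontide's scaleTimeout
// helper before the ping interval and timeout are handed to the ping manager
// (see NewPingManager below). A minimal sketch of that scaling, assuming the
// helper simply multiplies timeouts for Tor peers (illustrative only; the
// actual scaleTimeout method lives elsewhere in this file):
//
//	func (p *Brontide) scaleTimeout(timeout time.Duration) time.Duration {
//		if p.isTorConnection {
//			return timeout * time.Duration(torTimeoutMultiplier)
//		}
//		return timeout
//	}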

var (
	// ErrChannelNotFound is an error returned when a channel is queried
	// and either the Brontide doesn't know of it, or the channel in
	// question is pending.
	ErrChannelNotFound = fmt.Errorf("channel not found")
)

// outgoingMsg packages an lnwire.Message to be sent out on the wire, along
// with a buffered channel which will be sent upon once the write is complete.
// This buffered channel acts as a semaphore to be used for synchronization
// purposes.
type outgoingMsg struct {
	priority bool
	msg      lnwire.Message
	errChan  chan error // MUST be buffered.
}
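
// A usage sketch (illustrative): a caller that needs to block until its
// message has been written to the wire passes a buffered error channel, which
// the writeHandler sends on exactly once when the write completes or fails.
// queueMsg is the internal helper this file uses for that purpose (e.g. for
// pings further below):
//
//	errChan := make(chan error, 1) // MUST be buffered.
//	p.queueMsg(msg, errChan)
//	if err := <-errChan; err != nil {
//		// The write failed or the peer is shutting down.
//	}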

// newChannelMsg packages a channeldb.OpenChannel with a channel that allows
// the receiver of the request to report when the channel creation process has
// completed.
type newChannelMsg struct {
	// channel is used when the pending channel becomes active.
	channel *lnpeer.NewChannel

	// channelID is used when there's a new pending channel.
	channelID lnwire.ChannelID

	err chan error
}

// customMsg packages a custom wire message along with the public key of the
// peer that sent it.
type customMsg struct {
	peer [33]byte
	msg  lnwire.Custom
}

// closeMsg is a wrapper struct around any wire messages that deal with the
// cooperative channel closure negotiation process. This struct includes the
// raw channel ID targeted along with the original message.
type closeMsg struct {
	cid lnwire.ChannelID
	msg lnwire.Message
}

// PendingUpdate describes the pending state of a closing channel.
type PendingUpdate struct {
	Txid        []byte
	OutputIndex uint32
}

// ChannelCloseUpdate contains the outcome of the close channel operation.
type ChannelCloseUpdate struct {
	ClosingTxid []byte
	Success     bool

	// LocalCloseOutput is an optional, additional output on the closing
	// transaction that the local party should be paid to. This will only
	// be populated if the local balance isn't dust.
	LocalCloseOutput fn.Option[chancloser.CloseOutput]

	// RemoteCloseOutput is an optional, additional output on the closing
	// transaction that the remote party should be paid to. This will only
	// be populated if the remote balance isn't dust.
	RemoteCloseOutput fn.Option[chancloser.CloseOutput]

	// AuxOutputs is an optional set of additional outputs that might be
	// included in the closing transaction. These are used for custom
	// channel types.
	AuxOutputs fn.Option[chancloser.AuxCloseOutputs]
}

// TimestampedError is a timestamped error that is used to store the most
// recent errors we have experienced with our peers.
type TimestampedError struct {
	Error     error
	Timestamp time.Time
}
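
// A sketch of how TimestampedError pairs with the ErrorBuffer configured on
// the peer (illustrative; this assumes queue.CircularBuffer's Add method, and
// the actual recording is performed by the storeError helper used during the
// handshake in Start below):
//
//	p.cfg.ErrorBuffer.Add(&TimestampedError{
//		Error:     err,
//		Timestamp: time.Now(),
//	})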

// Config defines configuration fields that are necessary for a peer object
// to function.
type Config struct {
	// Conn is the underlying network connection for this peer.
	Conn MessageConn

	// ConnReq stores information related to the persistent connection
	// request for this peer.
	ConnReq *connmgr.ConnReq

	// PubKeyBytes is the serialized, compressed public key of this peer.
	PubKeyBytes [33]byte

	// Addr is the network address of the peer.
	Addr *lnwire.NetAddress

	// Inbound indicates whether or not the peer is an inbound peer.
	Inbound bool

	// Features is the set of features that we advertise to the remote
	// party.
	Features *lnwire.FeatureVector

	// LegacyFeatures is the set of features that we advertise to the
	// remote peer for backwards compatibility. Nodes that have not
	// implemented flat features will still be able to read our feature
	// bits from the legacy global field, but we will also advertise
	// everything in the default features field.
	LegacyFeatures *lnwire.FeatureVector

	// OutgoingCltvRejectDelta defines the number of blocks before expiry
	// of an htlc where we don't offer it anymore.
	OutgoingCltvRejectDelta uint32

	// ChanActiveTimeout specifies the duration the peer will wait to
	// request a channel reenable, beginning from the time the peer was
	// started.
	ChanActiveTimeout time.Duration

	// ErrorBuffer stores a set of errors related to a peer. It contains
	// error messages that our peer has recently sent us over the wire and
	// records of unknown messages that were sent to us so that we can
	// have a full track record of the communication errors we have had
	// with our peer. If we choose to disconnect from a peer, it also
	// stores the reason we had for disconnecting.
	ErrorBuffer *queue.CircularBuffer

	// WritePool is the task pool that manages reuse of write buffers.
	// Write tasks are submitted to the pool in order to conserve the
	// total number of write buffers allocated at any one time, and
	// decouple write buffer allocation from the peer life cycle.
	WritePool *pool.Write

	// ReadPool is the task pool that manages reuse of read buffers.
	ReadPool *pool.Read

	// Switch is a pointer to the htlcswitch. It is used to set up, get,
	// and tear down ChannelLinks.
	Switch messageSwitch

	// InterceptSwitch is a pointer to the InterceptableSwitch, a wrapper
	// around the regular Switch. We only export it here to pass
	// ForwardPackets to the ChannelLinkConfig.
	InterceptSwitch *htlcswitch.InterceptableSwitch

	// ChannelDB is used to fetch opened channels, and closed channels.
	ChannelDB *channeldb.ChannelStateDB

	// ChannelGraph is a pointer to the channel graph which is used to
	// query information about the set of known active channels.
	ChannelGraph *channeldb.ChannelGraph

	// ChainArb is used to subscribe to channel events, update contract
	// signals, and force close channels.
	ChainArb *contractcourt.ChainArbitrator

	// AuthGossiper is needed so that the Brontide impl can register with
	// the gossiper and process remote channel announcements.
	AuthGossiper *discovery.AuthenticatedGossiper

	// ChanStatusMgr is used to set or un-set the disabled bit in channel
	// updates.
	ChanStatusMgr *netann.ChanStatusManager

	// ChainIO is used to retrieve the best block.
	ChainIO lnwallet.BlockChainIO

	// FeeEstimator is used to compute our target ideal fee-per-kw when
	// initializing the coop close process.
	FeeEstimator chainfee.Estimator

	// Signer is used when creating *lnwallet.LightningChannel instances.
	Signer input.Signer

	// SigPool is used when creating *lnwallet.LightningChannel instances.
	SigPool *lnwallet.SigPool

	// Wallet is used to publish transactions and to generate delivery
	// scripts during the coop close process.
	Wallet *lnwallet.LightningWallet

	// ChainNotifier is used to receive confirmations of a coop close
	// transaction.
	ChainNotifier chainntnfs.ChainNotifier

	// BestBlockView is used to efficiently query for up-to-date
	// blockchain state information.
	BestBlockView chainntnfs.BestBlockView

	// RoutingPolicy is used to set the forwarding policy for links
	// created by the Brontide.
	RoutingPolicy models.ForwardingPolicy

	// Sphinx is used when setting up ChannelLinks so they can decode
	// sphinx onion blobs.
	Sphinx *hop.OnionProcessor

	// WitnessBeacon is used when setting up ChannelLinks so they can add
	// any preimages that they learn.
	WitnessBeacon contractcourt.WitnessBeacon

	// Invoices is passed to the ChannelLink on creation and handles all
	// invoice-related logic.
	Invoices *invoices.InvoiceRegistry

	// ChannelNotifier is used by the link to notify other sub-systems
	// about channel-related events and by the Brontide to subscribe to
	// ActiveLinkEvents.
	ChannelNotifier *channelnotifier.ChannelNotifier

	// HtlcNotifier is used when creating a ChannelLink.
	HtlcNotifier *htlcswitch.HtlcNotifier

	// TowerClient is used to backup revoked states.
	TowerClient wtclient.ClientManager

	// DisconnectPeer is used to disconnect this peer if the cooperative
	// close process fails.
	DisconnectPeer func(*btcec.PublicKey) error

	// GenNodeAnnouncement is used to send our node announcement to the
	// remote on startup.
	GenNodeAnnouncement func(...netann.NodeAnnModifier) (
		lnwire.NodeAnnouncement, error)

	// PrunePersistentPeerConnection is used to remove all internal state
	// related to this peer in the server.
	PrunePersistentPeerConnection func([33]byte)

	// FetchLastChanUpdate fetches our latest channel update for a target
	// channel.
	FetchLastChanUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate1,
		error)

	// FundingManager is an implementation of the funding.Controller
	// interface.
	FundingManager funding.Controller

	// Hodl is used when creating ChannelLinks to specify HodlFlags as
	// breakpoints in dev builds.
	Hodl *hodl.Config

	// UnsafeReplay is used when creating ChannelLinks to specify whether
	// or not to replay adds on its commitment tx.
	UnsafeReplay bool

	// MaxOutgoingCltvExpiry is used when creating ChannelLinks and is the
	// max number of blocks that funds could be locked up for when
	// forwarding payments.
	MaxOutgoingCltvExpiry uint32

	// MaxChannelFeeAllocation is used when creating ChannelLinks and is
	// the maximum percentage of total funds that can be allocated to a
	// channel's commitment fee. This only applies for the initiator of
	// the channel.
	MaxChannelFeeAllocation float64

	// MaxAnchorsCommitFeeRate is the maximum fee rate we'll use as an
	// initiator for anchor channel commitments.
	MaxAnchorsCommitFeeRate chainfee.SatPerKWeight

	// CoopCloseTargetConfs is the confirmation target that will be used
	// to estimate the fee rate to use during a cooperative channel
	// closure initiated by the remote peer.
	CoopCloseTargetConfs uint32

	// ServerPubKey is the serialized, compressed public key of our lnd
	// node. It is used to determine which policy (channel edge) to pass
	// to the ChannelLink.
	ServerPubKey [33]byte

	// ChannelCommitInterval is the maximum time that is allowed to pass
	// between receiving a channel state update and signing the next
	// commitment. Setting this to a longer duration allows for more
	// efficient channel operations at the cost of latency.
	ChannelCommitInterval time.Duration

	// PendingCommitInterval is the maximum time that is allowed to pass
	// while waiting for the remote party to revoke a locally initiated
	// commitment state. Set this to a longer duration if a slow response
	// is expected from the remote party, or if a large number of payments
	// are attempted at the same time.
	PendingCommitInterval time.Duration

	// ChannelCommitBatchSize is the maximum number of channel state
	// updates that is accumulated before signing a new commitment.
	ChannelCommitBatchSize uint32

	// HandleCustomMessage is called whenever a custom message is received
	// from the peer.
	HandleCustomMessage func(peer [33]byte, msg *lnwire.Custom) error

	// GetAliases is passed to created links so the Switch and link can be
	// aware of the channel's aliases.
	GetAliases func(base lnwire.ShortChannelID) []lnwire.ShortChannelID

	// RequestAlias allows the Brontide struct to request an alias to send
	// to the peer.
	RequestAlias func() (lnwire.ShortChannelID, error)

	// AddLocalAlias persists an alias to an underlying alias store.
	AddLocalAlias func(alias, base lnwire.ShortChannelID,
		gossip, liveUpdate bool) error

	// AuxLeafStore is an optional store that can be used to store
	// auxiliary leaves for certain custom channel types.
	AuxLeafStore fn.Option[lnwallet.AuxLeafStore]

	// AuxSigner is an optional signer that can be used to sign auxiliary
	// leaves for certain custom channel types.
	AuxSigner fn.Option[lnwallet.AuxSigner]

	// PongBuf is a slice we'll reuse instead of allocating memory on the
	// heap. Since only reads will occur and no writes, there is no need
	// for any synchronization primitives. As a result, it's safe to share
	// this across multiple Peer struct instances.
	PongBuf []byte

	// DisallowRouteBlinding disables forwarding payments in blinded
	// routes by failing back any blinding-related payloads as if they
	// were invalid.
	DisallowRouteBlinding bool

	// MaxFeeExposure limits the total fee exposure that can be
	// outstanding on a channel. This value will be passed to created
	// links.
	MaxFeeExposure lnwire.MilliSatoshi

	// MsgRouter is an optional instance of the main message router that
	// the peer will use. If None, then a new default version will be used
	// in its place.
	MsgRouter fn.Option[msgmux.Router]

	// AuxChanCloser is an optional instance of an abstraction that can be
	// used to modify the way the co-op close transaction is constructed.
	AuxChanCloser fn.Option[chancloser.AuxChanCloser]

	// Quit is the server's quit channel. If this is closed, we halt
	// operation.
	Quit chan struct{}
}

// Brontide is an active peer on the Lightning Network. This struct is
// responsible for managing any channel state related to this peer. To do so,
// it has several helper goroutines to handle events such as HTLC timeouts,
// the funding workflow, and detecting an uncooperative closure of any active
// channels.
//
// TODO(roasbeef): proper reconnection logic.
type Brontide struct {
	// MUST be used atomically.
	started    int32
	disconnect int32

	// MUST be used atomically.
	bytesReceived uint64
	bytesSent     uint64

	// isTorConnection is a flag that indicates whether or not we believe
	// the remote peer is a tor connection. It is not always possible to
	// know this with certainty but we have heuristics we use that should
	// catch most cases.
	//
	// NOTE: We judge the tor-ness of a connection by whether the remote
	// peer has ".onion" in the address OR if it's connected over
	// localhost. This will miss cases where our peer is connected to our
	// clearnet address over the tor network (via exit nodes). It will
	// also misjudge actual localhost connections as tor. We need to
	// include this because inbound connections to our tor address will
	// appear to come from the local socks5 proxy. This heuristic is only
	// used to expand the timeout window for peers so it is OK to misjudge
	// this. If you use this field for any other purpose you should
	// seriously consider whether or not this heuristic is good enough for
	// your use case.
	isTorConnection bool

	pingManager *PingManager

	// lastPingPayload stores an unsafe pointer wrapped as an atomic
	// variable which points to the last payload the remote party sent us
	// as their ping.
	//
	// MUST be used atomically.
	lastPingPayload atomic.Value

	cfg Config

	// activeSignal when closed signals that the peer is now active and
	// ready to process messages.
	activeSignal chan struct{}

	// startTime is the time this peer connection was successfully
	// established. It will be zero for peers that did not successfully
	// call Start().
	startTime time.Time

	// sendQueue is the channel which is used to queue outgoing messages
	// to be written onto the wire. Note that this channel is unbuffered.
	sendQueue chan outgoingMsg

	// outgoingQueue is a buffered channel which allows second/third party
	// objects to queue messages to be sent out on the wire.
	outgoingQueue chan outgoingMsg

	// activeChannels is a map which stores the state machines of all
	// active channels. Channels are indexed into the map by the txid of
	// the funding transaction which opened the channel.
	//
	// NOTE: On startup, pending channels are stored as nil in this map.
	// Confirmed channels have channel data populated in the map. This
	// means that accesses to this map should nil-check the
	// LightningChannel to see if this is a pending channel or not. The
	// tradeoff here is either having two maps everywhere (one for
	// pending, one for confirmed chans) or having an extra nil-check per
	// access.
	activeChannels *lnutils.SyncMap[
		lnwire.ChannelID, *lnwallet.LightningChannel]

	// addedChannels tracks any new channels opened during this peer's
	// lifecycle. We use this to filter out these new channels when the
	// time comes to request a reenable for active channels, since they
	// will have waited a shorter duration.
	addedChannels *lnutils.SyncMap[lnwire.ChannelID, struct{}]

	// newActiveChannel is used by the fundingManager to send fully opened
	// channels to the source peer which handled the funding workflow.
	newActiveChannel chan *newChannelMsg

	// newPendingChannel is used by the fundingManager to send pending open
	// channels to the source peer which handled the funding workflow.
	newPendingChannel chan *newChannelMsg

	// removePendingChannel is used by the fundingManager to cancel pending
	// open channels to the source peer when the funding flow has failed.
	removePendingChannel chan *newChannelMsg

	// activeMsgStreams is a map from channel id to the channel streams
	// that proxy messages to individual, active links.
	activeMsgStreams map[lnwire.ChannelID]*msgStream

	// activeChanCloses is a map that keeps track of all the active
	// cooperative channel closures. Any channel closing messages are
	// directed to one of these active state machines. Once the channel
	// has been closed, the state machine will be deleted from the map.
	activeChanCloses map[lnwire.ChannelID]*chancloser.ChanCloser

	// localCloseChanReqs is a channel in which any local requests to
	// close a particular channel are sent over.
	localCloseChanReqs chan *htlcswitch.ChanClose

	// linkFailures receives all reported channel failures from the
	// switch, and instructs the channelManager to clean remaining channel
	// state.
	linkFailures chan linkFailureReport

	// chanCloseMsgs is a channel that any message related to channel
	// closures are sent over. This includes lnwire.Shutdown messages as
	// well as lnwire.ClosingSigned messages.
	chanCloseMsgs chan *closeMsg

	// remoteFeatures is the feature vector received from the peer during
	// the connection handshake.
	remoteFeatures *lnwire.FeatureVector

	// resentChanSyncMsg is a set that keeps track of which channels we
	// have re-sent channel reestablishment messages for. This is done to
	// avoid getting into a loop where both peers will respond to the
	// other peer's chansync message with its own over and over again.
	resentChanSyncMsg map[lnwire.ChannelID]struct{}

	// channelEventClient is the channel event subscription client that's
	// used to assist in retrying to enable the channels. This client is
	// only created when the reenableTimeout is no greater than 1 minute.
	// Once created, it is canceled once reenabling has finished.
	//
	// NOTE: we choose to create the client conditionally to avoid
	// potentially holding lots of un-consumed events.
	channelEventClient *subscribe.Client

	// msgRouter is an instance of the msgmux.Router which is used to send
	// off new wire messages for handling.
	msgRouter fn.Option[msgmux.Router]

	// globalMsgRouter is a flag that indicates whether we have a global
	// msg router. If so, then we don't worry about stopping the msg
	// router when a peer disconnects.
	globalMsgRouter bool

	startReady chan struct{}
	quit       chan struct{}
	wg         sync.WaitGroup

	// log is a peer-specific logging instance.
	log btclog.Logger
}

// A compile-time check to ensure that Brontide satisfies the lnpeer.Peer
// interface.
var _ lnpeer.Peer = (*Brontide)(nil)

// NewBrontide creates a new Brontide from a peer.Config struct.
func NewBrontide(cfg Config) *Brontide {
	logPrefix := fmt.Sprintf("Peer(%x):", cfg.PubKeyBytes)

	// We have a global message router if one was passed in via the config.
	// In this case, we don't need to attempt to tear it down when the peer
	// is stopped.
	globalMsgRouter := cfg.MsgRouter.IsSome()

	// We'll either use the msg router instance passed in, or create a new
	// blank instance.
	msgRouter := cfg.MsgRouter.Alt(fn.Some[msgmux.Router](
		msgmux.NewMultiMsgRouter(),
	))

	p := &Brontide{
		cfg:           cfg,
		activeSignal:  make(chan struct{}),
		sendQueue:     make(chan outgoingMsg),
		outgoingQueue: make(chan outgoingMsg),
		addedChannels: &lnutils.SyncMap[lnwire.ChannelID, struct{}]{},
		activeChannels: &lnutils.SyncMap[
			lnwire.ChannelID, *lnwallet.LightningChannel,
		]{},
		newActiveChannel:     make(chan *newChannelMsg, 1),
		newPendingChannel:    make(chan *newChannelMsg, 1),
		removePendingChannel: make(chan *newChannelMsg),

		activeMsgStreams:   make(map[lnwire.ChannelID]*msgStream),
		activeChanCloses:   make(map[lnwire.ChannelID]*chancloser.ChanCloser),
		localCloseChanReqs: make(chan *htlcswitch.ChanClose),
		linkFailures:       make(chan linkFailureReport),
		chanCloseMsgs:      make(chan *closeMsg),
		resentChanSyncMsg:  make(map[lnwire.ChannelID]struct{}),
		startReady:         make(chan struct{}),
		quit:               make(chan struct{}),
		log:                build.NewPrefixLog(logPrefix, peerLog),
		msgRouter:          msgRouter,
		globalMsgRouter:    globalMsgRouter,
	}

	if cfg.Conn != nil && cfg.Conn.RemoteAddr() != nil {
		remoteAddr := cfg.Conn.RemoteAddr().String()
		p.isTorConnection = strings.Contains(remoteAddr, ".onion") ||
			strings.Contains(remoteAddr, "127.0.0.1")
	}

	var (
		lastBlockHeader           *wire.BlockHeader
		lastSerializedBlockHeader [wire.MaxBlockHeaderPayload]byte
	)
	newPingPayload := func() []byte {
		// We query the BestBlockHeader from our BestBlockView each
		// time this is called, and update our serialized block header
		// if they differ. Over time, we'll use this to disseminate the
		// latest block header between all our peers, which can later
		// be used to cross-check our own view of the network to
		// mitigate various types of eclipse attacks.
		header, err := p.cfg.BestBlockView.BestBlockHeader()
		if err != nil || header == lastBlockHeader {
			return lastSerializedBlockHeader[:]
		}

		buf := bytes.NewBuffer(lastSerializedBlockHeader[0:0])
		err = header.Serialize(buf)
		if err == nil {
			lastBlockHeader = header
		} else {
			p.log.Warn("unable to serialize current block " +
				"header for ping payload generation. " +
				"This should be impossible and means " +
				"there is an implementation bug.")
		}

		return lastSerializedBlockHeader[:]
	}

	// TODO(roasbeef): make dynamic in order to create fake cover traffic.
	//
	// NOTE(proofofkeags): this was changed to be dynamic to allow better
	// pong identification, however, more thought is needed to make this
	// actually usable as a traffic decoy.
	randPongSize := func() uint16 {
		return uint16(
			// We don't need cryptographic randomness here.
			/* #nosec */
			rand.Intn(pongSizeCeiling) + 1,
		)
	}

	p.pingManager = NewPingManager(&PingManagerConfig{
		NewPingPayload:   newPingPayload,
		NewPongSize:      randPongSize,
		IntervalDuration: p.scaleTimeout(pingInterval),
		TimeoutDuration:  p.scaleTimeout(pingTimeout),
		SendPing: func(ping *lnwire.Ping) {
			p.queueMsg(ping, nil)
		},
		OnPongFailure: func(err error) {
			eStr := "pong response failure for %s: %v " +
				"-- disconnecting"
			p.log.Warnf(eStr, p, err)
			go p.Disconnect(fmt.Errorf(eStr, p, err))
		},
	})

	return p
}
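
// A minimal construction sketch (illustrative; a real caller must wire up
// every subsystem dependency in Config, most of which are omitted here, and
// the variable names are placeholders):
//
//	p := peer.NewBrontide(peer.Config{
//		Conn:        conn,    // the established brontide connection
//		PubKeyBytes: pubKey,  // remote node's compressed public key
//		Addr:        netAddr,
//		Features:    features,
//		Quit:        serverQuit,
//		// ... remaining fields (Switch, ChannelDB, Wallet, etc.)
//	})
//	if err := p.Start(); err != nil {
//		// The handshake or channel loading failed.
//	}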

// Start starts all helper goroutines the peer needs for normal operations. In
// the case this peer has already been started, then this function is a noop.
func (p *Brontide) Start() error {
	if atomic.AddInt32(&p.started, 1) != 1 {
		return nil
	}

	// Once we've finished starting up the peer, we'll signal to other
	// goroutines that they can move forward to tear down the peer, or
	// carry out other relevant changes.
	defer close(p.startReady)

	p.log.Tracef("starting with conn[%v->%v]",
		p.cfg.Conn.LocalAddr(), p.cfg.Conn.RemoteAddr())

	// Fetch and then load all the active channels we have with this
	// remote peer from the database.
	activeChans, err := p.cfg.ChannelDB.FetchOpenChannels(
		p.cfg.Addr.IdentityKey,
	)
	if err != nil {
		p.log.Errorf("Unable to fetch active chans "+
			"for peer: %v", err)
		return err
	}

	if len(activeChans) == 0 {
		go p.cfg.PrunePersistentPeerConnection(p.cfg.PubKeyBytes)
	}

	// Quickly check if we have any existing legacy channels with this
	// peer.
	haveLegacyChan := false
	for _, c := range activeChans {
		if c.ChanType.IsTweakless() {
			continue
		}

		haveLegacyChan = true
		break
	}

	// Exchange local and global features; the init message should be the
	// very first message exchanged between two nodes.
	if err := p.sendInitMsg(haveLegacyChan); err != nil {
		return fmt.Errorf("unable to send init msg: %w", err)
	}

	// Before we launch any of the helper goroutines off the peer struct,
	// we'll first ensure proper adherence to the p2p protocol. The init
	// message MUST be sent before any other message.
	readErr := make(chan error, 1)
	msgChan := make(chan lnwire.Message, 1)
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()

		msg, err := p.readNextMessage()
		if err != nil {
			readErr <- err
			msgChan <- nil
			return
		}
		readErr <- nil
		msgChan <- msg
	}()

	select {
	// In order to avoid blocking indefinitely, we'll give the other peer
	// an upper timeout to respond before we bail out early.
	case <-time.After(handshakeTimeout):
		return fmt.Errorf("peer did not complete handshake within %v",
			handshakeTimeout)
	case err := <-readErr:
		if err != nil {
			return fmt.Errorf("unable to read init msg: %w", err)
		}
	}

	// Once the init message arrives, we can parse it so we can figure out
	// the negotiation of features for this session.
	msg := <-msgChan
	if msg, ok := msg.(*lnwire.Init); ok {
		if err := p.handleInitMsg(msg); err != nil {
			p.storeError(err)
			return err
		}
	} else {
		return errors.New("very first message between nodes " +
			"must be init message")
	}

	// Next, load all the active channels we have with this peer,
	// registering them with the switch and launching the necessary
	// goroutines required to operate them.
	p.log.Debugf("Loaded %v active channels from database",
		len(activeChans))

	// Conditionally subscribe to channel events before loading channels
	// so we won't miss events. This subscription is used to listen to
	// active channel events when reenabling channels. Once the reenabling
	// process is finished, this subscription will be canceled.
	//
	// NOTE: ChannelNotifier must be started before subscribing to events,
	// otherwise we'd panic here.
	if err := p.attachChannelEventSubscription(); err != nil {
		return err
	}

	// Register the message router now as we may need to register some
	// endpoints while loading the channels below.
	p.msgRouter.WhenSome(func(router msgmux.Router) {
		router.Start()
	})

	msgs, err := p.loadActiveChannels(activeChans)
	if err != nil {
		return fmt.Errorf("unable to load channels: %w", err)
	}

	p.startTime = time.Now()

	// Before launching the writeHandler goroutine, we send any channel
	// sync messages that must be resent for borked channels. We do this
	// to avoid data races with WriteMessage & Flush calls.
	if len(msgs) > 0 {
		p.log.Infof("Sending %d channel sync messages to peer after "+
			"loading active channels", len(msgs))

		// Send the messages directly via writeMessage and bypass the
		// writeHandler goroutine.
		for _, msg := range msgs {
			if err := p.writeMessage(msg); err != nil {
				return fmt.Errorf("unable to send "+
					"reestablish msg: %v", err)
			}
		}
	}

	err = p.pingManager.Start()
	if err != nil {
		return fmt.Errorf("could not start ping manager: %w", err)
	}

	p.wg.Add(4)
	go p.queueHandler()
	go p.writeHandler()
	go p.channelManager()
	go p.readHandler()

	// Signal to any external processes that the peer is now active.
	close(p.activeSignal)

	// Node announcements don't propagate very well throughout the network
	// as there isn't a way to efficiently query for them through their
	// timestamp, mostly affecting nodes that were offline during the time
	// of broadcast. We'll resend our node announcement to the remote peer
	// as a best-effort delivery such that it can also propagate to their
	// peers. To ensure they can successfully process it in most cases,
	// we'll only resend it as long as we have at least one confirmed
	// advertised channel with the remote peer.
	//
	// TODO(wilmer): Remove this once we're able to query for node
	// announcements through their timestamps.
	p.wg.Add(2)
	go p.maybeSendNodeAnn(activeChans)
	go p.maybeSendChannelUpdates()

	return nil
}

// initGossipSync initializes either a gossip syncer or an initial routing
// dump, depending on the negotiated synchronization method.
func (p *Brontide) initGossipSync() {
	// If the remote peer knows of the new gossip queries feature, then
	// we'll create a new gossipSyncer in the AuthenticatedGossiper for it.
	if p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional) {
		p.log.Info("Negotiated chan series queries")

		if p.cfg.AuthGossiper == nil {
			// This should only ever be hit in the unit tests.
			p.log.Warn("No AuthGossiper configured. Abandoning " +
				"gossip sync.")
			return
		}

		// Register the peer's gossip syncer with the gossiper.
		// This blocks synchronously to ensure the gossip syncer is
		// registered with the gossiper before attempting to read
		// messages from the remote peer.
		//
		// TODO(wilmer): Only sync updates from non-channel peers. This
		// requires an improved version of the current network
		// bootstrapper to ensure we can find and connect to non-channel
		// peers.
		p.cfg.AuthGossiper.InitSyncState(p)
	}
}

// taprootShutdownAllowed returns true if both parties have negotiated the
// shutdown-any-segwit feature.
func (p *Brontide) taprootShutdownAllowed() bool {
	return p.RemoteFeatures().HasFeature(lnwire.ShutdownAnySegwitOptional) &&
		p.LocalFeatures().HasFeature(lnwire.ShutdownAnySegwitOptional)
}

// QuitSignal is a method that should return a channel which will be sent upon
// or closed once the backing peer exits. This allows callers using the
// interface to cancel any processing in the event the backing implementation
// exits.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) QuitSignal() <-chan struct{} {
	return p.quit
}
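
// A usage sketch (illustrative): callers typically select on the returned
// channel so in-flight work is abandoned as soon as the peer exits (the
// incoming channel and handle function are hypothetical):
//
//	select {
//	case <-peer.QuitSignal():
//		return // The peer exited; stop processing.
//	case msg := <-incoming:
//		handle(msg)
//	}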

// internalKeyForAddr returns the internal key associated with a taproot
// address.
func internalKeyForAddr(wallet *lnwallet.LightningWallet,
	deliveryScript []byte) (fn.Option[btcec.PublicKey], error) {

	none := fn.None[btcec.PublicKey]()

	pkScript, err := txscript.ParsePkScript(deliveryScript)
	if err != nil {
		return none, err
	}
	addr, err := pkScript.Address(&wallet.Cfg.NetParams)
	if err != nil {
		return none, err
	}

	// If it's not a taproot address, we don't need to know the internal
	// key in the first place. So we don't return an error here, but also
	// no internal key.
	_, isTaproot := addr.(*btcutil.AddressTaproot)
	if !isTaproot {
		return none, nil
	}

	walletAddr, err := wallet.AddressInfo(addr)
	if err != nil {
		return none, err
	}

	// If the address isn't known to the wallet, we can't determine the
	// internal key.
	if walletAddr == nil {
		return none, nil
	}

	pubKeyAddr, ok := walletAddr.(waddrmgr.ManagedPubKeyAddress)
	if !ok {
		return none, fmt.Errorf("expected pubkey addr, got %T",
			walletAddr)
	}

	return fn.Some(*pubKeyAddr.PubKey()), nil
}

// addrWithInternalKey takes a delivery script, then attempts to supplement it
// with information related to the internal key for the addr, but only if it's
// a taproot addr.
func (p *Brontide) addrWithInternalKey(
	deliveryScript []byte) fn.Result[chancloser.DeliveryAddrWithKey] {

	// TODO(roasbeef): not compatible with external shutdown addr?
	// Currently, custom channels cannot be created with external upfront
	// shutdown addresses, so this shouldn't be an issue. We only require
	// the internal key for taproot addresses to be able to provide a
	// non-inclusion proof of any scripts.
	internalKey, err := internalKeyForAddr(p.cfg.Wallet, deliveryScript)
	if err != nil {
		return fn.Err[chancloser.DeliveryAddrWithKey](err)
	}

	return fn.Ok(chancloser.DeliveryAddrWithKey{
		DeliveryAddress: deliveryScript,
		InternalKey:     internalKey,
	})
}
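
// A usage sketch (illustrative): fn.Result callers unpack the wrapped value
// and error, exactly as loadActiveChannels does further below with the
// Unpack method:
//
//	addr, err := p.addrWithInternalKey(deliveryScript).Unpack()
//	if err != nil {
//		// Could not parse the script or query the wallet.
//	}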
|
|
|
|
|
2017-04-24 04:23:15 +02:00
|
|
|
// loadActiveChannels creates indexes within the peer for tracking all active
|
2019-09-06 13:14:38 +02:00
|
|
|
// channels returned by the database. It returns a slice of channel reestablish
|
|
|
|
// messages that should be sent to the peer immediately, in case we have borked
|
|
|
|
// channels that haven't been closed yet.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) (
|
2019-09-06 13:14:38 +02:00
|
|
|
[]lnwire.Message, error) {
|
|
|
|
|
|
|
|
// Return a slice of messages to send to the peers in case the channel
|
|
|
|
// cannot be loaded normally.
|
|
|
|
var msgs []lnwire.Message
|
|
|
|
|
2022-04-04 22:56:29 +02:00
|
|
|
scidAliasNegotiated := p.hasNegotiatedScidAlias()
|
|
|
|
|
2017-04-24 04:23:15 +02:00
|
|
|
for _, dbChan := range chans {
|
2022-04-04 22:56:29 +02:00
|
|
|
hasScidFeature := dbChan.ChanType.HasScidAliasFeature()
|
|
|
|
if scidAliasNegotiated && !hasScidFeature {
|
|
|
|
// We'll request and store an alias, making sure that a
|
|
|
|
// gossiper mapping is not created for the alias to the
|
|
|
|
// real SCID. This is done because the peer and funding
|
|
|
|
// manager are not aware of each other's states and if
|
|
|
|
// we did not do this, we would accept alias channel
|
|
|
|
// updates after 6 confirmations, which would be buggy.
|
2023-03-15 22:36:58 +01:00
|
|
|
// We'll queue a channel_ready message with the new
|
2022-04-04 22:56:29 +02:00
|
|
|
// alias. This should technically be done *after* the
|
|
|
|
// reestablish, but this behavior is pre-existing since
|
|
|
|
// the funding manager may already queue a
|
2023-03-15 22:36:58 +01:00
|
|
|
// channel_ready before the channel_reestablish.
|
2022-04-04 22:56:29 +02:00
|
|
|
if !dbChan.IsPending {
|
|
|
|
aliasScid, err := p.cfg.RequestAlias()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
err = p.cfg.AddLocalAlias(
|
|
|
|
aliasScid, dbChan.ShortChanID(), false,
|
2024-03-12 18:15:14 +01:00
|
|
|
false,
|
2022-04-04 22:56:29 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(
|
2024-01-29 22:19:15 +01:00
|
|
|
dbChan.FundingOutpoint,
|
2022-04-04 22:56:29 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// Fetch the second commitment point to send in
|
2023-03-15 22:36:58 +01:00
|
|
|
// the channel_ready message.
|
2022-04-04 22:56:29 +02:00
|
|
|
second, err := dbChan.SecondCommitmentPoint()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-03-15 22:00:17 +01:00
|
|
|
channelReadyMsg := lnwire.NewChannelReady(
|
2022-04-04 22:56:29 +02:00
|
|
|
chanID, second,
|
|
|
|
)
|
2023-03-15 22:00:17 +01:00
|
|
|
channelReadyMsg.AliasScid = &aliasScid
|
2022-04-04 22:56:29 +02:00
|
|
|
|
2023-03-15 22:00:17 +01:00
|
|
|
msgs = append(msgs, channelReadyMsg)
|
2022-04-04 22:56:29 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// If we've negotiated the option-scid-alias feature
|
|
|
|
// and this channel does not have ScidAliasFeature set
|
|
|
|
// to true due to an upgrade where the feature bit was
|
|
|
|
// turned on, we'll update the channel's database
|
|
|
|
// state.
|
|
|
|
err := dbChan.MarkScidAliasNegotiated()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-04-25 19:00:42 +02:00
|
|
|
var chanOpts []lnwallet.ChannelOpt
|
|
|
|
p.cfg.AuxLeafStore.WhenSome(func(s lnwallet.AuxLeafStore) {
|
|
|
|
chanOpts = append(chanOpts, lnwallet.WithLeafStore(s))
|
|
|
|
})
|
2024-04-09 04:48:36 +02:00
|
|
|
p.cfg.AuxSigner.WhenSome(func(s lnwallet.AuxSigner) {
|
|
|
|
chanOpts = append(chanOpts, lnwallet.WithAuxSigner(s))
|
|
|
|
})
|
2018-01-17 05:25:34 +01:00
|
|
|
lnChan, err := lnwallet.NewLightningChannel(
|
2024-04-25 19:00:42 +02:00
|
|
|
p.cfg.Signer, dbChan, p.cfg.SigPool, chanOpts...,
|
2018-01-17 05:25:34 +01:00
|
|
|
)
|
2017-04-24 04:23:15 +02:00
|
|
|
if err != nil {
|
2024-01-31 03:00:11 +01:00
|
|
|
return nil, fmt.Errorf("unable to create channel "+
|
|
|
|
"state machine: %w", err)
|
2017-04-24 04:23:15 +02:00
|
|
|
}
|
2016-01-17 04:03:03 +01:00
|
|
|
|
2024-01-29 22:19:15 +01:00
|
|
|
chanPoint := dbChan.FundingOutpoint
|
2017-10-03 01:30:17 +02:00
|
|
|
|
2017-07-30 23:13:28 +02:00
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
|
2017-04-24 04:23:15 +02:00
|
|
|
|
2023-03-20 09:35:09 +01:00
|
|
|
p.log.Infof("Loading ChannelPoint(%v), isPending=%v",
|
|
|
|
chanPoint, lnChan.IsPending())
|
2017-04-24 04:23:15 +02:00
|
|
|
|
2017-12-18 06:45:35 +01:00
|
|
|
// Skip adding any permanently irreconcilable channels to the
|
|
|
|
// htlcswitch.
|
2020-06-27 03:05:10 +02:00
|
|
|
if !dbChan.HasChanStatus(channeldb.ChanStatusDefault) &&
|
|
|
|
!dbChan.HasChanStatus(channeldb.ChanStatusRestored) {
|
2020-04-11 01:23:49 +02:00
|
|
|
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Warnf("ChannelPoint(%v) has status %v, won't "+
|
2018-08-11 11:12:09 +02:00
|
|
|
"start.", chanPoint, dbChan.ChanStatus())
|
2019-09-06 13:14:38 +02:00
|
|
|
|
|
|
|
// To help our peer recover from a potential data loss,
|
|
|
|
// we resend our channel reestablish message if the
|
|
|
|
// channel is in a borked state. We won't process any
|
|
|
|
// channel reestablish message sent from the peer, but
|
|
|
|
// that's okay since the assumption is that we did when
|
|
|
|
// marking the channel borked.
|
2019-09-11 11:15:57 +02:00
|
|
|
chanSync, err := dbChan.ChanSyncMsg()
|
2019-09-06 13:14:38 +02:00
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("Unable to create channel "+
|
2019-09-06 13:14:38 +02:00
|
|
|
"reestablish message for channel %v: "+
|
|
|
|
"%v", chanPoint, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
msgs = append(msgs, chanSync)
|
2022-04-26 18:44:18 +02:00
|
|
|
|
|
|
|
// Check if this channel needs to have the cooperative
|
|
|
|
// close process restarted. If so, we'll need to send
|
|
|
|
// the Shutdown message that is returned.
|
|
|
|
if dbChan.HasChanStatus(
|
|
|
|
channeldb.ChanStatusCoopBroadcasted,
|
|
|
|
) {
|
2022-04-26 18:51:54 +02:00
|
|
|
|
2022-04-26 18:44:18 +02:00
|
|
|
shutdownMsg, err := p.restartCoopClose(lnChan)
|
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("Unable to restart "+
|
2022-04-26 18:44:18 +02:00
|
|
|
"coop close for channel: %v",
|
|
|
|
err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
if shutdownMsg == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Append the message to the set of messages to
|
|
|
|
// send.
|
|
|
|
msgs = append(msgs, shutdownMsg)
|
|
|
|
}
|
|
|
|
|
2018-03-13 11:03:55 +01:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-08-22 08:53:21 +02:00
|
|
|
// Before we register this new link with the HTLC Switch, we'll
|
|
|
|
// need to fetch its current link-layer forwarding policy from
|
|
|
|
// the database.
|
2020-05-07 05:49:36 +02:00
|
|
|
graph := p.cfg.ChannelGraph
|
2024-01-29 22:19:15 +01:00
|
|
|
info, p1, p2, err := graph.FetchChannelEdgesByOutpoint(
|
|
|
|
&chanPoint,
|
|
|
|
)
|
2024-06-06 19:14:33 +02:00
|
|
|
if err != nil && !errors.Is(err, channeldb.ErrEdgeNotFound) {
|
2019-09-06 13:14:38 +02:00
|
|
|
return nil, err
|
2017-08-22 08:53:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// We'll filter out our policy from the directional channel
|
|
|
|
// edges based whom the edge connects to. If it doesn't connect
|
|
|
|
// to us, then we know that we were the one that advertised the
|
|
|
|
// policy.
|
|
|
|
//
|
|
|
|
// TODO(roasbeef): can add helper method to get policy for
|
|
|
|
// particular channel.
|
2023-11-08 10:18:45 +01:00
|
|
|
var selfPolicy *models.ChannelEdgePolicy
|
2018-01-31 05:30:00 +01:00
|
|
|
if info != nil && bytes.Equal(info.NodeKey1Bytes[:],
|
2020-06-30 03:29:22 +02:00
|
|
|
p.cfg.ServerPubKey[:]) {
|
2018-01-31 05:30:00 +01:00
|
|
|
|
2017-08-22 08:53:21 +02:00
|
|
|
selfPolicy = p1
|
|
|
|
} else {
|
|
|
|
selfPolicy = p2
|
|
|
|
}
|
|
|
|
|
2017-08-23 20:32:50 +02:00
|
|
|
// If we don't yet have an advertised routing policy, then
|
|
|
|
// we'll use the current default, otherwise we'll translate the
|
|
|
|
// routing policy into a forwarding policy.
|
2023-07-17 12:53:24 +02:00
|
|
|
var forwardingPolicy *models.ForwardingPolicy
|
2017-08-23 20:32:50 +02:00
|
|
|
if selfPolicy != nil {
|
2022-09-19 12:06:34 +02:00
|
|
|
var inboundWireFee lnwire.Fee
|
|
|
|
_, err := selfPolicy.ExtraOpaqueData.ExtractRecords(
|
|
|
|
&inboundWireFee,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
inboundFee := models.NewInboundFeeFromWire(
|
|
|
|
inboundWireFee,
|
|
|
|
)
|
|
|
|
|
2023-07-17 12:53:24 +02:00
|
|
|
forwardingPolicy = &models.ForwardingPolicy{
|
2019-11-15 10:09:27 +01:00
|
|
|
MinHTLCOut: selfPolicy.MinHTLC,
|
2018-12-09 03:18:20 +01:00
|
|
|
MaxHTLC: selfPolicy.MaxHTLC,
|
2017-08-23 20:32:50 +02:00
|
|
|
BaseFee: selfPolicy.FeeBaseMSat,
|
|
|
|
FeeRate: selfPolicy.FeeProportionalMillionths,
|
|
|
|
TimeLockDelta: uint32(selfPolicy.TimeLockDelta),
|
2022-09-19 12:06:34 +02:00
|
|
|
|
|
|
|
InboundFee: inboundFee,
|
2017-08-23 20:32:50 +02:00
|
|
|
}
|
|
|
|
} else {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Warnf("Unable to find our forwarding policy "+
|
2018-08-22 09:32:44 +02:00
|
|
|
"for channel %v, using default values",
|
|
|
|
chanPoint)
|
2020-06-30 03:29:22 +02:00
|
|
|
forwardingPolicy = &p.cfg.RoutingPolicy
|
2017-08-22 08:53:21 +02:00
|
|
|
}
|
|
|
|
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Tracef("Using link policy of: %v",
|
2018-08-22 09:32:44 +02:00
|
|
|
spew.Sdump(forwardingPolicy))
|
2017-08-22 08:53:21 +02:00
|
|
|
|
2020-03-07 04:43:51 +01:00
|
|
|
// If the channel is pending, set the value to nil in the
|
2024-01-31 11:09:02 +01:00
|
|
|
// activeChannels map. This is done to signify that the channel
|
|
|
|
// is pending. We don't add the link to the switch here - it's
|
|
|
|
// the funding manager's responsibility to spin up pending
|
|
|
|
// channels. Adding them here would just be extra work as we'll
|
|
|
|
// tear them down when creating + adding the final link.
|
2020-03-07 04:43:51 +01:00
|
|
|
if lnChan.IsPending() {
|
2023-03-29 13:24:07 +02:00
|
|
|
p.activeChannels.Store(chanID, nil)
|
2020-03-07 04:43:51 +01:00
|
|
|
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2024-02-06 18:11:26 +01:00
|
|
|
shutdownInfo, err := lnChan.State().ShutdownInfo()
|
|
|
|
if err != nil && !errors.Is(err, channeldb.ErrNoShutdownInfo) {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
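// shutdownMsg is populated below if we need to re-send a Shutdown message
// for a coop close that was initiated before restart, while shutdownInfoErr
// records any error hit inside the WhenSome closure.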
var (
|
|
|
|
shutdownMsg fn.Option[lnwire.Shutdown]
|
|
|
|
shutdownInfoErr error
|
|
|
|
)
|
|
|
|
shutdownInfo.WhenSome(func(info channeldb.ShutdownInfo) {
|
|
|
|
// Compute an ideal fee.
|
|
|
|
feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
|
|
|
|
p.cfg.CoopCloseTargetConfs,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
shutdownInfoErr = fmt.Errorf("unable to "+
|
|
|
|
"estimate fee: %w", err)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2024-05-29 19:57:47 +02:00
|
|
|
addr, err := p.addrWithInternalKey(
|
|
|
|
info.DeliveryScript.Val,
|
|
|
|
).Unpack()
|
|
|
|
if err != nil {
|
|
|
|
shutdownInfoErr = fmt.Errorf("unable to make "+
|
|
|
|
"delivery addr: %w", err)
|
|
|
|
return
|
|
|
|
}
|
2024-02-06 18:11:26 +01:00
|
|
|
chanCloser, err := p.createChanCloser(
|
2024-05-29 19:57:47 +02:00
|
|
|
lnChan, addr, feePerKw, nil, info.Closer(),
|
2024-02-06 18:11:26 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
shutdownInfoErr = fmt.Errorf("unable to "+
|
|
|
|
"create chan closer: %w", err)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(
|
2024-01-29 22:19:15 +01:00
|
|
|
lnChan.State().FundingOutpoint,
|
2024-02-06 18:11:26 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
p.activeChanCloses[chanID] = chanCloser
|
|
|
|
|
|
|
|
// Create the Shutdown message.
|
|
|
|
shutdown, err := chanCloser.ShutdownChan()
|
|
|
|
if err != nil {
|
|
|
|
delete(p.activeChanCloses, chanID)
|
|
|
|
shutdownInfoErr = err
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2024-06-06 15:15:51 +02:00
|
|
|
shutdownMsg = fn.Some(*shutdown)
|
2024-02-06 18:11:26 +01:00
|
|
|
})
|
|
|
|
if shutdownInfoErr != nil {
|
|
|
|
return nil, shutdownInfoErr
|
|
|
|
}
|
|
|
|
|
2020-03-07 04:43:51 +01:00
|
|
|
// Subscribe to the set of on-chain events for this channel.
|
2020-06-30 03:29:22 +02:00
|
|
|
chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents(
|
2024-01-29 22:19:15 +01:00
|
|
|
chanPoint,
|
2018-01-21 05:25:54 +01:00
|
|
|
)
|
2018-01-18 23:03:13 +01:00
|
|
|
if err != nil {
|
2019-09-06 13:14:38 +02:00
|
|
|
return nil, err
|
2018-01-18 23:03:13 +01:00
|
|
|
}
|
2017-05-02 22:04:58 +02:00
|
|
|
|
2018-06-12 08:52:38 +02:00
|
|
|
err = p.addLink(
|
2024-01-29 22:19:15 +01:00
|
|
|
&chanPoint, lnChan, forwardingPolicy, chainEvents,
|
2024-02-06 18:11:26 +01:00
|
|
|
true, shutdownMsg,
|
2018-06-12 08:52:38 +02:00
|
|
|
)
|
2018-05-23 13:33:41 +02:00
|
|
|
if err != nil {
|
2019-09-06 13:14:38 +02:00
|
|
|
return nil, fmt.Errorf("unable to add link %v to "+
|
|
|
|
"switch: %v", chanPoint, err)
|
2017-05-02 22:04:58 +02:00
|
|
|
}
|
2018-06-12 08:52:38 +02:00
|
|
|
|
2023-03-29 13:24:07 +02:00
|
|
|
p.activeChannels.Store(chanID, lnChan)
|
2017-04-24 04:23:15 +02:00
|
|
|
}
|
2016-01-17 04:03:03 +01:00
|
|
|
|
2019-09-06 13:14:38 +02:00
|
|
|
return msgs, nil
|
2016-01-17 04:03:03 +01:00
|
|
|
}
|
|
|
|
|
2020-06-26 23:55:40 +02:00
|
|
|
// addLink creates and adds a new ChannelLink from the specified channel.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) addLink(chanPoint *wire.OutPoint,
|
2018-05-23 13:33:41 +02:00
|
|
|
lnChan *lnwallet.LightningChannel,
|
2023-07-17 12:53:24 +02:00
|
|
|
forwardingPolicy *models.ForwardingPolicy,
|
2018-05-23 13:33:41 +02:00
|
|
|
chainEvents *contractcourt.ChainEventSubscription,
|
2024-02-06 18:11:26 +01:00
|
|
|
syncStates bool, shutdownMsg fn.Option[lnwire.Shutdown]) error {
|
2018-05-23 13:33:41 +02:00
|
|
|
|
2018-05-23 13:39:04 +02:00
|
|
|
// onChannelFailure will be called by the link in case the channel
|
|
|
|
// fails for some reason.
|
|
|
|
onChannelFailure := func(chanID lnwire.ChannelID,
|
|
|
|
shortChanID lnwire.ShortChannelID,
|
|
|
|
linkErr htlcswitch.LinkFailureError) {
|
|
|
|
|
2018-08-31 01:54:53 +02:00
|
|
|
failure := linkFailureReport{
|
|
|
|
chanPoint: *chanPoint,
|
|
|
|
chanID: chanID,
|
|
|
|
shortChanID: shortChanID,
|
|
|
|
linkErr: linkErr,
|
|
|
|
}
|
|
|
|
|
2018-05-26 04:08:59 +02:00
|
|
|
select {
|
2018-08-31 01:54:53 +02:00
|
|
|
case p.linkFailures <- failure:
|
|
|
|
case <-p.quit:
|
2020-06-30 03:29:22 +02:00
|
|
|
case <-p.cfg.Quit:
|
2018-05-26 04:08:59 +02:00
|
|
|
}
|
2018-05-23 13:39:04 +02:00
|
|
|
}
|
|
|
|
|
2020-07-02 23:46:06 +02:00
|
|
|
updateContractSignals := func(signals *contractcourt.ContractSignals) error {
|
|
|
|
return p.cfg.ChainArb.UpdateContractSignals(*chanPoint, signals)
|
|
|
|
}
|
|
|
|
|
2022-01-04 22:21:36 +01:00
|
|
|
notifyContractUpdate := func(update *contractcourt.ContractUpdate) error {
|
|
|
|
return p.cfg.ChainArb.NotifyContractUpdate(*chanPoint, update)
|
|
|
|
}
|
|
|
|
|
2022-11-23 23:58:33 +01:00
|
|
|
//nolint:lll
|
2018-05-23 13:33:41 +02:00
|
|
|
linkCfg := htlcswitch.ChannelLinkConfig{
|
2022-01-21 00:44:57 +01:00
|
|
|
Peer: p,
|
|
|
|
DecodeHopIterators: p.cfg.Sphinx.DecodeHopIterators,
|
|
|
|
ExtractErrorEncrypter: p.cfg.Sphinx.ExtractErrorEncrypter,
|
|
|
|
FetchLastChannelUpdate: p.cfg.FetchLastChanUpdate,
|
|
|
|
HodlMask: p.cfg.Hodl.Mask(),
|
|
|
|
Registry: p.cfg.Invoices,
|
|
|
|
BestHeight: p.cfg.Switch.BestHeight,
|
|
|
|
Circuits: p.cfg.Switch.CircuitModifier(),
|
|
|
|
ForwardPackets: p.cfg.InterceptSwitch.ForwardPackets,
|
|
|
|
FwrdingPolicy: *forwardingPolicy,
|
|
|
|
FeeEstimator: p.cfg.FeeEstimator,
|
|
|
|
PreimageCache: p.cfg.WitnessBeacon,
|
|
|
|
ChainEvents: chainEvents,
|
|
|
|
UpdateContractSignals: updateContractSignals,
|
|
|
|
NotifyContractUpdate: notifyContractUpdate,
|
|
|
|
OnChannelFailure: onChannelFailure,
|
|
|
|
SyncStates: syncStates,
|
|
|
|
BatchTicker: ticker.New(p.cfg.ChannelCommitInterval),
|
|
|
|
FwdPkgGCTicker: ticker.New(time.Hour),
|
|
|
|
PendingCommitTicker: ticker.New(
|
|
|
|
p.cfg.PendingCommitInterval,
|
|
|
|
),
|
2021-04-09 15:10:27 +02:00
|
|
|
BatchSize: p.cfg.ChannelCommitBatchSize,
|
2020-05-14 14:26:07 +02:00
|
|
|
UnsafeReplay: p.cfg.UnsafeReplay,
|
2024-04-23 09:49:04 +02:00
|
|
|
MinUpdateTimeout: htlcswitch.DefaultMinLinkFeeUpdateTimeout,
|
|
|
|
MaxUpdateTimeout: htlcswitch.DefaultMaxLinkFeeUpdateTimeout,
|
2020-06-30 03:29:22 +02:00
|
|
|
OutgoingCltvRejectDelta: p.cfg.OutgoingCltvRejectDelta,
|
2023-05-31 09:48:19 +02:00
|
|
|
TowerClient: p.cfg.TowerClient,
|
2020-05-14 14:26:07 +02:00
|
|
|
MaxOutgoingCltvExpiry: p.cfg.MaxOutgoingCltvExpiry,
|
|
|
|
MaxFeeAllocation: p.cfg.MaxChannelFeeAllocation,
|
2020-12-10 14:16:53 +01:00
|
|
|
MaxAnchorsCommitFeeRate: p.cfg.MaxAnchorsCommitFeeRate,
|
2020-06-30 03:29:22 +02:00
|
|
|
NotifyActiveLink: p.cfg.ChannelNotifier.NotifyActiveLinkEvent,
|
|
|
|
NotifyActiveChannel: p.cfg.ChannelNotifier.NotifyActiveChannelEvent,
|
|
|
|
NotifyInactiveChannel: p.cfg.ChannelNotifier.NotifyInactiveChannelEvent,
|
2022-11-23 23:58:33 +01:00
|
|
|
NotifyInactiveLinkEvent: p.cfg.ChannelNotifier.NotifyInactiveLinkEvent,
|
2020-06-30 03:29:22 +02:00
|
|
|
HtlcNotifier: p.cfg.HtlcNotifier,
|
2022-04-04 22:56:29 +02:00
|
|
|
GetAliases: p.cfg.GetAliases,
|
2024-02-06 18:11:26 +01:00
|
|
|
PreviouslySentShutdown: shutdownMsg,
|
2024-04-02 15:04:27 +02:00
|
|
|
DisallowRouteBlinding: p.cfg.DisallowRouteBlinding,
|
2024-06-03 18:43:33 +02:00
|
|
|
MaxFeeExposure: p.cfg.MaxFeeExposure,
|
2018-05-23 13:33:41 +02:00
|
|
|
}
|
2018-05-10 23:40:29 +02:00
|
|
|
|
2018-08-02 11:34:23 +02:00
|
|
|
// Before adding our new link, purge the switch of any pending or live
|
|
|
|
// links going by the same channel id. If one is found, we'll shut it
|
|
|
|
// down to ensure that the mailboxes are only ever under the control of
|
|
|
|
// one link.
|
2024-01-29 22:19:15 +01:00
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(*chanPoint)
|
2021-08-03 21:18:37 +02:00
|
|
|
p.cfg.Switch.RemoveLink(chanID)
|
2018-08-02 11:34:23 +02:00
|
|
|
|
2018-05-23 13:33:41 +02:00
|
|
|
// With the channel link created, we'll now notify the htlc switch so
|
|
|
|
// this channel can be used to dispatch local payments and also
|
|
|
|
// passively forward payments.
|
2021-08-03 21:18:37 +02:00
|
|
|
return p.cfg.Switch.CreateAndAddLink(linkCfg, lnChan)
|
2018-05-23 13:33:41 +02:00
|
|
|
}
|
|
|
|
|
2020-03-31 19:35:02 +02:00
|
|
|
// maybeSendNodeAnn sends our node announcement to the remote peer if at least
|
2020-06-26 23:55:40 +02:00
|
|
|
// one confirmed public channel exists with them.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) maybeSendNodeAnn(channels []*channeldb.OpenChannel) {
|
2024-08-01 03:03:57 +02:00
|
|
|
defer p.wg.Done()
|
|
|
|
|
2020-03-31 19:35:02 +02:00
|
|
|
hasConfirmedPublicChan := false
|
|
|
|
for _, channel := range channels {
|
|
|
|
if channel.IsPending {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if channel.ChannelFlags&lnwire.FFAnnounceChannel == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
hasConfirmedPublicChan = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if !hasConfirmedPublicChan {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-04-05 10:21:59 +02:00
|
|
|
ourNodeAnn, err := p.cfg.GenNodeAnnouncement()
|
2020-03-31 19:35:02 +02:00
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Debugf("Unable to retrieve node announcement: %v", err)
|
2020-03-31 19:35:02 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := p.SendMessageLazy(false, &ourNodeAnn); err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Debugf("Unable to resend node announcement: %v", err)
|
2020-03-31 19:35:02 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-08-01 03:03:57 +02:00
|
|
|
// maybeSendChannelUpdates sends our channel updates to the remote peer if we
|
|
|
|
// have any active channels with them.
|
|
|
|
func (p *Brontide) maybeSendChannelUpdates() {
|
|
|
|
defer p.wg.Done()
|
|
|
|
|
|
|
|
// If we don't have any active channels, then we can exit early.
|
|
|
|
if p.activeChannels.Len() == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
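// maybeSendUpd fetches our latest channel update for a single channel and
// sends it to the remote peer, skipping channels that are still pending.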
maybeSendUpd := func(cid lnwire.ChannelID,
|
|
|
|
lnChan *lnwallet.LightningChannel) error {
|
|
|
|
|
|
|
|
// Nil channels are pending, so we'll skip them.
|
|
|
|
if lnChan == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
dbChan := lnChan.State()
|
|
|
|
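// Resolve the scid to use when looking up our latest channel update.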
scid := func() lnwire.ShortChannelID {
|
|
|
|
switch {
|
|
|
|
// If it's a zero conf channel that's confirmed, then we
|
|
|
|
// need to use the "real" scid.
|
|
|
|
case dbChan.IsZeroConf() && dbChan.ZeroConfConfirmed():
|
|
|
|
return dbChan.ZeroConfRealScid()
|
|
|
|
|
|
|
|
// Otherwise, we can use the normal scid.
|
|
|
|
default:
|
|
|
|
return dbChan.ShortChanID()
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Now that we know the channel is in a good state, we'll try
|
|
|
|
// to fetch the update to send to the remote peer. If the
|
|
|
|
// channel is pending, and not a zero conf channel, we'll get
|
|
|
|
// an error here which we'll ignore.
|
|
|
|
chanUpd, err := p.cfg.FetchLastChanUpdate(scid)
|
|
|
|
if err != nil {
|
|
|
|
p.log.Debugf("Unable to fetch channel update for "+
|
|
|
|
"ChannelPoint(%v), scid=%v: %v",
|
|
|
|
dbChan.FundingOutpoint, dbChan.ShortChanID(), err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
p.log.Debugf("Sending channel update for ChannelPoint(%v), "+
|
|
|
|
"scid=%v", dbChan.FundingOutpoint, dbChan.ShortChanID)
|
|
|
|
|
|
|
|
// We'll send it as a normal message instead of using the lazy
|
|
|
|
// queue to prioritize transmission of the fresh update.
|
|
|
|
if err := p.SendMessage(false, chanUpd); err != nil {
|
|
|
|
err := fmt.Errorf("unable to send channel update for "+
|
|
|
|
"ChannelPoint(%v), scid=%v: %w",
|
|
|
|
dbChan.FundingOutpoint, dbChan.ShortChanID(),
|
|
|
|
err)
|
|
|
|
p.log.Errorf("%v", err)
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
p.activeChannels.ForEach(maybeSendUpd)
|
|
|
|
}
|
|
|
|
|
2017-04-24 04:29:38 +02:00
|
|
|
// WaitForDisconnect waits until the peer has disconnected. A peer may be
|
2020-06-26 23:55:40 +02:00
|
|
|
// disconnected if the local or remote side terminates the connection, or an
|
2018-07-31 05:51:08 +02:00
|
|
|
// irrecoverable protocol error has been encountered. This method will only
|
|
|
|
// begin watching the peer's waitgroup after the ready channel or the peer's
|
|
|
|
// quit channel are signaled. The ready channel should only be signaled if a
|
|
|
|
// call to Start returns no error. Otherwise, if the peer fails to start,
|
|
|
|
// calling Disconnect will signal the quit channel and the method will not
|
|
|
|
// block, since no goroutines were spawned.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) WaitForDisconnect(ready chan struct{}) {
|
2023-07-25 20:14:41 +02:00
|
|
|
// Before we try to call the `Wait` goroutine, we'll make sure the main
|
|
|
|
// set of goroutines are already active.
|
|
|
|
select {
|
|
|
|
case <-p.startReady:
|
|
|
|
case <-p.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-07-31 05:51:08 +02:00
|
|
|
select {
|
|
|
|
case <-ready:
|
|
|
|
case <-p.quit:
|
|
|
|
}
|
|
|
|
|
2018-05-08 05:31:22 +02:00
|
|
|
p.wg.Wait()
|
2017-04-24 04:29:38 +02:00
|
|
|
}
|
2017-02-21 07:06:16 +01:00
|
|
|
|
2016-07-14 01:40:01 +02:00
|
|
|
// Disconnect terminates the connection with the remote peer. Additionally, a
|
|
|
|
// signal is sent to the server and htlcSwitch indicating the resources
|
|
|
|
// allocated to the peer can now be cleaned up.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) Disconnect(reason error) {
|
2016-07-14 01:40:01 +02:00
|
|
|
if !atomic.CompareAndSwapInt32(&p.disconnect, 0, 1) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-07-25 20:14:41 +02:00
|
|
|
// Make sure initialization has completed before we try to tear things
|
|
|
|
// down.
|
|
|
|
select {
|
|
|
|
case <-p.startReady:
|
|
|
|
case <-p.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-03-17 07:22:35 +01:00
|
|
|
err := fmt.Errorf("disconnecting %s, reason: %v", p, reason)
|
|
|
|
p.storeError(err)
|
|
|
|
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Infof(err.Error())
|
2017-02-07 00:04:52 +01:00
|
|
|
|
2024-05-15 23:27:45 +02:00
|
|
|
// Stop PingManager before closing TCP connection.
|
|
|
|
p.pingManager.Stop()
|
|
|
|
|
2017-02-07 00:04:52 +01:00
|
|
|
// Ensure that the TCP connection is properly closed before continuing.
|
2020-06-30 03:29:22 +02:00
|
|
|
p.cfg.Conn.Close()
|
2016-07-14 01:40:01 +02:00
|
|
|
|
|
|
|
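// Signal all of the peer's goroutines to exit.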
close(p.quit)
|
2024-01-31 03:00:11 +01:00
|
|
|
|
2024-07-09 23:05:33 +02:00
|
|
|
// If our msg router isn't global (local to this instance), then we'll
|
|
|
|
// stop it. Otherwise, we'll leave it running.
|
|
|
|
if !p.globalMsgRouter {
|
|
|
|
p.msgRouter.WhenSome(func(router msgmux.Router) {
|
|
|
|
router.Stop()
|
|
|
|
})
|
|
|
|
}
|
2016-07-14 01:40:01 +02:00
|
|
|
}
|
|
|
|
|
2016-06-21 21:32:32 +02:00
|
|
|
// String returns the string representation of this peer.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) String() string {
|
2020-06-30 03:29:22 +02:00
|
|
|
return fmt.Sprintf("%x@%s", p.cfg.PubKeyBytes, p.cfg.Conn.RemoteAddr())
|
2016-06-21 21:32:32 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// readNextMessage reads and returns the next message on the wire along with
|
|
|
|
// any additional raw payload.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) readNextMessage() (lnwire.Message, error) {
|
2020-09-24 16:19:54 +02:00
|
|
|
noiseConn := p.cfg.Conn
|
2019-02-22 05:11:33 +01:00
|
|
|
err := noiseConn.SetReadDeadline(time.Time{})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
pktLen, err := noiseConn.ReadNextHeader()
|
|
|
|
if err != nil {
|
2023-06-14 00:09:11 +02:00
|
|
|
return nil, fmt.Errorf("read next header: %w", err)
|
2019-02-22 05:11:33 +01:00
|
|
|
}
|
|
|
|
|
2017-04-21 00:45:04 +02:00
|
|
|
// First we'll read the next _full_ message. We do this rather than
|
|
|
|
// reading incrementally from the stream as the Lightning wire protocol
|
|
|
|
// is message oriented and allows nodes to pad on additional data to
|
|
|
|
// the message stream.
|
2021-08-12 03:04:00 +02:00
|
|
|
var (
|
|
|
|
nextMsg lnwire.Message
|
|
|
|
msgLen uint64
|
|
|
|
)
|
2020-06-30 03:29:22 +02:00
|
|
|
err = p.cfg.ReadPool.Submit(func(buf *buffer.Read) error {
|
2019-02-22 05:11:33 +01:00
|
|
|
// Before reading the body of the message, set the read timeout
|
|
|
|
// accordingly to ensure we don't block other readers using the
|
|
|
|
// pool. We do so only after the task has been scheduled to
|
|
|
|
// ensure the deadline doesn't expire while the message is in
|
|
|
|
// the process of being scheduled.
|
2024-05-16 20:19:20 +02:00
|
|
|
readDeadline := time.Now().Add(
|
|
|
|
p.scaleTimeout(readMessageTimeout),
|
|
|
|
)
|
2019-02-22 05:11:33 +01:00
|
|
|
readErr := noiseConn.SetReadDeadline(readDeadline)
|
|
|
|
if readErr != nil {
|
|
|
|
return readErr
|
|
|
|
}
|
|
|
|
|
2021-08-12 03:04:00 +02:00
|
|
|
// The ReadNextBody method will actually end up re-using the
|
|
|
|
// buffer, so within this closure, we can continue to use
|
|
|
|
// rawMsg as it's just a slice into the buf from the buffer
|
|
|
|
// pool.
|
|
|
|
rawMsg, readErr := noiseConn.ReadNextBody(buf[:pktLen])
|
|
|
|
if readErr != nil {
|
2023-06-14 00:09:11 +02:00
|
|
|
return fmt.Errorf("read next body: %w", readErr)
|
2021-08-12 03:04:00 +02:00
|
|
|
}
|
|
|
|
msgLen = uint64(len(rawMsg))
|
2017-04-21 00:45:04 +02:00
|
|
|
|
2021-08-12 03:04:00 +02:00
|
|
|
// Next, create a new io.Reader implementation from the raw
|
|
|
|
// message, and use this to decode the message directly.
|
|
|
|
msgReader := bytes.NewReader(rawMsg)
|
|
|
|
nextMsg, err = lnwire.ReadMessage(msgReader, 0)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// At this point, rawMsg and buf will be returned back to the
|
|
|
|
// buffer pool for re-use.
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
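// Update the running tally of bytes we've read from this peer.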
atomic.AddUint64(&p.bytesReceived, msgLen)
|
2016-01-14 06:41:46 +01:00
|
|
|
if err != nil {
|
2017-04-20 01:23:17 +02:00
|
|
|
return nil, err
|
2016-01-14 06:41:46 +01:00
|
|
|
}
|
|
|
|
|
2017-01-15 02:52:05 +01:00
|
|
|
p.logWireMessage(nextMsg, true)
|
2016-06-21 21:32:32 +02:00
|
|
|
|
2017-04-20 01:23:17 +02:00
|
|
|
return nextMsg, nil
|
2016-01-14 06:41:46 +01:00
|
|
|
}
|
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
// msgStream implements a goroutine-safe, in-order stream of messages to be
|
|
|
|
// delivered via closure to a receiver. These messages MUST be in order due to
|
|
|
|
// the nature of the lightning channel commitment and gossiper state machines.
|
|
|
|
// TODO(conner): use stream handler interface to abstract out stream
|
2022-04-26 18:51:54 +02:00
|
|
|
// state/logging.
|
2017-11-01 23:50:55 +01:00
|
|
|
type msgStream struct {
|
2018-06-01 00:41:41 +02:00
|
|
|
streamShutdown int32 // To be used atomically.
|
2018-01-09 03:41:15 +01:00
|
|
|
|
2020-07-02 23:46:06 +02:00
|
|
|
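// peer is the Brontide this stream is bound to; the peer's quit channel
// also terminates the stream.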
peer *Brontide
|
2017-08-01 06:20:36 +02:00
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
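// apply is the closure invoked on each message popped off the stream.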
apply func(lnwire.Message)
|
2017-08-01 06:20:36 +02:00
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
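// startMsg and stopMsg are logged when the consumer goroutine starts and
// exits, respectively.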
startMsg string
|
|
|
|
stopMsg string
|
2017-08-01 06:20:36 +02:00
|
|
|
|
|
|
|
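// msgCond guards msgs and is signaled whenever a message is queued or the
// stream is shutting down.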
msgCond *sync.Cond
|
|
|
|
msgs []lnwire.Message
|
|
|
|
|
|
|
|
mtx sync.Mutex
|
|
|
|
|
2018-03-13 00:34:03 +01:00
|
|
|
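// producerSema bounds the in-memory queue: AddMsg blocks on it when the
// buffer is full, and msgConsumer refills it as messages are processed.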
producerSema chan struct{}
|
|
|
|
|
2017-08-09 01:51:19 +02:00
|
|
|
wg sync.WaitGroup
|
2017-08-01 06:20:36 +02:00
|
|
|
quit chan struct{}
|
|
|
|
}
|
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
// newMsgStream creates a new instance of a msgStream for the given peer.
|
2018-03-13 00:34:03 +01:00
|
|
|
// bufSize is the max number of messages
|
|
|
|
// that should be buffered in the internal queue. Callers should set this to a
|
|
|
|
// sane value that avoids blocking unnecessarily, but doesn't allow an
|
|
|
|
// unbounded amount of memory to be allocated to buffer incoming messages.
|
2020-07-02 23:46:06 +02:00
|
|
|
func newMsgStream(p *Brontide, startMsg, stopMsg string, bufSize uint32,
|
2017-11-01 23:50:55 +01:00
|
|
|
apply func(lnwire.Message)) *msgStream {
|
|
|
|
|
|
|
|
stream := &msgStream{
|
2018-03-13 00:34:03 +01:00
|
|
|
peer: p,
|
|
|
|
apply: apply,
|
|
|
|
startMsg: startMsg,
|
|
|
|
stopMsg: stopMsg,
|
|
|
|
producerSema: make(chan struct{}, bufSize),
|
|
|
|
quit: make(chan struct{}),
|
2017-08-01 06:20:36 +02:00
|
|
|
}
|
|
|
|
stream.msgCond = sync.NewCond(&stream.mtx)
|
|
|
|
|
2018-03-13 00:34:03 +01:00
|
|
|
// Before we return the active stream, we'll populate the producer's
|
|
|
|
// semaphore channel. We'll use this to ensure that the producer won't
|
|
|
|
// attempt to allocate memory in the queue for an item until it has
|
|
|
|
// sufficient extra space.
|
|
|
|
for i := uint32(0); i < bufSize; i++ {
|
|
|
|
stream.producerSema <- struct{}{}
|
|
|
|
}
|
|
|
|
|
2017-08-01 06:20:36 +02:00
|
|
|
return stream
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start starts the msgStream.
|
2017-11-01 23:50:55 +01:00
|
|
|
func (ms *msgStream) Start() {
|
|
|
|
ms.wg.Add(1)
|
|
|
|
go ms.msgConsumer()
|
2017-08-01 06:20:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Stop stops the msgStream.
|
2017-11-01 23:50:55 +01:00
|
|
|
func (ms *msgStream) Stop() {
|
2017-08-01 06:20:36 +02:00
|
|
|
// TODO(roasbeef): signal too?
|
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
close(ms.quit)
|
2017-08-01 06:31:16 +02:00
|
|
|
|
2018-01-09 03:41:15 +01:00
|
|
|
// Now that we've closed the channel, we'll repeatedly signal the msg
|
|
|
|
// consumer until we've detected that it has exited.
|
|
|
|
for atomic.LoadInt32(&ms.streamShutdown) == 0 {
|
|
|
|
ms.msgCond.Signal()
|
|
|
|
time.Sleep(time.Millisecond * 100)
|
|
|
|
}
|
2017-08-09 01:51:19 +02:00
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
ms.wg.Wait()
|
2017-08-01 06:20:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// msgConsumer is the main goroutine that streams messages from the peer's
|
|
|
|
// readHandler directly to the stream's apply closure.
|
2017-11-01 23:50:55 +01:00
|
|
|
func (ms *msgStream) msgConsumer() {
|
|
|
|
defer ms.wg.Done()
|
|
|
|
defer peerLog.Tracef(ms.stopMsg)
|
2018-08-25 02:54:26 +02:00
|
|
|
defer atomic.StoreInt32(&ms.streamShutdown, 1)
|
2017-08-09 01:51:19 +02:00
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
peerLog.Tracef(ms.startMsg)
|
2017-08-01 06:20:36 +02:00
|
|
|
|
|
|
|
for {
|
|
|
|
// First, we'll check our condition. If the queue of messages
|
|
|
|
// is empty, then we'll wait until a new item is added.
|
2017-11-01 23:50:55 +01:00
|
|
|
ms.msgCond.L.Lock()
|
|
|
|
for len(ms.msgs) == 0 {
|
|
|
|
ms.msgCond.Wait()
|
2017-08-01 06:31:16 +02:00
|
|
|
|
2018-01-09 03:41:15 +01:00
|
|
|
// If we woke up in order to exit, then we'll do so.
|
|
|
|
// Otherwise, we'll check the message queue for any new
|
|
|
|
// items.
|
2017-08-01 06:31:16 +02:00
|
|
|
select {
|
2018-08-25 02:58:24 +02:00
|
|
|
case <-ms.peer.quit:
|
2018-09-04 02:03:00 +02:00
|
|
|
ms.msgCond.L.Unlock()
|
2018-08-25 02:58:24 +02:00
|
|
|
return
|
2017-11-01 23:50:55 +01:00
|
|
|
case <-ms.quit:
|
|
|
|
ms.msgCond.L.Unlock()
|
2017-08-01 06:31:16 +02:00
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
2017-08-01 06:20:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Grab the message off the front of the queue, shifting the
|
|
|
|
// slice's reference down one in order to remove the message
|
|
|
|
// from the queue.
|
2017-11-01 23:50:55 +01:00
|
|
|
msg := ms.msgs[0]
|
|
|
|
ms.msgs[0] = nil // Set to nil to prevent GC leak.
|
|
|
|
ms.msgs = ms.msgs[1:]
|
|
|
|
|
|
|
|
ms.msgCond.L.Unlock()
|
2017-08-01 06:20:36 +02:00
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
ms.apply(msg)
|
2018-03-13 00:34:03 +01:00
|
|
|
|
|
|
|
// We've just successfully processed an item, so we'll signal
|
|
|
|
// to the producer that a new slot has opened up in the buffer. We'll use
|
|
|
|
// this to bound the size of the buffer to avoid allowing it to
|
|
|
|
// grow indefinitely.
|
|
|
|
select {
|
|
|
|
case ms.producerSema <- struct{}{}:
|
2018-08-25 02:58:24 +02:00
|
|
|
case <-ms.peer.quit:
|
|
|
|
return
|
2018-03-13 00:34:03 +01:00
|
|
|
case <-ms.quit:
|
|
|
|
return
|
|
|
|
}
|
2017-08-01 06:20:36 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
// AddMsg adds a new message to the msgStream. This function is safe for
|
2017-08-01 06:20:36 +02:00
|
|
|
// concurrent access.
|
2018-11-02 01:08:24 +01:00
|
|
|
func (ms *msgStream) AddMsg(msg lnwire.Message) {
|
2018-03-13 00:34:03 +01:00
|
|
|
// First, we'll attempt to receive from the producerSema channel. This
|
2022-01-13 17:29:43 +01:00
|
|
|
// acts as a semaphore to prevent us from indefinitely buffering
|
2018-03-13 00:34:03 +01:00
|
|
|
// incoming items from the wire. Either the msg queue isn't full, and
|
|
|
|
// we'll not block, or the queue is full, and we'll block until either
|
|
|
|
// we're signalled to quit, or a slot is freed up.
|
|
|
|
select {
|
|
|
|
case <-ms.producerSema:
|
2018-11-02 01:08:24 +01:00
|
|
|
case <-ms.peer.quit:
|
2018-08-27 04:58:41 +02:00
|
|
|
return
|
2018-03-13 00:34:03 +01:00
|
|
|
case <-ms.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, we'll lock the condition, and add the message to the end of
|
2017-08-01 06:20:36 +02:00
|
|
|
// the message queue.
|
2017-11-01 23:50:55 +01:00
|
|
|
ms.msgCond.L.Lock()
|
|
|
|
ms.msgs = append(ms.msgs, msg)
|
|
|
|
ms.msgCond.L.Unlock()
|
2017-08-01 06:20:36 +02:00
|
|
|
|
|
|
|
// With the message added, we signal to the msgConsumer that there are
|
|
|
|
// additional messages to consume.
|
2017-11-01 23:50:55 +01:00
|
|
|
ms.msgCond.Signal()
|
|
|
|
}
|
|
|
|
|
2020-03-07 04:43:51 +01:00
|
|
|
// waitUntilLinkActive waits until the target link is active and returns a
|
|
|
|
// ChannelLink to pass messages to. It accomplishes this by subscribing to
|
|
|
|
// an ActiveLinkEvent which is emitted by the link when it first starts up.
|
2020-07-02 23:46:06 +02:00
|
|
|
func waitUntilLinkActive(p *Brontide,
|
2021-08-03 21:06:26 +02:00
|
|
|
cid lnwire.ChannelID) htlcswitch.ChannelUpdateHandler {
|
2020-06-26 23:55:40 +02:00
|
|
|
|
2023-03-20 09:35:09 +01:00
|
|
|
p.log.Tracef("Waiting for link=%v to be active", cid)
|
|
|
|
|
2020-03-07 04:43:51 +01:00
|
|
|
// Subscribe to receive channel events.
|
|
|
|
//
|
|
|
|
// NOTE: If the link is already active by SubscribeChannelEvents, then
|
|
|
|
// GetLink will retrieve the link and we can send messages. If the link
|
|
|
|
// becomes active between SubscribeChannelEvents and GetLink, then GetLink
|
|
|
|
// will retrieve the link. If the link becomes active after GetLink, then
|
|
|
|
// we will get an ActiveLinkEvent notification and retrieve the link. If
|
|
|
|
// the call to GetLink is before SubscribeChannelEvents, however, there
|
|
|
|
// will be a race condition.
|
2020-06-30 03:29:22 +02:00
|
|
|
sub, err := p.cfg.ChannelNotifier.SubscribeChannelEvents()
|
2020-03-07 04:43:51 +01:00
|
|
|
if err != nil {
|
|
|
|
// If we have a non-nil error, then the server is shutting down and we
|
|
|
|
// can exit here and return nil. This means no message will be delivered
|
|
|
|
// to the link.
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
defer sub.Cancel()
|
|
|
|
|
|
|
|
// The link may already be active by this point, and we may have missed the
|
|
|
|
// ActiveLinkEvent. Check if the link exists.
|
2021-08-10 23:00:51 +02:00
|
|
|
link := p.fetchLinkFromKeyAndCid(cid)
|
|
|
|
if link != nil {
|
|
|
|
return link
|
2020-03-07 04:43:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// If the link is nil, we must wait for it to be active.
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
// A new event has been sent by the ChannelNotifier. We first check
|
|
|
|
// whether the event is an ActiveLinkEvent. If it is, we'll check
|
|
|
|
// that the event is for this channel. Otherwise, we discard the
|
|
|
|
// message.
|
|
|
|
case e := <-sub.Updates():
|
|
|
|
event, ok := e.(channelnotifier.ActiveLinkEvent)
|
|
|
|
if !ok {
|
|
|
|
// Ignore this notification.
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
chanPoint := event.ChannelPoint
|
|
|
|
|
|
|
|
// Check whether the retrieved chanPoint matches the target
|
|
|
|
// channel id.
|
|
|
|
if !cid.IsChanPoint(chanPoint) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// The link shouldn't be nil as we received an
|
|
|
|
// ActiveLinkEvent. If it is nil, we return nil and the
|
|
|
|
// calling function should catch it.
|
2021-08-10 23:00:51 +02:00
|
|
|
return p.fetchLinkFromKeyAndCid(cid)
|
2020-03-07 04:43:51 +01:00
|
|
|
|
|
|
|
case <-p.quit:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
// newChanMsgStream is used to create a msgStream between the peer and a
|
2020-12-01 14:31:42 +01:00
|
|
|
// particular channel link in the htlcswitch. We utilize additional
|
2017-11-01 23:50:55 +01:00
|
|
|
// synchronization with the fundingManager to ensure we don't attempt to
|
|
|
|
// dispatch a message to a channel before it is fully active. A reference to the
|
2020-12-01 14:31:42 +01:00
|
|
|
// channel this stream forwards to is held in scope to prevent unnecessary
|
2017-11-01 23:50:55 +01:00
|
|
|
// lookups.
|
2020-07-02 23:46:06 +02:00
|
|
|
func newChanMsgStream(p *Brontide, cid lnwire.ChannelID) *msgStream {
|
2021-08-03 21:06:26 +02:00
|
|
|
var chanLink htlcswitch.ChannelUpdateHandler
|
2017-11-01 23:50:55 +01:00
|
|
|
|
2020-06-27 03:07:00 +02:00
|
|
|
apply := func(msg lnwire.Message) {
|
|
|
|
// This check is fine because if the link no longer exists, it will
|
|
|
|
// be removed from the activeChannels map and subsequent messages
|
|
|
|
// shouldn't reach the chan msg stream.
|
|
|
|
if chanLink == nil {
|
|
|
|
chanLink = waitUntilLinkActive(p, cid)
|
|
|
|
|
|
|
|
// If the link is still not active and the calling function
|
|
|
|
// errored out, just return.
|
2017-11-01 23:50:55 +01:00
|
|
|
if chanLink == nil {
|
2023-03-20 09:35:09 +01:00
|
|
|
p.log.Warnf("Link=%v is not active")
|
2018-08-26 02:16:22 +02:00
|
|
|
return
|
|
|
|
}
|
2020-06-27 03:07:00 +02:00
|
|
|
}
|
2018-08-26 02:16:22 +02:00
|
|
|
|
2020-06-27 03:07:00 +02:00
|
|
|
// In order to avoid unnecessarily delivering messages
|
|
|
|
// as the peer is exiting, we'll check quickly to see
|
|
|
|
// if we need to exit.
|
|
|
|
select {
|
|
|
|
case <-p.quit:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
|
|
|
chanLink.HandleChannelUpdate(msg)
|
|
|
|
}
|
|
|
|
|
|
|
|
return newMsgStream(p,
|
|
|
|
fmt.Sprintf("Update stream for ChannelID(%x) created", cid[:]),
|
|
|
|
fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid[:]),
|
|
|
|
1000,
|
|
|
|
apply,
|
2017-11-01 23:50:55 +01:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
// newDiscMsgStream is used to setup a msgStream between the peer and the
|
|
|
|
// authenticated gossiper. This stream should be used to forward all remote
|
|
|
|
// channel announcements.
|
2020-07-02 23:46:06 +02:00
|
|
|
func newDiscMsgStream(p *Brontide) *msgStream {
|
|
|
|
apply := func(msg lnwire.Message) {
|
2022-11-20 18:18:23 +01:00
|
|
|
// TODO(yy): `ProcessRemoteAnnouncement` returns an error chan
|
|
|
|
// and we need to process it.
|
2020-07-02 23:46:06 +02:00
|
|
|
p.cfg.AuthGossiper.ProcessRemoteAnnouncement(msg, p)
|
|
|
|
}
|
|
|
|
|
|
|
|
return newMsgStream(
|
|
|
|
p,
|
2017-11-01 23:50:55 +01:00
|
|
|
"Update stream for gossiper created",
|
|
|
|
"Update stream for gossiper exited",
|
2018-03-13 00:34:03 +01:00
|
|
|
1000,
|
2020-07-02 23:46:06 +02:00
|
|
|
apply,
|
2017-11-01 23:50:55 +01:00
|
|
|
)
|
2017-08-01 06:20:36 +02:00
|
|
|
}
|
|
|
|
|
2016-06-21 21:32:32 +02:00
|
|
|
// readHandler is responsible for reading messages off the wire in series, then
|
2017-01-13 06:01:50 +01:00
|
|
|
// dispatching the handling of each message to the proper subsystem.
|
2016-06-21 21:32:32 +02:00
|
|
|
//
|
|
|
|
// NOTE: This method MUST be run as a goroutine.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) readHandler() {
|
2018-08-25 02:49:12 +02:00
|
|
|
defer p.wg.Done()
|
2017-08-09 01:51:19 +02:00
|
|
|
|
2017-10-16 00:13:27 +02:00
|
|
|
// We'll stop the timer after a new message is received, and also
|
|
|
|
// reset it after we process the next message.
|
|
|
|
idleTimer := time.AfterFunc(idleTimeout, func() {
|
2020-04-14 19:56:05 +02:00
|
|
|
err := fmt.Errorf("peer %s no answer for %s -- disconnecting",
|
2017-10-16 00:13:27 +02:00
|
|
|
p, idleTimeout)
|
|
|
|
p.Disconnect(err)
|
|
|
|
})
|
|
|
|
|
2019-01-19 03:30:55 +01:00
|
|
|
// Initialize our negotiated gossip sync method before reading messages
|
|
|
|
// off the wire. When using gossip queries, this ensures a gossip
|
|
|
|
// syncer is active by the time query messages arrive.
|
2018-09-04 06:58:48 +02:00
|
|
|
//
|
|
|
|
// TODO(conner): have peer store gossip syncer directly and bypass
|
|
|
|
// gossiper?
|
|
|
|
p.initGossipSync()
|
|
|
|
|
2017-11-01 23:50:55 +01:00
|
|
|
discStream := newDiscMsgStream(p)
|
|
|
|
discStream.Start()
|
|
|
|
defer discStream.Stop()
|
2016-01-14 06:41:46 +01:00
|
|
|
out:
|
|
|
|
for atomic.LoadInt32(&p.disconnect) == 0 {
|
2017-04-20 01:23:17 +02:00
|
|
|
nextMsg, err := p.readNextMessage()
|
2019-03-27 00:40:41 +01:00
|
|
|
if !idleTimer.Stop() {
|
|
|
|
select {
|
|
|
|
case <-idleTimer.C:
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
}
|
2016-01-14 06:41:46 +01:00
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Infof("unable to read message from peer: %v", err)
|
2017-01-17 03:03:34 +01:00
|
|
|
|
2020-03-17 07:22:35 +01:00
|
|
|
// If we could not read our peer's message due to an
|
|
|
|
// unknown type or invalid alias, we continue processing
|
|
|
|
// as normal. We store unknown message and address
|
|
|
|
// types, as they may provide debugging insight.
|
|
|
|
switch e := err.(type) {
|
2017-01-17 03:03:34 +01:00
|
|
|
// If this is just a message we don't yet recognize,
|
|
|
|
// we'll continue processing as normal as this allows
|
|
|
|
// us to introduce new messages in a forwards
|
|
|
|
// compatible manner.
|
|
|
|
case *lnwire.UnknownMessage:
|
2020-03-17 07:22:35 +01:00
|
|
|
p.storeError(e)
|
2017-10-16 00:13:27 +02:00
|
|
|
idleTimer.Reset(idleTimeout)
|
2017-01-17 03:03:34 +01:00
|
|
|
continue
|
|
|
|
|
2018-03-23 23:49:25 +01:00
|
|
|
// If they sent us an address type that we don't yet
|
2020-12-01 14:31:42 +01:00
|
|
|
// know of, then this isn't a wire error, so we'll
|
2018-03-23 23:49:25 +01:00
|
|
|
// simply continue parsing the remainder of their
|
|
|
|
// messages.
|
|
|
|
case *lnwire.ErrUnknownAddrType:
|
2020-03-17 07:22:35 +01:00
|
|
|
p.storeError(e)
|
2018-03-23 23:49:25 +01:00
|
|
|
idleTimer.Reset(idleTimeout)
|
|
|
|
continue
|
|
|
|
|
2019-01-04 06:11:41 +01:00
|
|
|
// If the NodeAnnouncement has an invalid alias, then
|
|
|
|
// we'll log that error above and continue reading
|
2020-03-17 07:22:35 +01:00
|
|
|
// messages from the peer. We do not
|
|
|
|
// store this error because it is of little debugging
|
|
|
|
// value.
|
2019-01-04 06:11:41 +01:00
|
|
|
case *lnwire.ErrInvalidNodeAlias:
|
|
|
|
idleTimer.Reset(idleTimeout)
|
|
|
|
continue
|
|
|
|
|
2017-01-17 03:03:34 +01:00
|
|
|
// If the error we encountered wasn't just a message we
|
2020-12-01 14:31:42 +01:00
|
|
|
// didn't recognize, then we'll stop all processing as
|
2017-01-17 03:03:34 +01:00
|
|
|
// this is a fatal error.
|
|
|
|
default:
|
|
|
|
break out
|
|
|
|
}
|
2016-01-14 06:41:46 +01:00
|
|
|
}
|
|
|
|
|
2024-01-31 03:00:11 +01:00
|
|
|
// If a message router is active, then we'll try to have it
|
|
|
|
// handle this message. If it can, then we're able to skip the
|
|
|
|
// rest of the message handling logic.
|
|
|
|
err = fn.MapOptionZ(p.msgRouter, func(r msgmux.Router) error {
|
|
|
|
return r.RouteMsg(msgmux.PeerMsg{
|
|
|
|
PeerPub: *p.IdentityKey(),
|
|
|
|
Message: nextMsg,
|
|
|
|
})
|
|
|
|
})
|
|
|
|
|
|
|
|
// No error occurred, and the message was handled by the
|
|
|
|
// router.
|
|
|
|
if err == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-02-21 03:10:05 +01:00
|
|
|
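// targetChan tracks which channel an incoming update is destined for,
// while isLinkUpdate marks whether the message should be fed into that
// channel's in-order message stream below.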
var (
|
2017-04-17 00:41:11 +02:00
|
|
|
targetChan lnwire.ChannelID
|
2019-09-25 21:01:23 +02:00
|
|
|
isLinkUpdate bool
|
2017-02-21 03:10:05 +01:00
|
|
|
)
|
2016-07-13 02:45:29 +02:00
|
|
|
|
2016-01-14 06:41:46 +01:00
|
|
|
switch msg := nextMsg.(type) {
|
2017-01-26 03:20:55 +01:00
|
|
|
case *lnwire.Pong:
|
|
|
|
// When we receive a Pong message in response to our
|
2023-07-14 19:24:10 +02:00
|
|
|
// last ping message, we send it to the pingManager.
|
|
|
|
p.pingManager.ReceivedPong(msg)
|
2017-01-26 03:20:55 +01:00
|
|
|
|
2016-11-11 02:15:25 +01:00
|
|
|
case *lnwire.Ping:
|
2021-08-13 00:30:55 +02:00
|
|
|
// First, we'll store their latest ping payload within
|
|
|
|
// the relevant atomic variable.
|
|
|
|
p.lastPingPayload.Store(msg.PaddingBytes[:])
|
|
|
|
|
|
|
|
// Next, we'll send over the amount of specified pong
|
|
|
|
// bytes.
|
2022-01-11 04:11:59 +01:00
|
|
|
pong := lnwire.NewPong(p.cfg.PongBuf[0:msg.NumPongBytes])
|
|
|
|
p.queueMsg(pong, nil)
|
2016-11-11 02:15:25 +01:00
|
|
|
|
2020-10-01 16:27:13 +02:00
|
|
|
case *lnwire.OpenChannel,
|
|
|
|
*lnwire.AcceptChannel,
|
|
|
|
*lnwire.FundingCreated,
|
|
|
|
*lnwire.FundingSigned,
|
2023-03-15 21:42:21 +01:00
|
|
|
*lnwire.ChannelReady:
|
2020-10-01 16:27:13 +02:00
|
|
|
|
|
|
|
p.cfg.FundingManager.ProcessFundingMsg(msg, p)
|
2017-03-25 02:26:09 +01:00
|
|
|
|
|
|
|
case *lnwire.Shutdown:
|
2017-09-28 05:18:20 +02:00
|
|
|
select {
|
2017-11-23 08:21:07 +01:00
|
|
|
case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
|
2017-09-28 05:18:20 +02:00
|
|
|
case <-p.quit:
|
|
|
|
break out
|
|
|
|
}
|
2017-03-09 00:32:11 +01:00
|
|
|
case *lnwire.ClosingSigned:
|
2017-09-28 05:18:20 +02:00
|
|
|
select {
|
2017-11-23 08:21:07 +01:00
|
|
|
case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
|
2017-09-28 05:18:20 +02:00
|
|
|
case <-p.quit:
|
|
|
|
break out
|
|
|
|
}
|
2016-10-15 15:24:56 +02:00
|
|
|
|
2022-05-16 18:19:07 +02:00
|
|
|
case *lnwire.Warning:
|
|
|
|
targetChan = msg.ChanID
|
2023-07-19 19:40:16 +02:00
|
|
|
isLinkUpdate = p.handleWarningOrError(targetChan, msg)
|
2022-05-16 18:19:07 +02:00
|
|
|
|
2017-04-17 00:41:11 +02:00
|
|
|
case *lnwire.Error:
|
2019-09-25 21:01:11 +02:00
|
|
|
targetChan = msg.ChanID
|
2023-07-19 19:40:16 +02:00
|
|
|
isLinkUpdate = p.handleWarningOrError(targetChan, msg)
|
2016-12-27 06:42:23 +01:00
|
|
|
|
2017-07-09 01:30:20 +02:00
|
|
|
case *lnwire.ChannelReestablish:
|
|
|
|
targetChan = msg.ChanID
|
2023-07-12 08:36:09 +02:00
|
|
|
isLinkUpdate = p.hasChannel(targetChan)
|
2019-09-25 21:01:34 +02:00
|
|
|
|
|
|
|
// If we failed to find the link in question, and the
|
|
|
|
// message received was a channel sync message, then
|
|
|
|
// this might be a peer trying to resync a closed channel.
|
|
|
|
// In this case we'll try to resend our last channel
|
|
|
|
// sync message, such that the peer can recover funds
|
|
|
|
// from the closed channel.
|
|
|
|
if !isLinkUpdate {
|
|
|
|
err := p.resendChanSyncMsg(targetChan)
|
|
|
|
if err != nil {
|
|
|
|
// TODO(halseth): send error to peer?
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("resend failed: %v",
|
2019-09-25 21:01:34 +02:00
|
|
|
err)
|
|
|
|
}
|
|
|
|
}
|
2016-12-27 06:42:23 +01:00
|
|
|
|
2023-07-12 08:36:09 +02:00
|
|
|
// For messages that implement the LinkUpdater interface, we
|
|
|
|
// will consider them as link updates and send them to
|
|
|
|
// chanStream. These messages will be queued inside chanStream
|
|
|
|
// if the channel is not active yet.
|
2024-03-03 20:53:39 +01:00
|
|
|
case lnwire.LinkUpdater:
|
2019-09-25 21:00:59 +02:00
|
|
|
targetChan = msg.TargetChanID()
|
2023-07-12 08:36:09 +02:00
|
|
|
isLinkUpdate = p.hasChannel(targetChan)
|
|
|
|
|
|
|
|
// Log an error if we don't have this channel. This
|
|
|
|
// means the peer has sent us a message with unknown
|
|
|
|
// channel ID.
|
|
|
|
if !isLinkUpdate {
|
|
|
|
p.log.Errorf("Unknown channel ID: %v found "+
|
|
|
|
"in received msg=%s", targetChan,
|
|
|
|
nextMsg.MsgType())
|
|
|
|
}
|
2019-09-25 21:00:59 +02:00
|
|
|
|
2024-08-21 08:39:37 +02:00
|
|
|
case *lnwire.ChannelUpdate1,
|
2024-08-21 08:37:50 +02:00
|
|
|
*lnwire.ChannelAnnouncement1,
|
2017-03-28 21:08:14 +02:00
|
|
|
*lnwire.NodeAnnouncement,
|
2024-08-21 08:34:57 +02:00
|
|
|
*lnwire.AnnounceSignatures1,
|
2018-04-17 04:09:57 +02:00
|
|
|
*lnwire.GossipTimestampRange,
|
|
|
|
*lnwire.QueryShortChanIDs,
|
|
|
|
*lnwire.QueryChannelRange,
|
|
|
|
*lnwire.ReplyChannelRange,
|
|
|
|
*lnwire.ReplyShortChanIDsEnd:
|
2016-12-27 06:42:23 +01:00
|
|
|
|
2018-11-02 01:08:24 +01:00
|
|
|
discStream.AddMsg(msg)
|
2017-11-01 23:50:55 +01:00
|
|
|
|
2021-05-31 12:06:48 +02:00
|
|
|
case *lnwire.Custom:
|
|
|
|
err := p.handleCustomMessage(msg)
|
|
|
|
if err != nil {
|
|
|
|
p.storeError(err)
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("%v", err)
|
2021-05-31 12:06:48 +02:00
|
|
|
}
|
|
|
|
|
2017-03-28 21:08:14 +02:00
|
|
|
default:
|
2020-03-17 07:22:35 +01:00
|
|
|
// If the message we received is unknown to us, store
|
|
|
|
// the type to track the failure.
|
|
|
|
err := fmt.Errorf("unknown message type %v received",
|
|
|
|
uint16(msg.MsgType()))
|
|
|
|
p.storeError(err)
|
|
|
|
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("%v", err)
|
2016-07-13 02:45:29 +02:00
|
|
|
}
|
|
|
|
|
2019-09-25 21:00:59 +02:00
|
|
|
if isLinkUpdate {
|
2017-08-01 06:25:46 +02:00
|
|
|
// If this is a channel update, then we need to feed it
|
|
|
|
// into the channel's in-order message stream.
|
2023-03-16 05:32:42 +01:00
|
|
|
p.sendLinkUpdateMsg(targetChan, nextMsg)
|
2016-01-14 06:41:46 +01:00
|
|
|
}
|
2017-10-16 00:13:27 +02:00
|
|
|
|
|
|
|
idleTimer.Reset(idleTimeout)
|
2016-01-14 06:41:46 +01:00
|
|
|
}
|
|
|
|
|
2017-07-12 15:44:17 +02:00
|
|
|
p.Disconnect(errors.New("read handler closed"))
|
2016-07-14 01:40:01 +02:00
|
|
|
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Trace("readHandler for peer done")
|
2016-01-14 06:41:46 +01:00
|
|
|
}
|
|
|
|
|
2021-05-31 12:06:48 +02:00
|
|
|
// handleCustomMessage handles the given custom message if a handler is
|
|
|
|
// registered.
|
|
|
|
func (p *Brontide) handleCustomMessage(msg *lnwire.Custom) error {
|
|
|
|
if p.cfg.HandleCustomMessage == nil {
|
|
|
|
return fmt.Errorf("no custom message handler for "+
|
|
|
|
"message type %v", uint16(msg.MsgType()))
|
|
|
|
}
|
|
|
|
|
|
|
|
return p.cfg.HandleCustomMessage(p.PubKey(), msg)
|
|
|
|
}
|
|
|
|
|
2023-03-16 03:04:53 +01:00
|
|
|
// isLoadedFromDisk returns true if the provided channel ID is loaded from
|
|
|
|
// disk.
|
|
|
|
//
|
|
|
|
// NOTE: only returns true for pending channels.
|
|
|
|
func (p *Brontide) isLoadedFromDisk(chanID lnwire.ChannelID) bool {
|
|
|
|
// If this is a newly added channel, no need to reestablish.
|
|
|
|
_, added := p.addedChannels.Load(chanID)
|
|
|
|
if added {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return false if the channel is unknown.
|
|
|
|
channel, ok := p.activeChannels.Load(chanID)
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// During startup, we will use a nil value to mark a pending channel
|
|
|
|
// that's loaded from disk.
|
|
|
|
return channel == nil
|
|
|
|
}
|
|
|
|
|
2019-09-25 21:01:23 +02:00
|
|
|
// isActiveChannel returns true if the provided channel id is active, otherwise
|
|
|
|
// returns false.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) isActiveChannel(chanID lnwire.ChannelID) bool {
|
2023-03-16 01:55:12 +01:00
|
|
|
// The channel would be nil if,
|
|
|
|
// - the channel doesn't exist, or,
|
|
|
|
// - the channel exists, but is pending. In this case, we don't
|
|
|
|
// consider this channel active.
|
|
|
|
channel, _ := p.activeChannels.Load(chanID)
|
|
|
|
|
|
|
|
return channel != nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// isPendingChannel returns true if the provided channel ID is pending, and
|
|
|
|
// returns false if the channel is active or unknown.
|
|
|
|
func (p *Brontide) isPendingChannel(chanID lnwire.ChannelID) bool {
|
|
|
|
// Return false if the channel is unknown.
|
|
|
|
channel, ok := p.activeChannels.Load(chanID)
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
return channel == nil
|
2019-09-25 21:01:23 +02:00
|
|
|
}
|
|
|
|
|
2023-07-12 08:36:09 +02:00
|
|
|
// hasChannel returns true if the peer has a pending/active channel specified
|
|
|
|
// by the channel ID.
|
|
|
|
func (p *Brontide) hasChannel(chanID lnwire.ChannelID) bool {
|
|
|
|
_, ok := p.activeChannels.Load(chanID)
|
|
|
|
return ok
|
|
|
|
}
|
|
|
|
|
2020-03-17 07:22:35 +01:00
|
|
|
// storeError stores an error in our peer's buffer of recent errors with the
|
|
|
|
// current timestamp. Errors are only stored if we have at least one active
|
2020-06-26 23:55:40 +02:00
|
|
|
// channel with the peer to mitigate a DoS vector where a peer costlessly
|
2020-03-17 07:22:35 +01:00
|
|
|
// connects to us and spams us with errors.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) storeError(err error) {
|
2020-03-07 04:43:51 +01:00
|
|
|
var haveChannels bool
|
|
|
|
|
2023-03-29 13:24:07 +02:00
|
|
|
p.activeChannels.Range(func(_ lnwire.ChannelID,
|
|
|
|
channel *lnwallet.LightningChannel) bool {
|
|
|
|
|
2020-03-07 04:43:51 +01:00
|
|
|
// Pending channels will be nil in the activeChannels map.
|
|
|
|
if channel == nil {
|
2023-03-29 13:24:07 +02:00
|
|
|
// Return true to continue the iteration.
|
|
|
|
return true
|
2020-03-07 04:43:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
haveChannels = true
|
2023-03-29 13:24:07 +02:00
|
|
|
|
|
|
|
// Return false to break the iteration.
|
|
|
|
return false
|
|
|
|
})
|
2020-03-17 07:22:35 +01:00
|
|
|
|
|
|
|
// If we do not have any active channels with the peer, we do not store
|
|
|
|
// errors as a DoS mitigation.
|
2020-03-07 04:43:51 +01:00
|
|
|
if !haveChannels {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Trace("no channels with peer, not storing err")
|
2020-03-17 07:22:35 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-06-30 03:29:22 +02:00
|
|
|
p.cfg.ErrorBuffer.Add(
|
2020-06-27 03:04:15 +02:00
|
|
|
&TimestampedError{Timestamp: time.Now(), Error: err},
|
2020-03-17 07:22:35 +01:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2023-07-19 19:40:16 +02:00
|
|
|
// handleWarningOrError processes a warning or error msg and returns true if
|
|
|
|
// msg should be forwarded to the associated channel link. False is returned if
|
|
|
|
// any necessary forwarding of msg was already handled by this method. If msg is
|
|
|
|
// an error from a peer with an active channel, we'll store it in memory.
|
2022-08-24 21:26:42 +02:00
|
|
|
//
|
|
|
|
// NOTE: This method should only be called from within the readHandler.
|
2023-07-19 19:40:16 +02:00
|
|
|
func (p *Brontide) handleWarningOrError(chanID lnwire.ChannelID,
|
|
|
|
msg lnwire.Message) bool {
|
2022-08-24 21:26:42 +02:00
|
|
|
|
2023-07-19 19:40:16 +02:00
|
|
|
if errMsg, ok := msg.(*lnwire.Error); ok {
|
|
|
|
p.storeError(errMsg)
|
2022-08-24 21:26:42 +02:00
|
|
|
}
|
2020-03-17 07:22:35 +01:00
|
|
|
|
2019-09-25 21:01:11 +02:00
|
|
|
switch {
|
2023-07-19 19:40:16 +02:00
|
|
|
// Connection wide messages should be forwarded to all channel links
|
|
|
|
// with this peer.
|
|
|
|
case chanID == lnwire.ConnectionWideID:
|
2020-03-06 22:33:09 +01:00
|
|
|
for _, chanStream := range p.activeMsgStreams {
|
2019-09-25 21:01:11 +02:00
|
|
|
chanStream.AddMsg(msg)
|
|
|
|
}
|
2023-07-19 19:40:16 +02:00
|
|
|
|
2019-09-25 21:01:11 +02:00
|
|
|
return false
|
|
|
|
|
2023-07-19 19:40:16 +02:00
|
|
|
// If the channel ID for the message corresponds to a pending channel,
|
|
|
|
// then the funding manager will handle it.
|
|
|
|
case p.cfg.FundingManager.IsPendingChannel(chanID, p):
|
2020-10-01 16:27:13 +02:00
|
|
|
p.cfg.FundingManager.ProcessFundingMsg(msg, p)
|
2019-09-25 21:01:11 +02:00
|
|
|
return false
|
|
|
|
|
2023-07-19 19:40:16 +02:00
|
|
|
// If not we hand the message to the channel link for this channel.
|
|
|
|
case p.isActiveChannel(chanID):
|
2019-09-25 21:01:11 +02:00
|
|
|
return true
|
2019-09-25 21:01:23 +02:00
|
|
|
|
|
|
|
default:
|
|
|
|
return false
|
2019-09-25 21:01:11 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-18 04:13:19 +02:00
|
|
|
// messageSummary returns a human-readable string that summarizes an
|
|
|
|
// incoming/outgoing message. Not all messages will have a summary, only those
|
|
|
|
// which have additional data that can be informative at a glance.
|
|
|
|
func messageSummary(msg lnwire.Message) string {
|
|
|
|
switch msg := msg.(type) {
|
|
|
|
case *lnwire.Init:
|
|
|
|
// No summary.
|
|
|
|
return ""
|
|
|
|
|
|
|
|
case *lnwire.OpenChannel:
|
2017-10-18 04:35:10 +02:00
|
|
|
return fmt.Sprintf("temp_chan_id=%x, chain=%v, csv=%v, amt=%v, "+
|
2017-10-18 04:13:19 +02:00
|
|
|
"push_amt=%v, reserve=%v, flags=%v",
|
2017-10-18 04:35:10 +02:00
|
|
|
msg.PendingChannelID[:], msg.ChainHash,
|
2017-10-18 04:13:19 +02:00
|
|
|
msg.CsvDelay, msg.FundingAmount, msg.PushAmount,
|
|
|
|
msg.ChannelReserve, msg.ChannelFlags)
|
|
|
|
|
|
|
|
case *lnwire.AcceptChannel:
|
2017-11-28 00:32:06 +01:00
|
|
|
return fmt.Sprintf("temp_chan_id=%x, reserve=%v, csv=%v, num_confs=%v",
|
|
|
|
msg.PendingChannelID[:], msg.ChannelReserve, msg.CsvDelay,
|
|
|
|
msg.MinAcceptDepth)
|
2017-10-18 04:13:19 +02:00
|
|
|
|
|
|
|
case *lnwire.FundingCreated:
|
|
|
|
return fmt.Sprintf("temp_chan_id=%x, chan_point=%v",
|
|
|
|
msg.PendingChannelID[:], msg.FundingPoint)
|
|
|
|
|
|
|
|
case *lnwire.FundingSigned:
|
|
|
|
return fmt.Sprintf("chan_id=%v", msg.ChanID)
|
|
|
|
|
2023-03-15 21:42:21 +01:00
|
|
|
case *lnwire.ChannelReady:
|
2017-10-18 04:13:19 +02:00
|
|
|
return fmt.Sprintf("chan_id=%v, next_point=%x",
|
|
|
|
msg.ChanID, msg.NextPerCommitmentPoint.SerializeCompressed())
|
|
|
|
|
|
|
|
case *lnwire.Shutdown:
|
|
|
|
return fmt.Sprintf("chan_id=%v, script=%x", msg.ChannelID,
|
|
|
|
msg.Address[:])
|
|
|
|
|
|
|
|
case *lnwire.ClosingSigned:
|
|
|
|
return fmt.Sprintf("chan_id=%v, fee_sat=%v", msg.ChannelID,
|
|
|
|
msg.FeeSatoshis)
|
|
|
|
|
|
|
|
case *lnwire.UpdateAddHTLC:
|
2024-03-27 14:38:50 +01:00
|
|
|
var blindingPoint []byte
|
|
|
|
msg.BlindingPoint.WhenSome(
|
|
|
|
func(b tlv.RecordT[lnwire.BlindingPointTlvType,
|
|
|
|
*btcec.PublicKey]) {
|
|
|
|
|
|
|
|
blindingPoint = b.Val.SerializeCompressed()
|
|
|
|
},
|
|
|
|
)
|
|
|
|
|
|
|
|
return fmt.Sprintf("chan_id=%v, id=%v, amt=%v, expiry=%v, "+
|
2024-04-13 13:29:41 +02:00
|
|
|
"hash=%x, blinding_point=%x, custom_records=%v",
|
|
|
|
msg.ChanID, msg.ID, msg.Amount, msg.Expiry,
|
|
|
|
msg.PaymentHash[:], blindingPoint, msg.CustomRecords)
|
2017-10-18 04:13:19 +02:00
|
|
|
|
|
|
|
case *lnwire.UpdateFailHTLC:
|
2017-10-21 00:40:50 +02:00
|
|
|
return fmt.Sprintf("chan_id=%v, id=%v, reason=%x", msg.ChanID,
|
2017-10-18 04:13:19 +02:00
|
|
|
msg.ID, msg.Reason)
|
|
|
|
|
2018-02-07 04:11:11 +01:00
|
|
|
case *lnwire.UpdateFulfillHTLC:
|
2024-05-03 17:22:05 +02:00
|
|
|
return fmt.Sprintf("chan_id=%v, id=%v, pre_image=%x, "+
|
|
|
|
"custom_records=%v", msg.ChanID, msg.ID,
|
|
|
|
msg.PaymentPreimage[:], msg.CustomRecords)
|
2017-10-18 04:13:19 +02:00
|
|
|
|
|
|
|
case *lnwire.CommitSig:
|
|
|
|
return fmt.Sprintf("chan_id=%v, num_htlcs=%v", msg.ChanID,
|
|
|
|
len(msg.HtlcSigs))
|
|
|
|
|
|
|
|
case *lnwire.RevokeAndAck:
|
|
|
|
return fmt.Sprintf("chan_id=%v, rev=%x, next_point=%x",
|
|
|
|
msg.ChanID, msg.Revocation[:],
|
|
|
|
msg.NextRevocationKey.SerializeCompressed())
|
|
|
|
|
|
|
|
case *lnwire.UpdateFailMalformedHTLC:
|
|
|
|
return fmt.Sprintf("chan_id=%v, id=%v, fail_code=%v",
|
|
|
|
msg.ChanID, msg.ID, msg.FailureCode)
|
|
|
|
|
2022-05-16 18:19:07 +02:00
|
|
|
case *lnwire.Warning:
|
2022-08-24 21:26:42 +02:00
|
|
|
return fmt.Sprintf("%v", msg.Warning())
|
2022-05-16 18:19:07 +02:00
|
|
|
|
2017-10-18 04:13:19 +02:00
|
|
|
case *lnwire.Error:
|
2019-09-20 10:55:21 +02:00
|
|
|
return fmt.Sprintf("%v", msg.Error())
|
2017-10-18 04:13:19 +02:00
|
|
|
|
2024-08-21 08:34:57 +02:00
|
|
|
case *lnwire.AnnounceSignatures1:
|
2017-10-18 04:13:19 +02:00
|
|
|
return fmt.Sprintf("chan_id=%v, short_chan_id=%v", msg.ChannelID,
|
|
|
|
msg.ShortChannelID.ToUint64())
|
|
|
|
|
2024-08-21 08:37:50 +02:00
|
|
|
case *lnwire.ChannelAnnouncement1:
|
2017-10-18 04:35:10 +02:00
|
|
|
return fmt.Sprintf("chain_hash=%v, short_chan_id=%v",
|
|
|
|
msg.ChainHash, msg.ShortChannelID.ToUint64())
|
2017-10-18 04:13:19 +02:00
|
|
|
|
2024-08-21 08:39:37 +02:00
|
|
|
case *lnwire.ChannelUpdate1:
|
2019-01-12 18:59:43 +01:00
|
|
|
return fmt.Sprintf("chain_hash=%v, short_chan_id=%v, "+
|
|
|
|
"mflags=%v, cflags=%v, update_time=%v", msg.ChainHash,
|
|
|
|
msg.ShortChannelID.ToUint64(), msg.MessageFlags,
|
|
|
|
msg.ChannelFlags, time.Unix(int64(msg.Timestamp), 0))
|
2017-10-18 04:13:19 +02:00
|
|
|
|
|
|
|
case *lnwire.NodeAnnouncement:
|
|
|
|
return fmt.Sprintf("node=%x, update_time=%v",
|
2018-01-31 05:30:00 +01:00
|
|
|
msg.NodeID, time.Unix(int64(msg.Timestamp), 0))
|
2017-10-18 04:13:19 +02:00
|
|
|
|
|
|
|
case *lnwire.Ping:
|
2021-08-13 00:30:13 +02:00
|
|
|
return fmt.Sprintf("ping_bytes=%x", msg.PaddingBytes[:])
|
2017-10-18 04:13:19 +02:00
|
|
|
|
|
|
|
case *lnwire.Pong:
|
2023-09-15 00:30:49 +02:00
|
|
|
return fmt.Sprintf("len(pong_bytes)=%d", len(msg.PongBytes[:]))
|
2017-10-18 04:13:19 +02:00
|
|
|
|
|
|
|
case *lnwire.UpdateFee:
|
|
|
|
return fmt.Sprintf("chan_id=%v, fee_update_sat=%v",
|
|
|
|
msg.ChanID, int64(msg.FeePerKw))
|
2017-11-11 00:40:15 +01:00
|
|
|
|
|
|
|
case *lnwire.ChannelReestablish:
|
2023-11-22 16:12:49 +01:00
|
|
|
return fmt.Sprintf("chan_id=%v, next_local_height=%v, "+
|
|
|
|
"remote_tail_height=%v", msg.ChanID,
|
2017-11-11 00:40:15 +01:00
|
|
|
msg.NextLocalCommitHeight, msg.RemoteCommitTailHeight)
|
2018-04-17 04:10:15 +02:00
|
|
|
|
|
|
|
case *lnwire.ReplyShortChanIDsEnd:
|
|
|
|
return fmt.Sprintf("chain_hash=%v, complete=%v", msg.ChainHash,
|
|
|
|
msg.Complete)
|
|
|
|
|
|
|
|
case *lnwire.ReplyChannelRange:
|
2019-12-14 01:09:16 +01:00
|
|
|
return fmt.Sprintf("start_height=%v, end_height=%v, "+
|
|
|
|
"num_chans=%v, encoding=%v", msg.FirstBlockHeight,
|
|
|
|
msg.LastBlockHeight(), len(msg.ShortChanIDs),
|
|
|
|
msg.EncodingType)
|
2018-04-17 04:10:15 +02:00
|
|
|
|
|
|
|
case *lnwire.QueryShortChanIDs:
|
|
|
|
return fmt.Sprintf("chain_hash=%v, encoding=%v, num_chans=%v",
|
|
|
|
msg.ChainHash, msg.EncodingType, len(msg.ShortChanIDs))
|
|
|
|
|
|
|
|
case *lnwire.QueryChannelRange:
|
|
|
|
return fmt.Sprintf("chain_hash=%v, start_height=%v, "+
|
2019-12-14 01:09:16 +01:00
|
|
|
"end_height=%v", msg.ChainHash, msg.FirstBlockHeight,
|
|
|
|
msg.LastBlockHeight())
|
2018-04-17 04:10:15 +02:00
|
|
|
|
|
|
|
case *lnwire.GossipTimestampRange:
|
|
|
|
return fmt.Sprintf("chain_hash=%v, first_stamp=%v, "+
|
|
|
|
"stamp_range=%v", msg.ChainHash,
|
|
|
|
time.Unix(int64(msg.FirstTimestamp), 0),
|
|
|
|
msg.TimestampRange)
|
|
|
|
|
2024-03-12 19:34:57 +01:00
|
|
|
case *lnwire.Stfu:
|
|
|
|
return fmt.Sprintf("chan_id=%v, initiator=%v", msg.ChanID,
|
|
|
|
msg.Initiator)
|
|
|
|
|
2021-05-31 12:06:48 +02:00
|
|
|
case *lnwire.Custom:
|
|
|
|
return fmt.Sprintf("type=%d", msg.Type)
|
2017-10-18 04:13:19 +02:00
|
|
|
}
|
|
|
|
|
2022-11-20 04:14:05 +01:00
|
|
|
return fmt.Sprintf("unknown msg type=%T", msg)
|
2017-10-18 04:13:19 +02:00
|
|
|
}
|
|
|
|
|
2017-01-15 02:52:05 +01:00
|
|
|
// logWireMessage logs the receipt or sending of a particular wire message. This
|
|
|
|
// function is used rather than just logging the message in order to produce
|
|
|
|
// less spammy log messages in trace mode by setting the 'Curve' parameter to
|
|
|
|
// nil. Doing this avoids printing out each of the field elements in the curve
|
|
|
|
// parameters for secp256k1.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) logWireMessage(msg lnwire.Message, read bool) {
|
2017-10-18 04:13:19 +02:00
|
|
|
summaryPrefix := "Received"
|
|
|
|
if !read {
|
|
|
|
summaryPrefix = "Sending"
|
|
|
|
}
|
|
|
|
|
2024-07-24 13:31:21 +02:00
|
|
|
p.log.Debugf("%v", lnutils.NewLogClosure(func() string {
|
2017-10-18 04:13:19 +02:00
|
|
|
// Debug summary of message.
|
|
|
|
summary := messageSummary(msg)
|
|
|
|
if len(summary) > 0 {
|
|
|
|
summary = "(" + summary + ")"
|
|
|
|
}
|
|
|
|
|
2017-10-20 04:45:29 +02:00
|
|
|
preposition := "to"
|
|
|
|
if read {
|
|
|
|
preposition = "from"
|
|
|
|
}
|
|
|
|
|
2021-05-31 12:06:48 +02:00
|
|
|
var msgType string
|
|
|
|
if msg.MsgType() < lnwire.CustomTypeStart {
|
|
|
|
msgType = msg.MsgType().String()
|
|
|
|
} else {
|
|
|
|
msgType = "custom"
|
|
|
|
}
|
|
|
|
|
2017-10-20 04:45:29 +02:00
|
|
|
return fmt.Sprintf("%v %v%s %v %s", summaryPrefix,
|
2021-05-31 12:06:48 +02:00
|
|
|
msgType, summary, preposition, p)
|
2017-10-18 04:13:19 +02:00
|
|
|
}))
|
|
|
|
|
2022-05-28 10:10:09 +02:00
|
|
|
prefix := "readMessage from peer"
|
2017-01-15 02:52:05 +01:00
|
|
|
if !read {
|
2022-05-28 10:10:09 +02:00
|
|
|
prefix = "writeMessage to peer"
|
2017-01-15 02:52:05 +01:00
|
|
|
}
|
|
|
|
|
2024-07-25 16:18:00 +02:00
|
|
|
p.log.Tracef(prefix+": %v", lnutils.SpewLogClosure(msg))
|
2017-01-15 02:52:05 +01:00
|
|
|
}
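// The closure passed to Debugf above is evaluated lazily: the message
// summary is only built if the debug log level is actually enabled. The
// same pattern can be reproduced with any fmt.Stringer; a minimal sketch
// (the logClosure type and expensiveSummary helper below are
// illustrative, not part of this package):
//
//	type logClosure func() string
//
//	func (c logClosure) String() string { return c() }
//
//	log.Debugf("%v", logClosure(func() string {
//		return expensiveSummary()
//	}))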
|
|
|
|
|
2019-04-23 01:05:26 +02:00
|
|
|
// writeMessage writes and flushes the target lnwire.Message to the remote peer.
|
|
|
|
// If the passed message is nil, this method will only try to flush an existing
|
2020-06-26 23:55:40 +02:00
|
|
|
// message buffered on the connection. It is safe to call this method again
|
|
|
|
// with a nil message iff a timeout error is returned. This will continue to
|
|
|
|
// flush the pending message to the wire.
|
2023-11-16 17:02:23 +01:00
|
|
|
//
|
|
|
|
// NOTE:
|
|
|
|
// Besides its usage in Start, this function should not be used anywhere
|
|
|
|
// other than writeHandler. If multiple goroutines call writeMessage at the same
|
|
|
|
// time, panics can occur because WriteMessage and Flush don't use any locking
|
|
|
|
// internally.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) writeMessage(msg lnwire.Message) error {
|
2019-04-23 01:05:26 +02:00
|
|
|
// Only log the message on the first attempt.
|
|
|
|
if msg != nil {
|
|
|
|
p.logWireMessage(msg, false)
|
|
|
|
}
|
2016-06-21 21:32:32 +02:00
|
|
|
|
2020-09-24 16:19:54 +02:00
|
|
|
noiseConn := p.cfg.Conn
|
2016-01-14 06:41:46 +01:00
|
|
|
|
2019-04-23 01:05:26 +02:00
|
|
|
flushMsg := func() error {
|
2019-02-22 05:10:51 +01:00
|
|
|
// Ensure the write deadline is set before we attempt to send
|
|
|
|
// the message.
|
2024-05-16 20:19:20 +02:00
|
|
|
writeDeadline := time.Now().Add(
|
|
|
|
p.scaleTimeout(writeMessageTimeout),
|
|
|
|
)
|
2019-04-23 01:05:26 +02:00
|
|
|
err := noiseConn.SetWriteDeadline(writeDeadline)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Flush the pending message to the wire. If an error is
|
|
|
|
// encountered, e.g. write timeout, the number of bytes written
|
|
|
|
// so far will be returned.
|
|
|
|
n, err := noiseConn.Flush()
|
|
|
|
|
|
|
|
// Record the number of bytes written on the wire, if any.
|
|
|
|
if n > 0 {
|
|
|
|
atomic.AddUint64(&p.bytesSent, uint64(n))
|
|
|
|
}
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the current message has already been serialized, encrypted, and
|
|
|
|
// buffered on the underlying connection we will skip straight to
|
|
|
|
// flushing it to the wire.
|
|
|
|
if msg == nil {
|
|
|
|
return flushMsg()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, this is a new message. We'll acquire a write buffer to
|
|
|
|
// serialize the message and buffer the ciphertext on the connection.
|
2020-06-30 03:29:22 +02:00
|
|
|
err := p.cfg.WritePool.Submit(func(buf *bytes.Buffer) error {
|
2019-04-23 01:05:26 +02:00
|
|
|
// Using a buffer allocated by the write pool, encode the
|
|
|
|
// message directly into the buffer.
|
|
|
|
_, writeErr := lnwire.WriteMessage(buf, msg, 0)
|
2019-02-22 05:10:51 +01:00
|
|
|
if writeErr != nil {
|
|
|
|
return writeErr
|
|
|
|
}
|
2017-10-16 00:19:45 +02:00
|
|
|
|
2019-04-23 01:05:26 +02:00
|
|
|
// Finally, write the message itself in a single swoop. This
|
|
|
|
// will buffer the ciphertext on the underlying connection. We
|
|
|
|
// will defer flushing the message until the write pool has been
|
|
|
|
// released.
|
|
|
|
return noiseConn.WriteMessage(buf.Bytes())
|
2019-02-22 05:10:51 +01:00
|
|
|
})
|
2019-04-23 01:05:26 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2019-02-22 05:10:51 +01:00
|
|
|
}
|
2019-01-11 04:49:44 +01:00
|
|
|
|
2019-04-23 01:05:26 +02:00
|
|
|
return flushMsg()
|
2016-01-14 06:41:46 +01:00
|
|
|
}
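// A minimal sketch of the retry contract described above (hypothetical
// caller, for illustration only): after a timeout, the call is repeated
// with a nil message so that only the already-buffered ciphertext is
// flushed, without re-serializing or re-encrypting it.
//
//	err := p.writeMessage(msg)
//	if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
//		// Retry the flush only; the message is already buffered
//		// on the connection.
//		err = p.writeMessage(nil)
//	}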
|
|
|
|
|
2016-06-21 21:32:32 +02:00
|
|
|
// writeHandler is a goroutine dedicated to reading messages off of an incoming
|
|
|
|
// queue, and writing them out to the wire. This goroutine coordinates with the
|
2017-10-16 00:19:45 +02:00
|
|
|
// queueHandler in order to ensure the incoming message queue is quickly
|
|
|
|
// drained.
|
2016-06-21 21:32:32 +02:00
|
|
|
//
|
|
|
|
// NOTE: This method MUST be run as a goroutine.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) writeHandler() {
|
2019-03-27 00:40:41 +01:00
|
|
|
// We'll stop the timer after a new message is sent, and also reset it
|
|
|
|
// after we process the next message.
|
|
|
|
idleTimer := time.AfterFunc(idleTimeout, func() {
|
2020-04-14 19:56:05 +02:00
|
|
|
err := fmt.Errorf("peer %s no write for %s -- disconnecting",
|
2019-03-27 00:40:41 +01:00
|
|
|
p, idleTimeout)
|
|
|
|
p.Disconnect(err)
|
|
|
|
})
|
|
|
|
|
2017-09-28 05:22:40 +02:00
|
|
|
var exitErr error
|
2018-02-22 23:28:31 +01:00
|
|
|
|
2017-09-28 05:22:40 +02:00
|
|
|
out:
|
2016-01-14 06:41:46 +01:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case outMsg := <-p.sendQueue:
|
2019-03-27 00:40:07 +01:00
|
|
|
// Record the time at which we first attempt to send the
|
|
|
|
// message.
|
|
|
|
startTime := time.Now()
|
|
|
|
|
2019-04-23 01:05:41 +02:00
|
|
|
retry:
|
2019-03-27 00:40:07 +01:00
|
|
|
// Write out the message to the socket. If a timeout
|
|
|
|
// error is encountered, we will catch this and retry
|
|
|
|
// after backing off in case the remote peer is just
|
|
|
|
// slow to process messages from the wire.
|
2017-02-02 02:01:33 +01:00
|
|
|
err := p.writeMessage(outMsg.msg)
|
2019-03-27 00:40:07 +01:00
|
|
|
if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Debugf("Write timeout detected for "+
|
|
|
|
"peer, first write for message "+
|
|
|
|
"attempted %v ago",
|
2019-03-27 00:40:07 +01:00
|
|
|
time.Since(startTime))
|
|
|
|
|
2019-04-23 01:05:26 +02:00
|
|
|
// If we received a timeout error, this implies
|
|
|
|
// that the message was buffered on the
|
|
|
|
// connection successfully and that a flush was
|
|
|
|
// attempted. We'll set the message to nil so
|
|
|
|
// that on a subsequent pass we only try to
|
|
|
|
// flush the buffered message, and forgo
|
|
|
|
// reserializing or reencrypting it.
|
|
|
|
outMsg.msg = nil
|
|
|
|
|
2019-04-23 01:05:41 +02:00
|
|
|
goto retry
|
2019-03-27 00:40:07 +01:00
|
|
|
}
|
|
|
|
|
2019-03-27 00:40:41 +01:00
|
|
|
// The write succeeded, so reset the idle timer to prevent
|
|
|
|
// us from disconnecting the peer.
|
|
|
|
if !idleTimer.Stop() {
|
|
|
|
select {
|
|
|
|
case <-idleTimer.C:
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
idleTimer.Reset(idleTimeout)
|
|
|
|
|
2019-03-27 00:40:07 +01:00
|
|
|
// If the peer requested a synchronous write, respond
|
|
|
|
// with the error.
|
2017-11-16 03:23:46 +01:00
|
|
|
if outMsg.errChan != nil {
|
|
|
|
outMsg.errChan <- err
|
2017-02-02 02:01:33 +01:00
|
|
|
}
|
2016-01-14 06:41:46 +01:00
|
|
|
|
2017-02-02 02:01:33 +01:00
|
|
|
if err != nil {
|
2018-08-31 23:54:35 +02:00
|
|
|
exitErr = fmt.Errorf("unable to write "+
|
|
|
|
"message: %v", err)
|
2017-09-28 05:22:40 +02:00
|
|
|
break out
|
2016-01-14 06:41:46 +01:00
|
|
|
}
|
2017-02-02 02:01:33 +01:00
|
|
|
|
|
|
|
case <-p.quit:
|
2019-04-27 02:31:27 +02:00
|
|
|
exitErr = lnpeer.ErrPeerExiting
|
2017-09-28 05:22:40 +02:00
|
|
|
break out
|
2016-01-14 06:41:46 +01:00
|
|
|
}
|
|
|
|
}
|
2017-09-28 05:22:40 +02:00
|
|
|
|
2019-07-09 17:26:27 +02:00
|
|
|
// Avoid an exit deadlock by ensuring WaitGroups are decremented before
|
|
|
|
// disconnect.
|
2017-09-28 05:22:40 +02:00
|
|
|
p.wg.Done()
|
|
|
|
|
|
|
|
p.Disconnect(exitErr)
|
|
|
|
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Trace("writeHandler for peer done")
|
2016-01-14 06:41:46 +01:00
|
|
|
}
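// The idleTimer handling above follows the standard time.Timer reset
// idiom: Stop reports false if the timer has already fired, in which case
// the stale expiry must be drained from the channel before Reset, or a
// later receive could observe the old value. A minimal sketch of the
// idiom, assuming some generic *time.Timer t:
//
//	if !t.Stop() {
//		select {
//		case <-t.C: // Drain the stale expiry, if any.
//		default:
//		}
//	}
//	t.Reset(idleTimeout)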
|
|
|
|
|
2017-01-13 06:01:50 +01:00
|
|
|
// queueHandler is responsible for accepting messages from outside subsystems
|
2016-06-21 21:32:32 +02:00
|
|
|
// to be eventually sent out on the wire by the writeHandler.
|
|
|
|
//
|
|
|
|
// NOTE: This method MUST be run as a goroutine.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) queueHandler() {
|
2017-02-02 02:01:33 +01:00
|
|
|
defer p.wg.Done()
|
|
|
|
|
2019-03-06 02:08:05 +01:00
|
|
|
// priorityMsgs holds an in-order list of messages deemed high-priority
|
|
|
|
// to be added to the sendQueue. This predominantly includes messages
|
|
|
|
// from the funding manager and htlcswitch.
|
|
|
|
priorityMsgs := list.New()
|
|
|
|
|
|
|
|
// lazyMsgs holds an in-order list of messages deemed low-priority to be
|
|
|
|
// added to the sendQueue only after all high-priority messages have
|
|
|
|
// been queued. This predominantly includes messages from the gossiper.
|
|
|
|
lazyMsgs := list.New()
|
2017-02-02 02:01:33 +01:00
|
|
|
|
2017-11-16 02:56:33 +01:00
|
|
|
for {
|
2019-03-06 02:08:05 +01:00
|
|
|
// Examine the front of the priority queue; if it is empty, check
|
|
|
|
// the low-priority queue.
|
|
|
|
elem := priorityMsgs.Front()
|
|
|
|
if elem == nil {
|
|
|
|
elem = lazyMsgs.Front()
|
|
|
|
}
|
|
|
|
|
2017-11-16 02:56:33 +01:00
|
|
|
if elem != nil {
|
2019-03-06 02:08:05 +01:00
|
|
|
front := elem.Value.(outgoingMsg)
|
|
|
|
|
2017-11-16 02:56:33 +01:00
|
|
|
// There's an element on the queue, so try adding
|
|
|
|
// it to the sendQueue. We also watch for
|
|
|
|
// messages on the outgoingQueue, in case the
|
|
|
|
// writeHandler cannot accept messages on the
|
|
|
|
// sendQueue.
|
2017-02-02 02:01:33 +01:00
|
|
|
select {
|
2019-03-06 02:08:05 +01:00
|
|
|
case p.sendQueue <- front:
|
|
|
|
if front.priority {
|
|
|
|
priorityMsgs.Remove(elem)
|
|
|
|
} else {
|
|
|
|
lazyMsgs.Remove(elem)
|
|
|
|
}
|
2017-11-16 02:56:33 +01:00
|
|
|
case msg := <-p.outgoingQueue:
|
2019-03-06 02:08:05 +01:00
|
|
|
if msg.priority {
|
|
|
|
priorityMsgs.PushBack(msg)
|
|
|
|
} else {
|
|
|
|
lazyMsgs.PushBack(msg)
|
|
|
|
}
|
2017-11-16 02:56:33 +01:00
|
|
|
case <-p.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// If there weren't any messages to send to the
|
|
|
|
// writeHandler, then we'll accept a new message
|
|
|
|
// into the queue from outside sub-systems.
|
|
|
|
select {
|
|
|
|
case msg := <-p.outgoingQueue:
|
2019-03-06 02:08:05 +01:00
|
|
|
if msg.priority {
|
|
|
|
priorityMsgs.PushBack(msg)
|
|
|
|
} else {
|
|
|
|
lazyMsgs.PushBack(msg)
|
|
|
|
}
|
2017-02-02 02:01:33 +01:00
|
|
|
case <-p.quit:
|
|
|
|
return
|
2016-01-14 06:41:46 +01:00
|
|
|
}
|
2017-02-02 02:01:33 +01:00
|
|
|
}
|
|
|
|
}
|
2015-12-20 22:16:38 +01:00
|
|
|
}
|
2016-06-21 21:32:32 +02:00
|
|
|
|
2017-01-26 03:20:55 +01:00
|
|
|
// PingTime returns the estimated ping time to the peer in microseconds.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) PingTime() int64 {
|
2023-07-14 19:24:10 +02:00
|
|
|
return p.pingManager.GetPingTimeMicroSeconds()
|
2017-01-26 03:20:55 +01:00
|
|
|
}
|
|
|
|
|
2019-03-06 02:08:05 +01:00
|
|
|
// queueMsg adds the lnwire.Message to the back of the high priority send queue.
|
|
|
|
// If the errChan is non-nil, an error is sent back if the msg failed to queue
|
|
|
|
// or failed to write, and nil otherwise.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) queueMsg(msg lnwire.Message, errChan chan error) {
|
2019-03-06 02:08:05 +01:00
|
|
|
p.queue(true, msg, errChan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// queueMsgLazy adds the lnwire.Message to the back of the low priority send
|
|
|
|
// queue. If the errChan is non-nil, an error is sent back if the msg failed to
|
|
|
|
// queue or failed to write, and nil otherwise.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) queueMsgLazy(msg lnwire.Message, errChan chan error) {
|
2019-03-06 02:08:05 +01:00
|
|
|
p.queue(false, msg, errChan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// queue sends a given message to the queueHandler using the passed priority. If
|
|
|
|
// the errChan is non-nil, an error is sent back if the msg failed to queue or
|
|
|
|
// failed to write, and nil otherwise.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) queue(priority bool, msg lnwire.Message,
|
2020-06-26 23:55:40 +02:00
|
|
|
errChan chan error) {
|
2020-06-30 00:01:45 +02:00
|
|
|
|
2016-12-20 02:00:18 +01:00
|
|
|
select {
|
2019-03-06 02:08:05 +01:00
|
|
|
case p.outgoingQueue <- outgoingMsg{priority, msg, errChan}:
|
2016-12-20 02:00:18 +01:00
|
|
|
case <-p.quit:
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Tracef("Peer shutting down, could not enqueue msg: %v.",
|
2020-06-26 23:55:40 +02:00
|
|
|
spew.Sdump(msg))
|
2017-11-16 03:23:46 +01:00
|
|
|
if errChan != nil {
|
2019-04-27 02:31:27 +02:00
|
|
|
errChan <- lnpeer.ErrPeerExiting
|
2017-11-16 03:23:46 +01:00
|
|
|
}
|
2016-12-20 02:00:18 +01:00
|
|
|
}
|
2016-06-21 21:32:32 +02:00
|
|
|
}
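// A synchronous send can be built on top of queue by passing a buffered
// error channel and blocking on it; buffering the channel ensures the
// writeHandler never blocks when reporting the result. A minimal sketch
// (hypothetical caller, for illustration only):
//
//	errChan := make(chan error, 1)
//	p.queueMsg(msg, errChan)
//	if err := <-errChan; err != nil {
//		// The message failed to queue or to be written.
//	}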
|
|
|
|
|
2016-11-11 02:15:25 +01:00
|
|
|
// ChannelSnapshots returns a slice of channel snapshots detailing all
|
|
|
|
// currently active channels maintained with the remote peer.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) ChannelSnapshots() []*channeldb.ChannelSnapshot {
|
2023-03-29 13:24:07 +02:00
|
|
|
snapshots := make(
|
|
|
|
[]*channeldb.ChannelSnapshot, 0, p.activeChannels.Len(),
|
|
|
|
)
|
|
|
|
|
|
|
|
p.activeChannels.ForEach(func(_ lnwire.ChannelID,
|
|
|
|
activeChan *lnwallet.LightningChannel) error {
|
2017-08-09 01:51:19 +02:00
|
|
|
|
2023-03-29 13:24:07 +02:00
|
|
|
// If the activeChan is nil, then we skip it as the channel is
|
|
|
|
// pending.
|
2020-03-07 04:43:51 +01:00
|
|
|
if activeChan == nil {
|
2023-03-29 13:24:07 +02:00
|
|
|
return nil
|
2020-03-07 04:43:51 +01:00
|
|
|
}
|
|
|
|
|
2018-02-09 04:40:48 +01:00
|
|
|
// We'll only return a snapshot for channels that are
|
2022-01-13 17:29:43 +01:00
|
|
|
// *immediately* available for routing payments over.
|
2018-02-09 04:40:48 +01:00
|
|
|
if activeChan.RemoteNextRevocation() == nil {
|
2023-03-29 13:24:07 +02:00
|
|
|
return nil
|
2018-02-09 04:40:48 +01:00
|
|
|
}
|
|
|
|
|
2017-08-09 01:51:19 +02:00
|
|
|
snapshot := activeChan.StateSnapshot()
|
|
|
|
snapshots = append(snapshots, snapshot)
|
2023-03-29 13:24:07 +02:00
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
2017-08-09 01:51:19 +02:00
|
|
|
|
|
|
|
return snapshots
|
2016-06-23 07:22:06 +02:00
|
|
|
}
|
|
|
|
|
2017-11-23 08:21:07 +01:00
|
|
|
// genDeliveryScript returns a new script to be used to send our funds to in
|
|
|
|
// the case of a cooperative channel close negotiation.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) genDeliveryScript() ([]byte, error) {
|
2022-06-10 20:18:33 +02:00
|
|
|
// We'll send a normal p2wkh address unless we've negotiated the
|
|
|
|
// shutdown-any-segwit feature.
|
|
|
|
addrType := lnwallet.WitnessPubKey
|
|
|
|
if p.taprootShutdownAllowed() {
|
|
|
|
addrType = lnwallet.TaprootPubkey
|
|
|
|
}
|
|
|
|
|
2020-06-30 03:29:22 +02:00
|
|
|
deliveryAddr, err := p.cfg.Wallet.NewAddress(
|
2022-06-10 20:18:33 +02:00
|
|
|
addrType, false, lnwallet.DefaultAccountName,
|
2017-11-23 08:21:07 +01:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Infof("Delivery addr for channel close: %v",
|
2017-11-23 08:21:07 +01:00
|
|
|
deliveryAddr)
|
|
|
|
|
|
|
|
return txscript.PayToAddrScript(deliveryAddr)
|
2017-07-30 23:21:21 +02:00
|
|
|
}
|
|
|
|
|
2016-06-21 21:32:32 +02:00
|
|
|
// channelManager is a goroutine dedicated to handling all requests/signals
|
|
|
|
// pertaining to the opening, cooperative closing, and force closing of all
|
|
|
|
// channels maintained with the remote peer.
|
|
|
|
//
|
|
|
|
// NOTE: This method MUST be run as a goroutine.
|
2022-12-13 19:26:20 +01:00
|
|
|
func (p *Brontide) channelManager() {
|
2017-08-09 01:51:19 +02:00
|
|
|
defer p.wg.Done()
|
|
|
|
|
2019-02-15 02:13:44 +01:00
|
|
|
// reenableTimeout will fire once after the configured channel status
|
2020-12-01 14:31:42 +01:00
|
|
|
// interval has elapsed. This will trigger us to sign new channel
|
2019-02-15 02:13:44 +01:00
|
|
|
// updates and broadcast them with the "disabled" flag unset.
|
2020-06-30 03:29:22 +02:00
|
|
|
reenableTimeout := time.After(p.cfg.ChanActiveTimeout)
|
2019-02-15 02:13:44 +01:00
|
|
|
|
2016-06-21 21:32:32 +02:00
|
|
|
out:
|
|
|
|
for {
|
|
|
|
select {
|
2023-03-16 14:24:04 +01:00
|
|
|
// A new pending channel has arrived which means we are about
|
|
|
|
// to complete a funding workflow and are waiting for the final
|
|
|
|
// `ChannelReady` messages to be exchanged. We will add this
|
|
|
|
// channel to the `activeChannels` with a nil value to indicate
|
|
|
|
// this is a pending channel.
|
|
|
|
case req := <-p.newPendingChannel:
|
|
|
|
p.handleNewPendingChannel(req)
|
|
|
|
|
2017-07-30 23:21:21 +02:00
|
|
|
// A new channel has arrived which means we've just completed a
|
|
|
|
// funding workflow. We'll initialize the necessary local
|
|
|
|
// state, and notify the htlc switch of a new link.
|
2023-03-16 03:17:13 +01:00
|
|
|
case req := <-p.newActiveChannel:
|
|
|
|
p.handleNewActiveChannel(req)
|
2016-07-14 01:40:01 +02:00
|
|
|
|
2023-06-08 13:42:07 +02:00
|
|
|
// The funding flow for a pending channel has failed, so we will
|
|
|
|
// remove it from Brontide.
|
|
|
|
case req := <-p.removePendingChannel:
|
|
|
|
p.handleRemovePendingChannel(req)
|
|
|
|
|
2017-11-23 08:21:07 +01:00
|
|
|
// We've just received a local request to close an active
|
2020-12-01 14:31:42 +01:00
|
|
|
// channel. It will either kick off a cooperative channel
|
2017-11-23 08:21:07 +01:00
|
|
|
// closure negotiation, or be a notification of a breached
|
|
|
|
// contract that should be abandoned.
|
2016-06-21 21:32:32 +02:00
|
|
|
case req := <-p.localCloseChanReqs:
|
2017-11-23 08:21:07 +01:00
|
|
|
p.handleLocalCloseReq(req)
|
|
|
|
|
2018-08-31 01:54:53 +02:00
|
|
|
// We've received a link failure from a link that was added to
|
|
|
|
// the switch. This will initiate the teardown of the link, and
|
|
|
|
// initiate any on-chain closures if necessary.
|
|
|
|
case failure := <-p.linkFailures:
|
|
|
|
p.handleLinkFailure(failure)
|
|
|
|
|
2017-11-23 08:21:07 +01:00
|
|
|
// We've received a new cooperative channel closure related
|
|
|
|
// message from the remote peer, we'll use this message to
|
|
|
|
// advance the chan closer state machine.
|
|
|
|
case closeMsg := <-p.chanCloseMsgs:
|
2020-06-30 00:01:45 +02:00
|
|
|
p.handleCloseMsg(closeMsg)
|
2019-02-15 02:13:44 +01:00
|
|
|
|
|
|
|
// The channel reannounce delay has elapsed, broadcast the
|
|
|
|
// reenabled channel updates to the network. This should only
|
|
|
|
// fire once, so we set the reenableTimeout channel to nil to
|
|
|
|
// mark it for garbage collection. If the peer is torn down
|
|
|
|
// before firing, reenabling will not be attempted.
|
|
|
|
// TODO(conner): consolidate reenable timers inside chan status
|
|
|
|
// manager
|
|
|
|
case <-reenableTimeout:
|
2022-12-13 19:26:20 +01:00
|
|
|
p.reenableActiveChannels()
|
2019-02-15 02:13:44 +01:00
|
|
|
|
|
|
|
// Since this channel will never fire again during the
|
|
|
|
// lifecycle of the peer, we nil the channel to mark it
|
|
|
|
// eligible for garbage collection, and make this
|
2019-05-05 00:35:37 +02:00
|
|
|
// explicitly ineligible to receive in future calls to
|
2019-02-15 02:13:44 +01:00
|
|
|
// select. This also shaves a few CPU cycles since the
|
|
|
|
// select will ignore this case entirely.
|
|
|
|
reenableTimeout = nil
|
|
|
|
|
2022-11-23 06:23:18 +01:00
|
|
|
// Once the reenabling is attempted, we also cancel the
|
|
|
|
// channel event subscription to free up the overflow
|
|
|
|
// queue used in channel notifier.
|
2022-12-13 19:26:20 +01:00
|
|
|
//
|
|
|
|
// NOTE: channelEventClient will be nil if the
|
|
|
|
// reenableTimeout is greater than 1 minute.
|
|
|
|
if p.channelEventClient != nil {
|
|
|
|
p.channelEventClient.Cancel()
|
|
|
|
}
|
2022-11-23 06:23:18 +01:00
|
|
|
|
2017-11-23 20:49:48 +01:00
|
|
|
case <-p.quit:
|
|
|
|
// As we've been signalled to exit, we'll reset all
|
|
|
|
// our active channels back to their default state.
|
2023-03-29 13:24:07 +02:00
|
|
|
p.activeChannels.ForEach(func(_ lnwire.ChannelID,
|
|
|
|
lc *lnwallet.LightningChannel) error {
|
|
|
|
|
|
|
|
// Exit if the channel is nil as it's a pending
|
|
|
|
// channel.
|
|
|
|
if lc == nil {
|
|
|
|
return nil
|
2020-03-07 04:43:51 +01:00
|
|
|
}
|
|
|
|
|
2023-03-29 13:24:07 +02:00
|
|
|
lc.ResetState()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
2017-05-24 00:26:38 +02:00
|
|
|
|
2016-06-21 21:32:32 +02:00
|
|
|
break out
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-15 02:13:44 +01:00
|
|
|
// reenableActiveChannels searches the index of channels maintained with this
|
|
|
|
// peer, and reenables each public, non-pending channel. This is done at the
|
|
|
|
// gossip level by broadcasting a new ChannelUpdate with the disabled bit unset.
|
|
|
|
// No message will be sent if the channel is already enabled.
|
2022-12-13 19:26:20 +01:00
|
|
|
func (p *Brontide) reenableActiveChannels() {
|
2019-02-15 02:13:44 +01:00
|
|
|
// First, filter all known channels with this peer for ones that are
|
|
|
|
// both public and not pending.
|
2022-11-23 05:50:20 +01:00
|
|
|
activePublicChans := p.filterChannelsToEnable()
|
2019-02-15 02:13:44 +01:00
|
|
|
|
2022-11-23 06:23:18 +01:00
|
|
|
// Create a map to hold channels that need to be retried.
|
|
|
|
retryChans := make(map[wire.OutPoint]struct{}, len(activePublicChans))
|
|
|
|
|
2019-02-15 02:13:44 +01:00
|
|
|
// For each of the public, non-pending channels, set the channel
|
|
|
|
// disabled bit to false and send out a new ChannelUpdate. If this
|
|
|
|
// channel is already active, the update won't be sent.
|
|
|
|
for _, chanPoint := range activePublicChans {
|
2021-02-16 06:13:50 +01:00
|
|
|
err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint, false)
|
2022-11-23 06:23:18 +01:00
|
|
|
|
|
|
|
switch {
|
|
|
|
// No error occurred, continue to request the next channel.
|
|
|
|
case err == nil:
|
|
|
|
continue
|
|
|
|
|
|
|
|
// Cannot auto-enable a manually disabled channel, so we do
|
|
|
|
// nothing but proceed to the next channel.
|
|
|
|
case errors.Is(err, netann.ErrEnableManuallyDisabledChan):
|
|
|
|
p.log.Debugf("Channel(%v) was manually disabled, "+
|
|
|
|
"ignoring automatic enable request", chanPoint)
|
|
|
|
|
|
|
|
continue
|
|
|
|
|
|
|
|
// If the channel is reported as inactive, we will give it
|
|
|
|
// another chance. When handling the request, ChanStatusManager
|
|
|
|
// will check whether the link is active or not. One of the
|
|
|
|
// conditions is whether the link has been marked as
|
|
|
|
// reestablished, which happens inside a goroutine (htlcManager)
|
|
|
|
// after the link is started. And we may get a false negative
|
|
|
|
// saying the link is not active because that goroutine hasn't
|
|
|
|
// reached the line to mark the reestablishment. Thus we give
|
|
|
|
// it a second chance to send the request.
|
|
|
|
case errors.Is(err, netann.ErrEnableInactiveChan):
|
2022-12-13 19:26:20 +01:00
|
|
|
// If we don't have a client created, it means we
|
|
|
|
// shouldn't retry enabling the channel.
|
|
|
|
if p.channelEventClient == nil {
|
|
|
|
p.log.Errorf("Channel(%v) request enabling "+
|
|
|
|
"failed due to inactive link",
|
|
|
|
chanPoint)
|
|
|
|
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2022-11-23 06:23:18 +01:00
|
|
|
p.log.Warnf("Channel(%v) cannot be enabled as " +
|
|
|
|
"ChanStatusManager reported inactive, retrying")
|
|
|
|
|
|
|
|
// Add the channel to the retry map.
|
|
|
|
retryChans[chanPoint] = struct{}{}
|
2019-02-15 02:13:44 +01:00
|
|
|
}
|
|
|
|
}
|
2022-11-23 06:23:18 +01:00
|
|
|
|
|
|
|
// Retry the channels if we have any.
|
|
|
|
if len(retryChans) != 0 {
|
2022-12-13 19:26:20 +01:00
|
|
|
p.retryRequestEnable(retryChans)
|
2022-11-23 06:23:18 +01:00
|
|
|
}
|
2019-02-15 02:13:44 +01:00
|
|
|
}
|
|
|
|
|
2017-11-23 08:21:07 +01:00
|
|
|
// fetchActiveChanCloser attempts to fetch the active chan closer state machine
|
|
|
|
// for the target channel ID. If the channel isn't active, an error is returned.
|
|
|
|
// Otherwise, either an existing state machine will be returned, or a new one
|
|
|
|
// will be created.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) fetchActiveChanCloser(chanID lnwire.ChannelID) (
|
2020-06-17 02:33:06 +02:00
|
|
|
*chancloser.ChanCloser, error) {
|
|
|
|
|
2022-04-22 19:51:26 +02:00
|
|
|
chanCloser, found := p.activeChanCloses[chanID]
|
|
|
|
if found {
|
|
|
|
// An entry will only be found if the closer has already been
|
|
|
|
// created for a non-pending channel or for a channel that had
|
|
|
|
// previously started the shutdown process but the connection
|
|
|
|
// was restarted.
|
|
|
|
return chanCloser, nil
|
|
|
|
}
|
|
|
|
|
2017-11-23 08:21:07 +01:00
|
|
|
// First, we'll ensure that we actually know of the target channel. If
|
|
|
|
// not, we'll ignore this message.
|
2023-03-29 13:24:07 +02:00
|
|
|
channel, ok := p.activeChannels.Load(chanID)
|
2020-03-07 04:43:51 +01:00
|
|
|
|
|
|
|
// If the channel isn't in the map or the channel is nil, return
|
|
|
|
// ErrChannelNotFound as the channel is pending.
|
|
|
|
if !ok || channel == nil {
|
2018-05-23 09:09:19 +02:00
|
|
|
return nil, ErrChannelNotFound
|
2017-11-23 08:21:07 +01:00
|
|
|
}
|
|
|
|
|
2022-04-22 19:51:26 +02:00
|
|
|
// We'll create a valid closing state machine in order to respond to
|
|
|
|
// the initiated cooperative channel closure. First, we set the
|
|
|
|
// delivery script that our funds will be paid out to. If an upfront
|
|
|
|
// shutdown script was set, we will use it. Otherwise, we get a fresh
|
|
|
|
// delivery script.
|
|
|
|
//
|
|
|
|
// TODO: Expose option to allow upfront shutdown script from watch-only
|
|
|
|
// accounts.
|
|
|
|
deliveryScript := channel.LocalUpfrontShutdownScript()
|
|
|
|
if len(deliveryScript) == 0 {
|
|
|
|
var err error
|
|
|
|
deliveryScript, err = p.genDeliveryScript()
|
2017-11-23 08:21:07 +01:00
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("unable to gen delivery script: %v",
|
2022-04-22 19:51:26 +02:00
|
|
|
err)
|
|
|
|
return nil, fmt.Errorf("close addr unavailable")
|
2017-11-23 08:21:07 +01:00
|
|
|
}
|
2022-04-22 19:51:26 +02:00
|
|
|
}
|
2017-11-23 08:21:07 +01:00
|
|
|
|
2022-04-22 19:51:26 +02:00
|
|
|
// In order to begin fee negotiations, we'll first compute our target
|
|
|
|
// ideal fee-per-kw.
|
|
|
|
feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
|
|
|
|
p.cfg.CoopCloseTargetConfs,
|
|
|
|
)
|
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("unable to query fee estimator: %v", err)
|
2022-04-22 19:51:26 +02:00
|
|
|
return nil, fmt.Errorf("unable to estimate fee")
|
|
|
|
}
|
2017-11-23 08:21:07 +01:00
|
|
|
|
2024-05-29 19:57:47 +02:00
|
|
|
addr, err := p.addrWithInternalKey(deliveryScript).Unpack()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to parse addr: %w", err)
|
|
|
|
}
|
2022-04-22 19:51:26 +02:00
|
|
|
chanCloser, err = p.createChanCloser(
|
2024-05-29 19:57:47 +02:00
|
|
|
channel, addr, feePerKw, nil, lntypes.Remote,
|
2022-04-22 19:51:26 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("unable to create chan closer: %v", err)
|
2022-04-22 19:51:26 +02:00
|
|
|
return nil, fmt.Errorf("unable to create chan closer")
|
2017-11-23 08:21:07 +01:00
|
|
|
}
|
|
|
|
|
2022-04-22 19:51:26 +02:00
|
|
|
p.activeChanCloses[chanID] = chanCloser
|
|
|
|
|
2017-11-23 08:21:07 +01:00
|
|
|
return chanCloser, nil
|
|
|
|
}
|
|
|
|
|
2022-11-23 05:50:20 +01:00
|
|
|
// filterChannelsToEnable filters a list of channels to be enabled upon start.
|
|
|
|
// The filtered channels are active channels that are neither private nor
|
|
|
|
// pending.
|
|
|
|
func (p *Brontide) filterChannelsToEnable() []wire.OutPoint {
|
|
|
|
var activePublicChans []wire.OutPoint
|
|
|
|
|
2023-03-29 13:24:07 +02:00
|
|
|
p.activeChannels.Range(func(chanID lnwire.ChannelID,
|
|
|
|
lnChan *lnwallet.LightningChannel) bool {
|
2022-11-23 05:50:20 +01:00
|
|
|
|
|
|
|
// If the lnChan is nil, continue as this is a pending channel.
|
|
|
|
if lnChan == nil {
|
2023-03-29 13:24:07 +02:00
|
|
|
return true
|
2022-11-23 05:50:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
dbChan := lnChan.State()
|
|
|
|
isPublic := dbChan.ChannelFlags&lnwire.FFAnnounceChannel != 0
|
|
|
|
if !isPublic || dbChan.IsPending {
|
2023-03-29 13:24:07 +02:00
|
|
|
return true
|
2022-11-23 05:50:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// We'll also skip any channels added during this peer's
|
|
|
|
// lifecycle since they haven't waited out the timeout. Their
|
|
|
|
// first announcement will be enabled, and the chan status
|
|
|
|
// manager will begin monitoring them passively since they exist
|
|
|
|
// in the database.
|
2023-03-29 13:24:07 +02:00
|
|
|
if _, ok := p.addedChannels.Load(chanID); ok {
|
|
|
|
return true
|
2022-11-23 05:50:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
activePublicChans = append(
|
|
|
|
activePublicChans, dbChan.FundingOutpoint,
|
|
|
|
)
|
2023-03-29 13:24:07 +02:00
|
|
|
|
|
|
|
return true
|
|
|
|
})
|
2022-11-23 05:50:20 +01:00
|
|
|
|
|
|
|
return activePublicChans
|
|
|
|
}
|
|
|
|
|
2022-11-23 06:23:18 +01:00
|
|
|
// retryRequestEnable takes a map of channel outpoints and listens to channel
|
|
|
|
// events from the channelEventClient, removing a channel from the map
|
|
|
|
// if it's matched to an event. Upon receiving an active channel event, it
|
|
|
|
// will send the enabling request again.
|
2022-12-13 19:26:20 +01:00
|
|
|
func (p *Brontide) retryRequestEnable(activeChans map[wire.OutPoint]struct{}) {
|
2022-11-23 06:23:18 +01:00
|
|
|
p.log.Debugf("Retry enabling %v channels", len(activeChans))
|
|
|
|
|
|
|
|
// retryEnable is a helper closure that sends an enable request and
|
|
|
|
// removes the channel from the map if it's matched.
|
|
|
|
retryEnable := func(chanPoint wire.OutPoint) error {
|
|
|
|
// If this is an active channel event, check whether it's in
|
|
|
|
// our targeted channels map.
|
|
|
|
_, found := activeChans[chanPoint]
|
|
|
|
|
|
|
|
// If this channel is irrelevant, return nil so the loop can
|
|
|
|
// jump to the next iteration.
|
|
|
|
if !found {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise we've just received an active signal for a channel
|
|
|
|
// that previously failed to be enabled, so we send the request
|
|
|
|
// again.
|
|
|
|
//
|
|
|
|
// We only give the channel one more shot, so we delete it from
|
|
|
|
// our map first to keep it from being attempted again.
|
|
|
|
delete(activeChans, chanPoint)
|
|
|
|
|
|
|
|
// Send the request.
|
|
|
|
err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint, false)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("request enabling channel %v "+
|
|
|
|
"failed: %w", chanPoint, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
for {
|
|
|
|
// If activeChans is empty, we're done processing all the
|
|
|
|
// channels.
|
|
|
|
if len(activeChans) == 0 {
|
|
|
|
p.log.Debug("Finished retry enabling channels")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
// A new event has been sent by the ChannelNotifier. We now
|
|
|
|
// check whether it's an active or inactive channel event.
|
2022-12-13 19:26:20 +01:00
|
|
|
case e := <-p.channelEventClient.Updates():
|
2022-11-23 06:23:18 +01:00
|
|
|
// If this is an active channel event, try to enable the
|
|
|
|
// channel then jump to the next iteration.
|
|
|
|
active, ok := e.(channelnotifier.ActiveChannelEvent)
|
|
|
|
if ok {
|
|
|
|
chanPoint := *active.ChannelPoint
|
|
|
|
|
|
|
|
// If we received an error for this particular
|
|
|
|
// channel, we log an error and won't quit as
|
|
|
|
// we still want to retry other channels.
|
|
|
|
if err := retryEnable(chanPoint); err != nil {
|
|
|
|
p.log.Errorf("Retry failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, check for an inactive link event, and jump to the
|
|
|
|
// next iteration if it's not one.
|
|
|
|
inactive, ok := e.(channelnotifier.InactiveLinkEvent)
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Found an inactive link event; if this is our
|
|
|
|
// targeted channel, remove it from our map.
|
|
|
|
chanPoint := *inactive.ChannelPoint
|
|
|
|
_, found := activeChans[chanPoint]
|
|
|
|
if !found {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
delete(activeChans, chanPoint)
|
|
|
|
p.log.Warnf("Re-enable channel %v failed, received "+
|
|
|
|
"inactive link event", chanPoint)
|
|
|
|
|
|
|
|
case <-p.quit:
|
|
|
|
p.log.Debugf("Peer shutdown during retry enabling")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-09 14:44:00 +01:00
|
|
|
// chooseDeliveryScript takes two optionally set shutdown scripts and returns
|
|
|
|
// a suitable script to close out to. This may be nil if neither script is
|
|
|
|
// set. If both scripts are set, this function will error if they do not match.
|
|
|
|
func chooseDeliveryScript(upfront,
|
|
|
|
requested lnwire.DeliveryAddress) (lnwire.DeliveryAddress, error) {
|
|
|
|
|
2020-12-01 14:31:42 +01:00
|
|
|
// If no upfront shutdown script was provided, return the user
|
2019-12-09 14:44:00 +01:00
|
|
|
// requested address (which may be nil).
|
|
|
|
if len(upfront) == 0 {
|
|
|
|
return requested, nil
|
|
|
|
}
|
|
|
|
|
2024-01-30 01:28:22 +01:00
|
|
|
// If an upfront shutdown script was provided, and the user did not
|
|
|
|
// request a custom shutdown script, return the upfront address.
|
2019-12-09 14:44:00 +01:00
|
|
|
if len(requested) == 0 {
|
|
|
|
return upfront, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If both an upfront shutdown script and a custom close script were
|
|
|
|
// provided, error if the user provided shutdown script does not match
|
2024-01-30 01:28:22 +01:00
|
|
|
// the upfront shutdown script (because closing out to a different
|
|
|
|
// script would violate upfront shutdown).
|
2019-12-09 14:44:00 +01:00
|
|
|
if !bytes.Equal(upfront, requested) {
|
2020-06-17 02:33:06 +02:00
|
|
|
return nil, chancloser.ErrUpfrontShutdownScriptMismatch
|
2019-12-09 14:44:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// The user requested script matches the upfront shutdown script, so we
|
|
|
|
// can return it without error.
|
|
|
|
return upfront, nil
|
|
|
|
}
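// The three possible outcomes of chooseDeliveryScript can be summarized
// with a short sketch (the upfront, requested and other values are
// hypothetical, for illustration only):
//
//	// No upfront script: the requested script (possibly nil) is used.
//	addr, _ := chooseDeliveryScript(nil, requested)
//
//	// Upfront script only: the upfront script is used.
//	addr, _ = chooseDeliveryScript(upfront, nil)
//
//	// Both set but different: ErrUpfrontShutdownScriptMismatch.
//	_, err := chooseDeliveryScript(upfront, other)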
|
|
|
|
|
2022-04-26 18:44:18 +02:00
|
|
|
// restartCoopClose checks whether we need to restart the cooperative close
|
|
|
|
// process for a given channel.
|
|
|
|
func (p *Brontide) restartCoopClose(lnChan *lnwallet.LightningChannel) (
|
|
|
|
*lnwire.Shutdown, error) {
|
|
|
|
|
|
|
|
// If this channel has status ChanStatusCoopBroadcasted and does not
|
|
|
|
// have a closing transaction, then the cooperative close process was
|
|
|
|
// started but never finished. We'll re-create the chanCloser state
|
|
|
|
// machine and resend Shutdown. BOLT#2 requires that we retransmit
|
|
|
|
// Shutdown exactly, but doing so would mean persisting the RPC
|
|
|
|
// provided close script. Instead use the LocalUpfrontShutdownScript
|
|
|
|
// or generate a script.
|
|
|
|
c := lnChan.State()
|
|
|
|
_, err := c.BroadcastedCooperative()
|
|
|
|
if err != nil && !errors.Is(err, channeldb.ErrNoCloseTx) {
|
|
|
|
// An error other than ErrNoCloseTx was encountered.
|
|
|
|
return nil, err
|
|
|
|
} else if err == nil {
|
|
|
|
// This channel has already completed the coop close
|
|
|
|
// negotiation.
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2024-02-06 18:11:26 +01:00
|
|
|
var deliveryScript []byte
|
|
|
|
|
|
|
|
shutdownInfo, err := c.ShutdownInfo()
|
|
|
|
switch {
|
|
|
|
// We have previously stored the delivery script that we need to use
|
|
|
|
// in the shutdown message. Re-use this script.
|
|
|
|
case err == nil:
|
|
|
|
shutdownInfo.WhenSome(func(info channeldb.ShutdownInfo) {
|
|
|
|
deliveryScript = info.DeliveryScript.Val
|
|
|
|
})
|
|
|
|
|
|
|
|
// An error other than ErrNoShutdownInfo was returned.
|
2024-06-06 15:15:51 +02:00
|
|
|
case !errors.Is(err, channeldb.ErrNoShutdownInfo):
|
2024-02-06 18:11:26 +01:00
|
|
|
return nil, err
|
|
|
|
|
|
|
|
case errors.Is(err, channeldb.ErrNoShutdownInfo):
|
|
|
|
deliveryScript = c.LocalShutdownScript
|
|
|
|
if len(deliveryScript) == 0 {
|
|
|
|
var err error
|
|
|
|
deliveryScript, err = p.genDeliveryScript()
|
|
|
|
if err != nil {
|
|
|
|
p.log.Errorf("unable to gen delivery script: "+
|
|
|
|
"%v", err)
|
|
|
|
|
|
|
|
return nil, fmt.Errorf("close addr unavailable")
|
|
|
|
}
|
2022-04-26 18:44:18 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute an ideal fee.
|
|
|
|
feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(
|
|
|
|
p.cfg.CoopCloseTargetConfs,
|
|
|
|
)
|
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("unable to query fee estimator: %v", err)
|
2022-04-26 18:44:18 +02:00
|
|
|
return nil, fmt.Errorf("unable to estimate fee")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Determine whether we or the peer are the initiator of the coop
|
|
|
|
// close attempt by looking at the channel's status.
|
2024-07-31 02:03:47 +02:00
|
|
|
closingParty := lntypes.Remote
|
|
|
|
if c.HasChanStatus(channeldb.ChanStatusLocalCloseInitiator) {
|
|
|
|
closingParty = lntypes.Local
|
|
|
|
}
|
2022-04-26 18:44:18 +02:00
|
|
|
|
2024-05-29 19:57:47 +02:00
|
|
|
addr, err := p.addrWithInternalKey(deliveryScript).Unpack()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to parse addr: %w", err)
|
|
|
|
}
|
2022-04-26 18:44:18 +02:00
|
|
|
chanCloser, err := p.createChanCloser(
|
2024-05-29 19:57:47 +02:00
|
|
|
lnChan, addr, feePerKw, nil, closingParty,
|
2022-04-26 18:44:18 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("unable to create chan closer: %v", err)
|
2022-04-26 18:44:18 +02:00
|
|
|
return nil, fmt.Errorf("unable to create chan closer")
|
|
|
|
}
|
|
|
|
|
|
|
|
// This does not need a mutex even though it is in a different
|
|
|
|
// goroutine since this is done before the channelManager goroutine is
|
|
|
|
// created.
|
2024-01-29 22:19:15 +01:00
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(c.FundingOutpoint)
|
2022-04-26 18:44:18 +02:00
|
|
|
p.activeChanCloses[chanID] = chanCloser
|
|
|
|
|
|
|
|
// Create the Shutdown message.
|
|
|
|
shutdownMsg, err := chanCloser.ShutdownChan()
|
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("unable to create shutdown message: %v", err)
|
2022-04-26 18:44:18 +02:00
|
|
|
delete(p.activeChanCloses, chanID)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return shutdownMsg, nil
|
|
|
|
}
|
|
|
|
|
2022-04-22 19:51:26 +02:00
|
|
|
// createChanCloser constructs a ChanCloser from the passed parameters and is
|
|
|
|
// used to de-duplicate code.
|
|
|
|
func (p *Brontide) createChanCloser(channel *lnwallet.LightningChannel,
|
2024-05-29 19:57:47 +02:00
|
|
|
deliveryScript chancloser.DeliveryAddrWithKey,
|
|
|
|
fee chainfee.SatPerKWeight, req *htlcswitch.ChanClose,
|
2024-07-31 02:03:47 +02:00
|
|
|
closer lntypes.ChannelParty) (*chancloser.ChanCloser, error) {
|
2022-04-22 19:51:26 +02:00
|
|
|
|
|
|
|
_, startingHeight, err := p.cfg.ChainIO.GetBestBlock()
|
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("unable to obtain best block: %v", err)
|
2022-04-22 19:51:26 +02:00
|
|
|
return nil, fmt.Errorf("cannot obtain best block")
|
|
|
|
}
|
|
|
|
|
2023-12-21 16:21:35 +01:00
|
|
|
// The req will only be set if we initiated the co-op closing flow.
|
2022-07-27 01:41:59 +02:00
|
|
|
var maxFee chainfee.SatPerKWeight
|
|
|
|
if req != nil {
|
|
|
|
maxFee = req.MaxFee
|
|
|
|
}
|
|
|
|
|
2022-04-22 19:51:26 +02:00
|
|
|
chanCloser := chancloser.NewChanCloser(
|
|
|
|
chancloser.ChanCloseCfg{
|
2022-09-30 04:49:44 +02:00
|
|
|
Channel: channel,
|
2023-01-20 04:43:47 +01:00
|
|
|
MusigSession: NewMusigChanCloser(channel),
|
2022-09-30 04:49:44 +02:00
|
|
|
FeeEstimator: &chancloser.SimpleCoopFeeEstimator{},
|
|
|
|
BroadcastTx: p.cfg.Wallet.PublishTransaction,
|
2024-05-29 19:57:47 +02:00
|
|
|
AuxCloser: p.cfg.AuxChanCloser,
|
2022-04-22 19:51:26 +02:00
|
|
|
DisableChannel: func(op wire.OutPoint) error {
|
|
|
|
return p.cfg.ChanStatusMgr.RequestDisable(
|
|
|
|
op, false,
|
|
|
|
)
|
|
|
|
},
|
2022-07-27 01:41:59 +02:00
|
|
|
MaxFee: maxFee,
|
2022-04-22 19:51:26 +02:00
|
|
|
Disconnect: func() error {
|
|
|
|
return p.cfg.DisconnectPeer(p.IdentityKey())
|
|
|
|
},
|
2022-06-10 20:17:20 +02:00
|
|
|
ChainParams: &p.cfg.Wallet.Cfg.NetParams,
|
|
|
|
Quit: p.quit,
|
2022-04-22 19:51:26 +02:00
|
|
|
},
|
|
|
|
deliveryScript,
|
|
|
|
fee,
|
|
|
|
uint32(startingHeight),
|
|
|
|
req,
|
2024-07-31 01:44:18 +02:00
|
|
|
closer,
|
2022-04-22 19:51:26 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
return chanCloser, nil
|
|
|
|
}
|
|
|
|
|
2017-11-23 08:21:07 +01:00
|
|
|
// handleLocalCloseReq kicks off the workflow to execute a cooperative or
|
|
|
|
// forced unilateral closure of the channel initiated by a local subsystem.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
|
2024-01-29 22:19:15 +01:00
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(*req.ChanPoint)
|
2017-04-17 00:41:11 +02:00
|
|
|
|
2023-03-29 13:24:07 +02:00
|
|
|
channel, ok := p.activeChannels.Load(chanID)
|
2020-03-07 04:43:51 +01:00
|
|
|
|
|
|
|
// Though this function can't be called for pending channels, we still
|
|
|
|
// check whether the channel is nil for safety.
|
|
|
|
if !ok || channel == nil {
|
2017-05-24 00:21:35 +02:00
|
|
|
err := fmt.Errorf("unable to close channel, ChannelID(%v) is "+
|
|
|
|
"unknown", chanID)
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf(err.Error())
|
2017-05-02 22:04:58 +02:00
|
|
|
req.Err <- err
|
2017-05-24 00:21:35 +02:00
|
|
|
return
|
|
|
|
}
|
2016-09-12 21:42:26 +02:00
|
|
|
|
2016-11-29 03:44:14 +01:00
|
|
|
switch req.CloseType {
|
|
|
|
// A type of CloseRegular indicates that the user has opted to close
|
2017-05-05 01:03:47 +02:00
|
|
|
// out this channel on-chain, so we execute the cooperative channel
|
2017-02-03 02:05:25 +01:00
|
|
|
// closure workflow.
|
2021-09-14 04:00:36 +02:00
|
|
|
case contractcourt.CloseRegular:
|
2019-12-09 14:44:00 +01:00
|
|
|
// First, we'll choose a delivery address that we'll use to send the
|
|
|
|
// funds to in the case of a successful negotiation.
|
|
|
|
|
|
|
|
// An upfront shutdown and a user-provided script are both optional,
|
|
|
|
// but must be equal if both are set (because we cannot serve a request
|
|
|
|
// to close out to a script which violates upfront shutdown). Get the
|
|
|
|
// appropriate address to close out to (which may be nil if neither
|
|
|
|
// are set) and error if they are both set and do not match.
|
|
|
|
deliveryScript, err := chooseDeliveryScript(
|
|
|
|
channel.LocalUpfrontShutdownScript(), req.DeliveryScript,
|
|
|
|
)
|
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("cannot close channel %v: %v", req.ChanPoint, err)
|
2019-12-09 14:44:00 +01:00
|
|
|
req.Err <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// If neither an upfront address nor a user-set address was
|
|
|
|
// provided, generate a fresh script.
|
2019-12-03 10:38:29 +01:00
|
|
|
if len(deliveryScript) == 0 {
|
|
|
|
deliveryScript, err = p.genDeliveryScript()
|
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf(err.Error())
|
2019-12-03 10:38:29 +01:00
|
|
|
req.Err <- err
|
|
|
|
return
|
|
|
|
}
|
2017-11-23 08:21:07 +01:00
|
|
|
}
|
2024-05-29 19:57:47 +02:00
|
|
|
addr, err := p.addrWithInternalKey(deliveryScript).Unpack()
|
|
|
|
if err != nil {
|
|
|
|
err = fmt.Errorf("unable to parse addr for channel "+
|
|
|
|
"%v: %w", req.ChanPoint, err)
|
|
|
|
p.log.Errorf(err.Error())
|
|
|
|
req.Err <- err
|
2017-11-23 08:21:07 +01:00
|
|
|
|
2024-05-29 19:57:47 +02:00
|
|
|
return
|
|
|
|
}
|
2022-04-22 19:51:26 +02:00
|
|
|
chanCloser, err := p.createChanCloser(
|
2024-05-29 19:57:47 +02:00
|
|
|
channel, addr, req.TargetFeePerKw, req, lntypes.Local,
|
2017-11-23 08:21:07 +01:00
|
|
|
)
|
2022-04-22 19:51:26 +02:00
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf(err.Error())
|
2022-04-22 19:51:26 +02:00
|
|
|
req.Err <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-11-23 08:21:07 +01:00
|
|
|
p.activeChanCloses[chanID] = chanCloser
|
|
|
|
|
|
|
|
// Finally, we'll initiate the channel shutdown within the
|
|
|
|
// chanCloser, and send the shutdown message to the remote
|
|
|
|
// party to kick things off.
|
|
|
|
shutdownMsg, err := chanCloser.ShutdownChan()
|
2017-03-25 02:26:09 +01:00
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf(err.Error())
|
2017-05-02 22:04:58 +02:00
|
|
|
req.Err <- err
|
2017-11-23 08:21:07 +01:00
|
|
|
delete(p.activeChanCloses, chanID)
|
|
|
|
|
2017-11-23 20:49:48 +01:00
|
|
|
// As we were unable to shut down the channel, we'll
|
|
|
|
// return it to its normal state.
|
|
|
|
channel.ResetState()
|
2017-03-25 02:26:09 +01:00
|
|
|
return
|
|
|
|
}
|
2016-11-29 03:44:14 +01:00
|
|
|
|
2023-11-26 22:29:48 +01:00
|
|
|
link := p.fetchLinkFromKeyAndCid(chanID)
|
|
|
|
if link == nil {
|
|
|
|
// If the link is nil then it means it was already
|
|
|
|
// removed from the switch or it never existed in the
|
|
|
|
// first place. The latter case is handled at the
|
|
|
|
// beginning of this function, so in the case where it
|
|
|
|
// has already been removed, we can skip adding the
|
|
|
|
// commit hook to queue a Shutdown message.
|
|
|
|
p.log.Warnf("link not found during attempted closure: "+
|
|
|
|
"%v", chanID)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2024-02-07 11:45:33 +01:00
|
|
|
if !link.DisableAdds(htlcswitch.Outgoing) {
|
|
|
|
p.log.Warnf("Outgoing link adds already "+
|
|
|
|
"disabled: %v", link.ChanID())
|
|
|
|
}
|
2023-11-26 22:29:48 +01:00
|
|
|
|
2024-02-07 11:45:33 +01:00
|
|
|
link.OnCommitOnce(htlcswitch.Outgoing, func() {
|
2023-11-26 22:29:48 +01:00
|
|
|
p.queueMsg(shutdownMsg, nil)
|
|
|
|
})
|
2017-11-23 08:21:07 +01:00
|
|
|
|
2017-01-13 06:01:50 +01:00
|
|
|
// A type of CloseBreach indicates that the counterparty has breached
|
2017-02-03 02:05:25 +01:00
|
|
|
// the channel, therefore we need to clean up our local state.
|
2021-09-14 04:00:36 +02:00
|
|
|
case contractcourt.CloseBreach:
|
2017-07-30 23:21:21 +02:00
|
|
|
// TODO(roasbeef): no longer need with newer breach logic?
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Infof("ChannelPoint(%v) has been breached, wiping "+
|
2017-05-02 22:04:58 +02:00
|
|
|
"channel", req.ChanPoint)
|
2020-04-03 02:39:29 +02:00
|
|
|
p.WipeChannel(req.ChanPoint)
|
2016-09-12 21:42:26 +02:00
|
|
|
}
|
2017-03-25 02:26:09 +01:00
|
|
|
}
|
|
|
|
|
2020-06-26 23:55:40 +02:00
|
|
|
// linkFailureReport is sent to the channelManager whenever a link reports a
|
|
|
|
// link failure, and is forced to exit. The report houses the necessary
|
|
|
|
// information to clean up the channel state, send back the error message, and
|
|
|
|
// force close if necessary.
|
2018-08-31 01:54:53 +02:00
|
|
|
type linkFailureReport struct {
|
|
|
|
chanPoint wire.OutPoint
|
|
|
|
chanID lnwire.ChannelID
|
|
|
|
shortChanID lnwire.ShortChannelID
|
|
|
|
linkErr htlcswitch.LinkFailureError
|
|
|
|
}
|
|
|
|
|
|
|
|
// handleLinkFailure processes a link failure report when a link in the switch
|
2020-06-26 23:55:40 +02:00
|
|
|
// fails. It facilitates the removal of all channel state within the peer,
|
2018-08-31 01:54:53 +02:00
|
|
|
// force closing the channel depending on severity, and sending the error
|
|
|
|
// message back to the remote party.
|
2020-07-02 23:46:06 +02:00
|
|
|
func (p *Brontide) handleLinkFailure(failure linkFailureReport) {
|
2018-10-08 15:46:14 +02:00
|
|
|
// Retrieve the channel from the map of active channels. We do this to
|
|
|
|
// have access to it even after WipeChannel removes it from the map.
|
2024-01-29 22:19:15 +01:00
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(failure.chanPoint)
|
2023-03-29 13:24:07 +02:00
|
|
|
lnChan, _ := p.activeChannels.Load(chanID)
|
2018-10-08 15:46:14 +02:00
|
|
|
|
2018-08-31 01:54:53 +02:00
|
|
|
// We begin by wiping the link, which will remove it from the switch,
|
|
|
|
// such that it won't be used for any more updates.
|
|
|
|
//
|
|
|
|
// TODO(halseth): should introduce a way to atomically stop/pause the
|
|
|
|
// link and cancel back any adds in its mailboxes such that we can
|
|
|
|
// safely force close without the link being added again and updates
|
|
|
|
// being applied.
|
2020-04-03 02:39:29 +02:00
|
|
|
p.WipeChannel(&failure.chanPoint)
|
2018-08-31 01:54:53 +02:00
|
|
|
|
2023-05-20 02:16:25 +02:00
|
|
|
// If the error encountered was severe enough, we'll now force close
|
|
|
|
// the channel to prevent re-adding it to the switch in the future.
|
|
|
|
if failure.linkErr.FailureAction == htlcswitch.LinkFailureForceClose {
|
|
|
|
p.log.Warnf("Force closing link(%v)", failure.shortChanID)
|
2018-08-31 01:54:53 +02:00
|
|
|
|
2020-06-30 03:29:22 +02:00
|
|
|
closeTx, err := p.cfg.ChainArb.ForceCloseContract(
|
2018-08-31 01:54:53 +02:00
|
|
|
failure.chanPoint,
|
|
|
|
)
|
|
|
|
if err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("unable to force close "+
|
2018-08-31 01:54:53 +02:00
|
|
|
"link(%v): %v", failure.shortChanID, err)
|
|
|
|
} else {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Infof("channel(%v) force "+
|
2018-08-31 01:54:53 +02:00
|
|
|
"closed with txid %v",
|
|
|
|
failure.shortChanID, closeTx.TxHash())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-08 15:46:14 +02:00
|
|
|
// If this is a permanent failure, we will mark the channel borked.
|
|
|
|
if failure.linkErr.PermanentFailure && lnChan != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Warnf("Marking link(%v) borked due to permanent "+
|
2018-10-08 15:46:14 +02:00
|
|
|
"failure", failure.shortChanID)
|
|
|
|
|
|
|
|
if err := lnChan.State().MarkBorked(); err != nil {
|
2022-05-28 10:10:09 +02:00
|
|
|
p.log.Errorf("Unable to mark channel %v borked: %v",
|
2018-10-08 15:46:14 +02:00
|
|
|
failure.shortChanID, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-31 01:54:53 +02:00
|
|
|
// Send an error to the peer explaining why we failed the channel.
|
|
|
|
if failure.linkErr.ShouldSendToPeer() {
|
|
|
|
// If SendData is set, send it to the peer. If not, we'll use
|
|
|
|
// the standard error messages in the payload. We only include
|
|
|
|
// sendData in the cases where the error data does not contain
|
|
|
|
// sensitive information.
|
|
|
|
data := []byte(failure.linkErr.Error())
|
|
|
|
if failure.linkErr.SendData != nil {
|
|
|
|
data = failure.linkErr.SendData
|
|
|
|
}
|
2023-08-31 01:48:00 +02:00

		var networkMsg lnwire.Message
		if failure.linkErr.Warning {
			networkMsg = &lnwire.Warning{
				ChanID: failure.chanID,
				Data:   data,
			}
		} else {
			networkMsg = &lnwire.Error{
				ChanID: failure.chanID,
				Data:   data,
			}
		}

		err := p.SendMessage(true, networkMsg)
		if err != nil {
			p.log.Errorf("unable to send msg to "+
				"remote peer: %v", err)
		}
	}

	// If the failure action is disconnect, then we'll execute that now. If
	// we had to send an error above, it was a sync call, so we expect the
	// message to be flushed on the wire by now.
	if failure.linkErr.FailureAction == htlcswitch.LinkFailureDisconnect {
		p.Disconnect(fmt.Errorf("link requested disconnect"))
	}
}

// fetchLinkFromKeyAndCid fetches a link from the switch via the remote's
// public key and the channel id.
func (p *Brontide) fetchLinkFromKeyAndCid(
	cid lnwire.ChannelID) htlcswitch.ChannelUpdateHandler {

	var chanLink htlcswitch.ChannelUpdateHandler

	// We don't need to check the error here, and can instead just loop
	// over the slice and return nil.
	links, _ := p.cfg.Switch.GetLinksByInterface(p.cfg.PubKeyBytes)
	for _, link := range links {
		if link.ChanID() == cid {
			chanLink = link
			break
		}
	}

	return chanLink
}

// finalizeChanClosure performs the final clean up steps once the cooperative
// closure transaction has been fully broadcast. The finalized closing state
// machine should be passed in. Once the transaction has been sufficiently
// confirmed, the channel will be marked as fully closed within the database,
// and any clients will be notified of updates to the closing state.
func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
	closeReq := chanCloser.CloseRequest()

	// First, we'll clear all indexes related to the channel in question.
	chanPoint := chanCloser.Channel().ChannelPoint()
	p.WipeChannel(&chanPoint)

	// Also clear the activeChanCloses map of this channel.
	cid := lnwire.NewChanIDFromOutPoint(chanPoint)
	delete(p.activeChanCloses, cid)

	// Next, we'll launch a goroutine which will request to be notified by
	// the ChainNotifier once the closure transaction obtains a single
	// confirmation.
	notifier := p.cfg.ChainNotifier

	// If any error happens during WaitForChanToClose, forward it to
	// closeReq. If this channel closure is not locally initiated, closeReq
	// will be nil, so just ignore the error.
	errChan := make(chan error, 1)
	if closeReq != nil {
		errChan = closeReq.Err
	}

	closingTx, err := chanCloser.ClosingTx()
	if err != nil {
		if closeReq != nil {
			p.log.Error(err)
			closeReq.Err <- err
		}
	}

	closingTxid := closingTx.TxHash()

	// If this is a locally requested shutdown, update the caller with a
	// new event detailing the current pending state of this request.
	if closeReq != nil {
		closeReq.Updates <- &PendingUpdate{
			Txid: closingTxid[:],
		}
	}

	localOut := chanCloser.LocalCloseOutput()
	remoteOut := chanCloser.RemoteCloseOutput()
	auxOut := chanCloser.AuxOutputs()
	go WaitForChanToClose(
		chanCloser.NegotiationHeight(), notifier, errChan,
		&chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, func() {
			// Respond to the local subsystem which requested the
			// channel closure.
			if closeReq != nil {
				closeReq.Updates <- &ChannelCloseUpdate{
					ClosingTxid:       closingTxid[:],
					Success:           true,
					LocalCloseOutput:  localOut,
					RemoteCloseOutput: remoteOut,
					AuxOutputs:        auxOut,
				}
			}
		},
	)
}

// WaitForChanToClose uses the passed notifier to wait until the channel has
// been detected as closed on chain and then concludes by executing the
// following actions: the channel point will be sent over the settleChan, and
// finally the callback will be executed. If any error is encountered within
// the function, then it will be sent over the errChan.
func WaitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
	errChan chan error, chanPoint *wire.OutPoint,
	closingTxID *chainhash.Hash, closeScript []byte, cb func()) {

	peerLog.Infof("Waiting for confirmation of close of ChannelPoint(%v) "+
		"with txid: %v", chanPoint, closingTxID)

	// TODO(roasbeef): add param for num needed confs
	confNtfn, err := notifier.RegisterConfirmationsNtfn(
		closingTxID, closeScript, 1, bestHeight,
	)
	if err != nil {
		if errChan != nil {
			errChan <- err
		}
		return
	}

	// In the case that the ChainNotifier is shutting down, all subscriber
	// notification channels will be closed, generating a nil receive.
	height, ok := <-confNtfn.Confirmed
	if !ok {
		return
	}

	// The channel has been closed, remove it from any active indexes, and
	// the database state.
	peerLog.Infof("ChannelPoint(%v) is now closed at "+
		"height %v", chanPoint, height.BlockHeight)

	// Finally, execute the closure callback to mark the confirmation of
	// the transaction closing the contract.
	cb()
}
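
// The following is an illustrative usage sketch, not part of the peer API:
// it shows how a caller might hand WaitForChanToClose a buffered error
// channel and a completion callback. The parameter values are assumed to
// come from an in-flight cooperative close; this helper is hypothetical.
func exampleWaitForClose(notifier chainntnfs.ChainNotifier,
	chanPoint *wire.OutPoint, closingTxid *chainhash.Hash,
	closeScript []byte, bestHeight uint32) <-chan error {

	// Buffer the channel so the error send inside the goroutine never
	// blocks.
	errChan := make(chan error, 1)

	go WaitForChanToClose(
		bestHeight, notifier, errChan, chanPoint, closingTxid,
		closeScript, func() {
			peerLog.Infof("Close of ChannelPoint(%v) confirmed",
				chanPoint)
		},
	)

	// The caller can select on this channel alongside its own quit signal.
	return errChan
}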

// WipeChannel removes the passed channel point from all indexes associated
// with the peer and the switch.
func (p *Brontide) WipeChannel(chanPoint *wire.OutPoint) {
	chanID := lnwire.NewChanIDFromOutPoint(*chanPoint)

	p.activeChannels.Delete(chanID)

	// Instruct the HtlcSwitch to close this link as the channel is no
	// longer active.
	p.cfg.Switch.RemoveLink(chanID)
}

// handleInitMsg handles the incoming init message which contains global and
// local feature vectors. If feature vectors are incompatible then disconnect.
func (p *Brontide) handleInitMsg(msg *lnwire.Init) error {
	// First, merge any features from the legacy global features field into
	// those presented in the local features fields.
	err := msg.Features.Merge(msg.GlobalFeatures)
	if err != nil {
		return fmt.Errorf("unable to merge legacy global features: %w",
			err)
	}

	// Then, finalize the remote feature vector providing the flattened
	// feature bit namespace.
	p.remoteFeatures = lnwire.NewFeatureVector(
		msg.Features, lnwire.Features,
	)

	// Now that we have their features loaded, we'll ensure that they
	// didn't set any required bits that we don't know of.
	err = feature.ValidateRequired(p.remoteFeatures)
	if err != nil {
		return fmt.Errorf("invalid remote features: %w", err)
	}

	// Ensure the remote party's feature vector contains all transitive
	// dependencies. We know ours are correct since they are validated
	// during the feature manager's instantiation.
	err = feature.ValidateDeps(p.remoteFeatures)
	if err != nil {
		return fmt.Errorf("invalid remote features: %w", err)
	}

	// Now that we know we understand their requirements, we'll check to
	// see if they don't support anything that we deem to be mandatory.
	if !p.remoteFeatures.HasFeature(lnwire.DataLossProtectRequired) {
		return fmt.Errorf("data loss protection required")
	}

	return nil
}
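
// A minimal sketch, separate from the peer API, of the same validation
// pipeline handleInitMsg runs above, applied to a hand-assembled feature
// vector. The specific feature bits chosen here are assumptions made only
// for illustration; this helper is hypothetical.
func exampleValidateFeatures() error {
	raw := lnwire.NewRawFeatureVector(
		lnwire.DataLossProtectRequired,
		lnwire.StaticRemoteKeyOptional,
	)
	fv := lnwire.NewFeatureVector(raw, lnwire.Features)

	// First reject any required bits we don't understand...
	if err := feature.ValidateRequired(fv); err != nil {
		return err
	}

	// ...then confirm all transitive feature dependencies are set.
	return feature.ValidateDeps(fv)
}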

// LocalFeatures returns the set of global features that has been advertised by
// the local node. This allows sub-systems that use this interface to gate their
// behavior off the set of negotiated feature bits.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) LocalFeatures() *lnwire.FeatureVector {
	return p.cfg.Features
}

// RemoteFeatures returns the set of global features that has been advertised by
// the remote node. This allows sub-systems that use this interface to gate
// their behavior off the set of negotiated feature bits.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) RemoteFeatures() *lnwire.FeatureVector {
	return p.remoteFeatures
}

// hasNegotiatedScidAlias returns true if we've negotiated the
// option-scid-alias feature bit with the peer.
func (p *Brontide) hasNegotiatedScidAlias() bool {
	peerHas := p.remoteFeatures.HasFeature(lnwire.ScidAliasOptional)
	localHas := p.cfg.Features.HasFeature(lnwire.ScidAliasOptional)
	return peerHas && localHas
}

// sendInitMsg sends the Init message to the remote peer. This message contains
// our currently supported local and global features.
func (p *Brontide) sendInitMsg(legacyChan bool) error {
	features := p.cfg.Features.Clone()
	legacyFeatures := p.cfg.LegacyFeatures.Clone()

	// If we have a legacy channel open with a peer, we downgrade static
	// remote required to optional in case the peer does not understand the
	// required feature bit. If we do not do this, the peer will reject our
	// connection because it does not understand a required feature bit,
	// and our channel will be unusable.
	if legacyChan && features.RequiresFeature(lnwire.StaticRemoteKeyRequired) {
		p.log.Infof("Legacy channel open with peer, " +
			"downgrading static remote required feature bit to " +
			"optional")

		// Unset and set in both the local and global features to
		// ensure both sets are consistent and mergeable by old and
		// new nodes.
		features.Unset(lnwire.StaticRemoteKeyRequired)
		legacyFeatures.Unset(lnwire.StaticRemoteKeyRequired)

		features.Set(lnwire.StaticRemoteKeyOptional)
		legacyFeatures.Set(lnwire.StaticRemoteKeyOptional)
	}

	msg := lnwire.NewInitMessage(
		legacyFeatures.RawFeatureVector,
		features.RawFeatureVector,
	)

	return p.writeMessage(msg)
}
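
// An illustrative sketch of the downgrade step above, applied to a
// standalone raw feature vector rather than the peer's config. The starting
// bit set is an assumption for the example; this helper is hypothetical.
func exampleDowngradeStaticRemoteKey() *lnwire.RawFeatureVector {
	fv := lnwire.NewRawFeatureVector(lnwire.StaticRemoteKeyRequired)

	// Swap the required bit for its optional counterpart so legacy peers
	// don't reject the connection over an unknown required feature.
	fv.Unset(lnwire.StaticRemoteKeyRequired)
	fv.Set(lnwire.StaticRemoteKeyOptional)

	return fv
}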

// resendChanSyncMsg will attempt to find a channel sync message for the closed
// channel and resend it to our peer.
func (p *Brontide) resendChanSyncMsg(cid lnwire.ChannelID) error {
	// If we already re-sent the message for this channel, we won't do it
	// again.
	if _, ok := p.resentChanSyncMsg[cid]; ok {
		return nil
	}

	// Check if we have any channel sync messages stored for this channel.
	c, err := p.cfg.ChannelDB.FetchClosedChannelForID(cid)
	if err != nil {
		return fmt.Errorf("unable to fetch channel sync messages for "+
			"peer %v: %v", p, err)
	}

	if c.LastChanSyncMsg == nil {
		return fmt.Errorf("no chan sync message stored for channel %v",
			cid)
	}

	if !c.RemotePub.IsEqual(p.IdentityKey()) {
		return fmt.Errorf("ignoring channel reestablish from "+
			"peer=%x", p.IdentityKey().SerializeCompressed())
	}

	p.log.Debugf("Re-sending channel sync message for channel %v to "+
		"peer", cid)

	if err := p.SendMessage(true, c.LastChanSyncMsg); err != nil {
		return fmt.Errorf("failed resending channel sync "+
			"message to peer %v: %v", p, err)
	}

	p.log.Debugf("Re-sent channel sync message for channel %v to peer",
		cid)

	// Note down that we sent the message, so we won't resend it again for
	// this connection.
	p.resentChanSyncMsg[cid] = struct{}{}

	return nil
}

// SendMessage sends a variadic number of high-priority messages to the remote
// peer. The first argument denotes if the method should block until the
// messages have been sent to the remote peer or an error is returned,
// otherwise it returns immediately after queuing.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) SendMessage(sync bool, msgs ...lnwire.Message) error {
	return p.sendMessage(sync, true, msgs...)
}

// SendMessageLazy sends a variadic number of low-priority messages to the
// remote peer. The first argument denotes if the method should block until
// the messages have been sent to the remote peer or an error is returned,
// otherwise it returns immediately after queueing.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) SendMessageLazy(sync bool, msgs ...lnwire.Message) error {
	return p.sendMessage(sync, false, msgs...)
}

// sendMessage queues a variadic number of messages using the passed priority
// to the remote peer. If sync is true, this method will block until the
// messages have been sent to the remote peer or an error is returned,
// otherwise it returns immediately after queueing.
func (p *Brontide) sendMessage(sync, priority bool, msgs ...lnwire.Message) error {
	// Add all incoming messages to the outgoing queue. A list of error
	// chans is populated for each message if the caller requested a sync
	// send.
	var errChans []chan error
	if sync {
		errChans = make([]chan error, 0, len(msgs))
	}
	for _, msg := range msgs {
		// If a sync send was requested, create an error chan to listen
		// for an ack from the writeHandler.
		var errChan chan error
		if sync {
			errChan = make(chan error, 1)
			errChans = append(errChans, errChan)
		}

		if priority {
			p.queueMsg(msg, errChan)
		} else {
			p.queueMsgLazy(msg, errChan)
		}
	}

	// Wait for all replies from the writeHandler. For async sends, this
	// will be a NOP as the list of error chans is nil.
	for _, errChan := range errChans {
		select {
		case err := <-errChan:
			return err
		case <-p.quit:
			return lnpeer.ErrPeerExiting
		case <-p.cfg.Quit:
			return lnpeer.ErrPeerExiting
		}
	}

	return nil
}
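
// A hedged usage sketch of the two entry points above. The ping message is
// an arbitrary example payload, chosen only because it is a small, valid
// lnwire.Message; this helper is hypothetical.
func exampleSendUsage(p *Brontide) error {
	ping := lnwire.NewPing(16)

	// Sync, high priority: block until the message is written to the
	// wire or the peer is shutting down.
	if err := p.SendMessage(true, ping); err != nil {
		return err
	}

	// Async, low priority: returns as soon as the message is queued.
	return p.SendMessageLazy(false, ping)
}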

// PubKey returns the pubkey of the peer in compressed serialized format.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) PubKey() [33]byte {
	return p.cfg.PubKeyBytes
}

// IdentityKey returns the public key of the remote peer.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) IdentityKey() *btcec.PublicKey {
	return p.cfg.Addr.IdentityKey
}

// Address returns the network address of the remote peer.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) Address() net.Addr {
	return p.cfg.Addr.Address
}

// AddNewChannel adds a new channel to the peer. The channel should fail to be
// added if the cancel channel is closed.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) AddNewChannel(newChan *lnpeer.NewChannel,
	cancel <-chan struct{}) error {

	errChan := make(chan error, 1)
	newChanMsg := &newChannelMsg{
		channel: newChan,
		err:     errChan,
	}

	select {
	case p.newActiveChannel <- newChanMsg:
	case <-cancel:
		return errors.New("canceled adding new channel")
	case <-p.quit:
		return lnpeer.ErrPeerExiting
	}

	// We pause here to wait for the peer to recognize the new channel
	// before we close the channel barrier corresponding to the channel.
	select {
	case err := <-errChan:
		return err
	case <-p.quit:
		return lnpeer.ErrPeerExiting
	}
}

// AddPendingChannel adds a pending open channel to the peer. The channel
// should fail to be added if the cancel channel is closed.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) AddPendingChannel(cid lnwire.ChannelID,
	cancel <-chan struct{}) error {

	errChan := make(chan error, 1)
	newChanMsg := &newChannelMsg{
		channelID: cid,
		err:       errChan,
	}

	select {
	case p.newPendingChannel <- newChanMsg:

	case <-cancel:
		return errors.New("canceled adding pending channel")

	case <-p.quit:
		return lnpeer.ErrPeerExiting
	}

	// We pause here to wait for the peer to recognize the new pending
	// channel before we close the channel barrier corresponding to the
	// channel.
	select {
	case err := <-errChan:
		return err

	case <-cancel:
		return errors.New("canceled adding pending channel")

	case <-p.quit:
		return lnpeer.ErrPeerExiting
	}
}

// RemovePendingChannel removes a pending open channel from the peer.
//
// NOTE: Part of the lnpeer.Peer interface.
func (p *Brontide) RemovePendingChannel(cid lnwire.ChannelID) error {
	errChan := make(chan error, 1)
	newChanMsg := &newChannelMsg{
		channelID: cid,
		err:       errChan,
	}

	select {
	case p.removePendingChannel <- newChanMsg:
	case <-p.quit:
		return lnpeer.ErrPeerExiting
	}

	// We pause here to wait for the peer to respond to the cancellation of
	// the pending channel before we close the channel barrier
	// corresponding to the channel.
	select {
	case err := <-errChan:
		return err

	case <-p.quit:
		return lnpeer.ErrPeerExiting
	}
}
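
// The three channel-registration methods above share one request/ack shape:
// hand the event loop a message carrying an error channel, then wait on that
// channel or on shutdown. A standalone sketch of the pattern; the channel
// parameters are hypothetical stand-ins for Brontide's internal fields.
func exampleRequestAck(reqs chan<- *newChannelMsg, quit <-chan struct{},
	cid lnwire.ChannelID) error {

	errChan := make(chan error, 1)
	req := &newChannelMsg{
		channelID: cid,
		err:       errChan,
	}

	// Deliver the request, unless we're shutting down.
	select {
	case reqs <- req:
	case <-quit:
		return lnpeer.ErrPeerExiting
	}

	// Wait for the event loop to ack (or reject) the request.
	select {
	case err := <-errChan:
		return err
	case <-quit:
		return lnpeer.ErrPeerExiting
	}
}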

// StartTime returns the time at which the connection was established if the
// peer started successfully, and zero otherwise.
func (p *Brontide) StartTime() time.Time {
	return p.startTime
}

// handleCloseMsg is called when a new cooperative channel closure related
// message is received from the remote peer. We'll use this message to advance
// the chan closer state machine.
func (p *Brontide) handleCloseMsg(msg *closeMsg) {
	link := p.fetchLinkFromKeyAndCid(msg.cid)

	// We'll now fetch the matching closing state machine in order to
	// continue, or finalize the channel closure process.
	chanCloser, err := p.fetchActiveChanCloser(msg.cid)
	if err != nil {
		// If the channel is not known to us, we'll simply ignore this
		// message.
		if err == ErrChannelNotFound {
			return
		}

		p.log.Errorf("Unable to respond to remote close msg: %v", err)

		errMsg := &lnwire.Error{
			ChanID: msg.cid,
			Data:   lnwire.ErrorData(err.Error()),
		}
		p.queueMsg(errMsg, nil)
		return
	}

	handleErr := func(err error) {
		err = fmt.Errorf("unable to process close msg: %w", err)
		p.log.Error(err)

		// As the negotiations failed, we'll reset the channel state
		// machine to ensure we react to on-chain events as normal.
		chanCloser.Channel().ResetState()

		if chanCloser.CloseRequest() != nil {
			chanCloser.CloseRequest().Err <- err
		}
		delete(p.activeChanCloses, msg.cid)

		p.Disconnect(err)
	}

	// Next, we'll process the next message using the target state machine.
	// We'll either continue negotiation, or halt.
	switch typed := msg.msg.(type) {
	case *lnwire.Shutdown:
		// Disable incoming adds immediately.
		if link != nil && !link.DisableAdds(htlcswitch.Incoming) {
			p.log.Warnf("Incoming link adds already disabled: %v",
				link.ChanID())
		}

		oShutdown, err := chanCloser.ReceiveShutdown(*typed)
		if err != nil {
			handleErr(err)
			return
		}

		oShutdown.WhenSome(func(msg lnwire.Shutdown) {
			// If the link is nil it means we can immediately queue
			// the Shutdown message since we don't have to wait for
			// commitment transaction synchronization.
			if link == nil {
				p.queueMsg(&msg, nil)
				return
			}

			// Immediately disallow any new HTLC's from being added
			// in the outgoing direction.
			if !link.DisableAdds(htlcswitch.Outgoing) {
				p.log.Warnf("Outgoing link adds already "+
					"disabled: %v", link.ChanID())
			}

			// When we have a Shutdown to send, we defer it till the
			// next time we send a CommitSig to remain spec
			// compliant.
			link.OnCommitOnce(htlcswitch.Outgoing, func() {
				p.queueMsg(&msg, nil)
			})
		})

		beginNegotiation := func() {
			oClosingSigned, err := chanCloser.BeginNegotiation()
			if err != nil {
				handleErr(err)
				return
			}

			oClosingSigned.WhenSome(func(msg lnwire.ClosingSigned) {
				p.queueMsg(&msg, nil)
			})
		}

		if link == nil {
			beginNegotiation()
		} else {
			// Now we register a flush hook to advance the
			// ChanCloser and possibly send out a ClosingSigned
			// when the link finishes draining.
			link.OnFlushedOnce(func() {
				// Remove link in goroutine to prevent deadlock.
				go p.cfg.Switch.RemoveLink(msg.cid)
				beginNegotiation()
			})
		}

	case *lnwire.ClosingSigned:
		oClosingSigned, err := chanCloser.ReceiveClosingSigned(*typed)
		if err != nil {
			handleErr(err)
			return
		}

		oClosingSigned.WhenSome(func(msg lnwire.ClosingSigned) {
			p.queueMsg(&msg, nil)
		})

	default:
		panic("impossible closeMsg type")
	}

	// If we haven't finished close negotiations, then we'll continue as we
	// can't yet finalize the closure.
	if _, err := chanCloser.ClosingTx(); err != nil {
		return
	}

	// Otherwise, we've agreed on a closing fee! In this case, we'll wrap
	// up the channel closure by notifying relevant sub-systems and
	// launching a goroutine to wait for close tx conf.
	p.finalizeChanClosure(chanCloser)
}
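
// Sketch of the fn.Option pattern the close flow above relies on: each state
// machine step returns an optional wire message, and WhenSome fires only
// when a message is actually present. The queue function is a hypothetical
// stand-in for p.queueMsg.
func exampleWhenSome(queue func(lnwire.Message)) {
	// An empty option: the callback below is never invoked.
	oNone := fn.None[lnwire.Shutdown]()
	oNone.WhenSome(func(msg lnwire.Shutdown) {
		queue(&msg)
	})

	// A populated option: the callback runs exactly once.
	oSome := fn.Some(lnwire.Shutdown{})
	oSome.WhenSome(func(msg lnwire.Shutdown) {
		queue(&msg)
	})
}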

// HandleLocalCloseChanReqs accepts a *htlcswitch.ChanClose and passes it onto
// the channelManager goroutine, which will shut down the link and possibly
// close the channel.
func (p *Brontide) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) {
	select {
	case p.localCloseChanReqs <- req:
		p.log.Info("Local close channel request is going to be " +
			"delivered to the peer")
	case <-p.quit:
		p.log.Info("Unable to deliver local close channel request " +
			"to peer")
	}
}

// NetAddress returns the network of the remote peer as an lnwire.NetAddress.
func (p *Brontide) NetAddress() *lnwire.NetAddress {
	return p.cfg.Addr
}

// Inbound is a getter for the Brontide's Inbound boolean in cfg.
func (p *Brontide) Inbound() bool {
	return p.cfg.Inbound
}

// ConnReq is a getter for the Brontide's connReq in cfg.
func (p *Brontide) ConnReq() *connmgr.ConnReq {
	return p.cfg.ConnReq
}

// ErrorBuffer is a getter for the Brontide's errorBuffer in cfg.
func (p *Brontide) ErrorBuffer() *queue.CircularBuffer {
	return p.cfg.ErrorBuffer
}

// SetAddress sets the remote peer's address given an address.
func (p *Brontide) SetAddress(address net.Addr) {
	p.cfg.Addr.Address = address
}

// ActiveSignal returns the peer's active signal.
func (p *Brontide) ActiveSignal() chan struct{} {
	return p.activeSignal
}

// Conn returns a pointer to the peer's connection struct.
func (p *Brontide) Conn() net.Conn {
	return p.cfg.Conn
}

// BytesReceived returns the number of bytes received from the peer.
func (p *Brontide) BytesReceived() uint64 {
	return atomic.LoadUint64(&p.bytesReceived)
}

// BytesSent returns the number of bytes sent to the peer.
func (p *Brontide) BytesSent() uint64 {
	return atomic.LoadUint64(&p.bytesSent)
}

// LastRemotePingPayload returns the last payload the remote party sent as part
// of their ping.
func (p *Brontide) LastRemotePingPayload() []byte {
	pingPayload := p.lastPingPayload.Load()
	if pingPayload == nil {
		return []byte{}
	}

	pingBytes, ok := pingPayload.(lnwire.PingPayload)
	if !ok {
		return nil
	}

	return pingBytes
}
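
// A minimal sketch of the atomic.Value load-and-assert pattern above, using
// a locally declared value instead of the peer's lastPingPayload field; the
// stored bytes are arbitrary and the helper itself is hypothetical.
func exampleAtomicPayload() []byte {
	var v atomic.Value
	v.Store(lnwire.PingPayload([]byte{0x01, 0x02}))

	stored := v.Load()
	if stored == nil {
		return []byte{}
	}

	// The type assertion guards against a foreign type being stored.
	payload, ok := stored.(lnwire.PingPayload)
	if !ok {
		return nil
	}

	return payload
}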

// attachChannelEventSubscription creates a channel event subscription and
// attaches the client to Brontide if the reenableTimeout is no greater than
// 1 minute.
func (p *Brontide) attachChannelEventSubscription() error {
	// If the timeout is greater than 1 minute, it's unlikely that the link
	// hasn't yet finished its reestablishment. Return nil without creating
	// the client to specify that we don't want to retry.
	if p.cfg.ChanActiveTimeout > 1*time.Minute {
		return nil
	}

	// When the reenable timeout is less than 1 minute, it's likely the
	// channel link hasn't finished its reestablishment yet. In that case,
	// we'll give it a second chance by subscribing to the channel update
	// events. Upon receiving the `ActiveLinkEvent`, we'll then request
	// enabling the channel again.
	sub, err := p.cfg.ChannelNotifier.SubscribeChannelEvents()
	if err != nil {
		return fmt.Errorf("SubscribeChannelEvents failed: %w", err)
	}

	p.channelEventClient = sub

	return nil
}

// updateNextRevocation updates the existing channel's next revocation if it's
// nil.
func (p *Brontide) updateNextRevocation(c *channeldb.OpenChannel) error {
	chanPoint := c.FundingOutpoint
	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

	// Read the current channel.
	currentChan, loaded := p.activeChannels.Load(chanID)

	// currentChan should exist, but we perform a check anyway to avoid nil
	// pointer dereference.
	if !loaded {
		return fmt.Errorf("missing active channel with chanID=%v",
			chanID)
	}

	// currentChan should not be nil, but we perform a check anyway to
	// avoid nil pointer dereference.
	if currentChan == nil {
		return fmt.Errorf("found nil active channel with chanID=%v",
			chanID)
	}

	// If we're being sent a new channel, and our existing channel doesn't
	// have the next revocation, then we need to update the current
	// existing channel.
	if currentChan.RemoteNextRevocation() != nil {
		return nil
	}

	p.log.Infof("Processing retransmitted ChannelReady for "+
		"ChannelPoint(%v)", chanPoint)

	nextRevoke := c.RemoteNextRevocation

	err := currentChan.InitNextRevocation(nextRevoke)
	if err != nil {
		return fmt.Errorf("unable to init next revocation: %w", err)
	}

	return nil
}

// addActiveChannel adds a new active channel to the `activeChannels` map. It
// takes a `channeldb.OpenChannel`, creates a `lnwallet.LightningChannel` from
// it and assembles it with a channel link.
func (p *Brontide) addActiveChannel(c *lnpeer.NewChannel) error {
	chanPoint := c.FundingOutpoint
	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

	// If we've reached this point, there are two possible scenarios. If
	// the channel was in the active channels map as nil, then it was
	// loaded from disk and we need to send reestablish. Else, it was not
	// loaded from disk and we don't need to send reestablish as this is a
	// fresh channel.
	shouldReestablish := p.isLoadedFromDisk(chanID)

	chanOpts := c.ChanOpts
	if shouldReestablish {
		// If we have to do the reestablish dance for this channel,
		// ensure that we don't try to call InitRemoteMusigNonces twice
		// by calling SkipNonceInit.
		chanOpts = append(chanOpts, lnwallet.WithSkipNonceInit())
	}

	p.cfg.AuxLeafStore.WhenSome(func(s lnwallet.AuxLeafStore) {
		chanOpts = append(chanOpts, lnwallet.WithLeafStore(s))
	})
	p.cfg.AuxSigner.WhenSome(func(s lnwallet.AuxSigner) {
		chanOpts = append(chanOpts, lnwallet.WithAuxSigner(s))
	})

	// If not already active, we'll add this channel to the set of active
	// channels, so we can look it up later easily according to its channel
	// ID.
	lnChan, err := lnwallet.NewLightningChannel(
		p.cfg.Signer, c.OpenChannel, p.cfg.SigPool, chanOpts...,
	)
	if err != nil {
		return fmt.Errorf("unable to create LightningChannel: %w", err)
	}

	// Store the channel in the activeChannels map.
	p.activeChannels.Store(chanID, lnChan)

	p.log.Infof("New channel active ChannelPoint(%v) with peer", chanPoint)

	// Next, we'll assemble a ChannelLink along with the necessary items it
	// needs to function.
	chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents(chanPoint)
	if err != nil {
		return fmt.Errorf("unable to subscribe to chain events: %w",
			err)
	}

	// We'll query the channel DB for the new channel's initial forwarding
	// policies to determine the policy we start out with.
	initialPolicy, err := p.cfg.ChannelDB.GetInitialForwardingPolicy(chanID)
	if err != nil {
		return fmt.Errorf("unable to query for initial forwarding "+
			"policy: %v", err)
	}

	// Create the link and add it to the switch.
	err = p.addLink(
		&chanPoint, lnChan, initialPolicy, chainEvents,
		shouldReestablish, fn.None[lnwire.Shutdown](),
	)
	if err != nil {
		return fmt.Errorf("can't register new channel link(%v) with "+
			"peer", chanPoint)
	}

	return nil
}
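
// Sketch of the functional-option assembly used above: a base option list is
// extended only when an optional aux component is present. The input option
// value and the helper itself are assumptions for illustration, and the
// ChannelOpt type name mirrors lnwallet's functional options.
func exampleChanOpts(
	auxLeafStore fn.Option[lnwallet.AuxLeafStore]) []lnwallet.ChannelOpt {

	opts := []lnwallet.ChannelOpt{lnwallet.WithSkipNonceInit()}

	// Append the leaf store option only if one was configured.
	auxLeafStore.WhenSome(func(s lnwallet.AuxLeafStore) {
		opts = append(opts, lnwallet.WithLeafStore(s))
	})

	return opts
}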

// handleNewActiveChannel handles a `newChannelMsg` request. Depending on
// whether we know this channel ID or not, we'll either add it to the
// `activeChannels` map or init the next revocation for it.
func (p *Brontide) handleNewActiveChannel(req *newChannelMsg) {
	newChan := req.channel
	chanPoint := newChan.FundingOutpoint
	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

	// Only update RemoteNextRevocation if the channel is in the
	// activeChannels map and if we added the link to the switch. Only
	// active channels will be added to the switch.
	if p.isActiveChannel(chanID) {
		p.log.Infof("Already have ChannelPoint(%v), ignoring",
			chanPoint)

		// Handle it and close the err chan on the request.
		close(req.err)

		// Update the next revocation point.
		err := p.updateNextRevocation(newChan.OpenChannel)
		if err != nil {
			p.log.Errorf(err.Error())
		}

		return
	}

	// This is a new channel, we now add it to the map.
	if err := p.addActiveChannel(req.channel); err != nil {
		// Log and send back the error to the request.
		p.log.Errorf(err.Error())
		req.err <- err

		return
	}

	// Close the err chan if everything went fine.
	close(req.err)
}

// handleNewPendingChannel takes a `newChannelMsg` request and adds it to the
// `activeChannels` map with a nil value. This pending channel will be saved
// as it may become active in the future. Once active, the funding manager
// will send it again via `AddNewChannel`, and we'd handle the link creation
// there.
func (p *Brontide) handleNewPendingChannel(req *newChannelMsg) {
	defer close(req.err)

	chanID := req.channelID

	// If we already have this channel, something is wrong with the funding
	// flow as it will only be marked as active after `ChannelReady` is
	// handled. In this case, we will do nothing but log an error, just in
	// case this is a legit channel.
	if p.isActiveChannel(chanID) {
		p.log.Errorf("Channel(%v) is already active, ignoring "+
			"pending channel request", chanID)

		return
	}

	// The channel has already been added, we will do nothing and return.
	if p.isPendingChannel(chanID) {
		p.log.Infof("Channel(%v) is already added, ignoring "+
			"pending channel request", chanID)

		return
	}

	// This is a new channel, we now add it to the map `activeChannels`
	// with nil value and mark it as a newly added channel in
	// `addedChannels`.
	p.activeChannels.Store(chanID, nil)
	p.addedChannels.Store(chanID, struct{}{})
}
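
// Sketch of the nil-value sentinel convention above: a pending channel is
// stored with a nil *lnwallet.LightningChannel, so "present but nil" means
// pending while "present and non-nil" means active. The map type mirrors
// Brontide's activeChannels field; the helper itself and the assumption that
// a zero-value lnutils.SyncMap is usable are illustrative only.
func examplePendingSentinel(cid lnwire.ChannelID) {
	var chans lnutils.SyncMap[lnwire.ChannelID, *lnwallet.LightningChannel]

	// Register the channel as pending.
	chans.Store(cid, nil)

	// Later, distinguish pending from active on load.
	if lnChan, ok := chans.Load(cid); ok && lnChan == nil {
		peerLog.Debugf("Channel(%v) is pending, not yet active", cid)
	}
}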

// handleRemovePendingChannel takes a `newChannelMsg` request and removes it
// from the `activeChannels` map. The request will be ignored if the channel
// is considered active by Brontide. Noop if the channel ID cannot be found.
func (p *Brontide) handleRemovePendingChannel(req *newChannelMsg) {
	defer close(req.err)

	chanID := req.channelID

	// If we already have this channel, something is wrong with the funding
	// flow as it will only be marked as active after `ChannelReady` is
	// handled. In this case, we will log an error and exit.
	if p.isActiveChannel(chanID) {
		p.log.Errorf("Channel(%v) is active, ignoring remove request",
			chanID)
		return
	}

	// The channel has not been added yet, we will log a warning as there
	// is an unexpected call from the funding manager.
	if !p.isPendingChannel(chanID) {
		p.log.Warnf("Channel(%v) not found, removing it anyway", chanID)
	}

	// Remove the record of this pending channel.
	p.activeChannels.Delete(chanID)
	p.addedChannels.Delete(chanID)
}

// sendLinkUpdateMsg sends a message that updates the channel to the
// channel's message stream.
func (p *Brontide) sendLinkUpdateMsg(cid lnwire.ChannelID, msg lnwire.Message) {
	p.log.Tracef("Sending link update msg=%v", msg.MsgType())

	chanStream, ok := p.activeMsgStreams[cid]
	if !ok {
		// If a stream hasn't yet been created, then we'll do so, add
		// it to the map, and finally start it.
		chanStream = newChanMsgStream(p, cid)
		p.activeMsgStreams[cid] = chanStream
		chanStream.Start()

		// Stop the stream when quit.
		go func() {
			<-p.quit
			chanStream.Stop()
		}()
	}

	// With the stream obtained, add the message to the stream so we can
	// continue processing messages.
	chanStream.AddMsg(msg)
}

// scaleTimeout multiplies the argument duration by a constant factor depending
// on various heuristics. Currently this is only used to check whether our peer
// appears to be connected over Tor and relaxes the timeout deadline. However,
// this is subject to change and should be treated as opaque.
func (p *Brontide) scaleTimeout(timeout time.Duration) time.Duration {
	if p.isTorConnection {
		return timeout * time.Duration(torTimeoutMultiplier)
	}

	return timeout
}
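
// A hedged usage sketch of the scaling above: stretch a read deadline for
// Tor-routed peers before a blocking read. The 10-second base timeout is an
// arbitrary assumption, and this helper is hypothetical.
func exampleScaledDeadline(p *Brontide, conn net.Conn) error {
	deadline := time.Now().Add(p.scaleTimeout(10 * time.Second))
	return conn.SetReadDeadline(deadline)
}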