Merge pull request #9314 from ellemouton/slog1

build+lnd+docs: start using slog and add commit_hash to log lines
Oliver Gugger 2024-12-02 09:53:31 +01:00 committed by GitHub
commit 0474b4ff20
139 changed files with 1010 additions and 381 deletions

.custom-gcl.yml (new file)
View File

@ -0,0 +1,4 @@
version: v1.57.0
plugins:
- module: 'github.com/lightningnetwork/lnd/tools/linters'
path: ./tools/linters
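(For context, not part of the diff: with golangci-lint v1.57+, running `golangci-lint custom` reads this file and builds a `custom-gcl` binary that bundles the `ll` plugin from `tools/linters`; the Makefile change below then runs lints through that binary.)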

.gitignore
View File

@ -44,6 +44,9 @@ cmd/cmd
*.key
*.hex
# Ignore the custom linter binary if it is built.
custom-gcl
cmd/lncli/lncli
# Files from mobile build.

View File

@ -20,6 +20,18 @@ run:
- integration
linters-settings:
custom:
ll:
type: "module"
description: "Custom lll linter with 'S' log line exclusion."
settings:
# Max line length, lines longer will be reported.
line-length: 80
# Tab width in spaces.
tab-width: 8
# The regex that we will use to detect the start of an `S` log line.
log-regex: "^\\s*.*(L|l)og\\.(Info|Debug|Trace|Warn|Error|Critical)S\\("
errorlint:
# Check for incorrect fmt.Errorf error wrapping.
errorf: true
@ -45,16 +57,11 @@ linters-settings:
excludes:
- G402 # Look for bad TLS connection settings.
- G306 # Poor file permissions used when writing to a new file.
- G601 # Implicit memory aliasing in for loop.
staticcheck:
checks: ["-SA1019"]
lll:
# Max line length, lines longer will be reported.
line-length: 80
# Tab width in spaces.
tab-width: 8
funlen:
# Checks the number of lines in a function.
# If lower than 0, disable the check.
@ -105,6 +112,10 @@ linters-settings:
linters:
enable-all: true
disable:
# We instead use our own custom line length linter called `ll` since
# then we can ignore log lines.
- lll
# Global variables are used in many places throughout the code base.
- gochecknoglobals
@ -170,7 +181,7 @@ linters:
- wrapcheck
# Allow dynamic errors.
- err113
- goerr113
# We use ErrXXX instead.
- errname
@ -186,7 +197,6 @@ linters:
# The linter is too aggressive and doesn't add much value since reviewers
# will also catch magic numbers that make sense to extract.
- gomnd
- mnd
# Some of the tests cannot be parallelized. On the other hand, we don't
# gain much performance with this check so we disable it for now until
@ -204,6 +214,7 @@ linters:
- depguard
- gosmopolitan
- intrange
- goconst
issues:
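For illustration (a sketch, not part of the config), the first call below matches the `log-regex` above and is therefore exempt from the 80-character check, while the plain formatted call is not:

```go
// Exempt: a structured "S" log call, matched by log-regex, so the `ll`
// linter ignores its line length.
log.InfoS(ctx, "Channel open performed", "user_id", userID, btclog.Fmt("amount", "%.8f", 0.00154))

// Not exempt: a classic formatted log call must still fit in 80 chars.
log.Infof("Channel open performed by user %d", userID)
```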

View File

@ -319,7 +319,7 @@ check-go-version: check-go-version-dockerfile check-go-version-yaml
#? lint-source: Run static code analysis
lint-source: docker-tools
@$(call print, "Linting source.")
$(DOCKER_TOOLS) golangci-lint run -v $(LINT_WORKERS)
$(DOCKER_TOOLS) custom-gcl run -v $(LINT_WORKERS)
#? lint: Run static code analysis
lint: check-go-version lint-source

View File

@ -23,10 +23,11 @@ const (
// LogConfig holds logging configuration options.
//
//nolint:lll
//nolint:ll
type LogConfig struct {
Console *consoleLoggerCfg `group:"console" namespace:"console" description:"The logger writing to stdout and stderr."`
File *FileLoggerConfig `group:"file" namespace:"file" description:"The logger writing to LND's standard log file."`
Console *consoleLoggerCfg `group:"console" namespace:"console" description:"The logger writing to stdout and stderr."`
File *FileLoggerConfig `group:"file" namespace:"file" description:"The logger writing to LND's standard log file."`
NoCommitHash bool `long:"no-commit-hash" description:"If set, the commit-hash of the current build will not be included in log lines by default."`
}
// Validate validates the LogConfig struct values.
@ -41,7 +42,7 @@ func (c *LogConfig) Validate() error {
// LoggerConfig holds options for a particular logger.
//
//nolint:lll
//nolint:ll
type LoggerConfig struct {
Disable bool `long:"disable" description:"Disable this logger."`
NoTimestamps bool `long:"no-timestamps" description:"Omit timestamps from log lines."`
@ -89,7 +90,7 @@ func (cfg *LoggerConfig) HandlerOptions() []btclog.HandlerOption {
// FileLoggerConfig extends LoggerConfig with specific log file options.
//
//nolint:lll
//nolint:ll
type FileLoggerConfig struct {
LoggerConfig
Compressor string `long:"compressor" description:"Compression algorithm to use when rotating logs." choice:"gzip" choice:"zstd"`

View File

@ -22,7 +22,7 @@ const (
// consoleLoggerCfg extends the LoggerConfig struct by adding a Color option
// which is only available for a console logger.
//
//nolint:lll
//nolint:ll
type consoleLoggerCfg struct {
LoggerConfig
Style bool `long:"style" description:"If set, the output will be styled with color and fonts"`

View File

@ -6,7 +6,7 @@ package build
// consoleLoggerCfg embeds the LoggerConfig struct along with any extensions
// specific to a production deployment.
//
//nolint:lll
//nolint:ll
type consoleLoggerCfg struct {
LoggerConfig
}

View File

@ -6,9 +6,13 @@
package build
import (
"context"
"encoding/hex"
"fmt"
"runtime/debug"
"strings"
"github.com/btcsuite/btclog/v2"
)
var (
@ -101,3 +105,28 @@ func Tags() []string {
return strings.Split(RawTags, ",")
}
// WithBuildInfo derives a child context with the build information attached as
// attributes. At the moment, this only includes the current build's commit
// hash.
func WithBuildInfo(ctx context.Context, cfg *LogConfig) (context.Context,
error) {
if cfg.NoCommitHash {
return ctx, nil
}
// Convert the commit hash to a byte slice.
commitHash, err := hex.DecodeString(CommitHash)
if err != nil {
return nil, fmt.Errorf("unable to decode commit hash: %w", err)
}
// Include the first 3 bytes of the commit hash in the context as a
// slog attribute.
if len(commitHash) > 3 {
commitHash = commitHash[:3]
}
return btclog.WithCtx(ctx, btclog.Hex("rev", commitHash)), nil
}
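For context (not part of the diff), a minimal sketch of how a caller might use `WithBuildInfo`, assuming btclog v2's default handler constructors:

```go
package main

import (
	"context"
	"os"

	"github.com/btcsuite/btclog/v2"
	"github.com/lightningnetwork/lnd/build"
)

func main() {
	// Hypothetical setup: a basic btclog v2 logger writing to stdout.
	log := btclog.NewSLogger(btclog.NewDefaultHandler(os.Stdout))

	// Derive a context carrying the abbreviated commit hash. With a
	// zero-value LogConfig, the hash is included by default.
	ctx, err := build.WithBuildInfo(
		context.Background(), &build.LogConfig{},
	)
	if err != nil {
		panic(err)
	}

	// Structured log calls made with this context now carry the build's
	// commit hash under the "rev" key.
	log.InfoS(ctx, "Server starting")
}
```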

View File

@ -256,7 +256,7 @@ out:
// TODO(wilmer): add retry logic if rescan fails?
b.wg.Add(1)
//nolint:lll
//nolint:ll
go func(msg *chainntnfs.HistoricalConfDispatch) {
defer b.wg.Done()
@ -301,7 +301,7 @@ out:
// TODO(wilmer): add retry logic if rescan fails?
b.wg.Add(1)
//nolint:lll
//nolint:ll
go func(msg *chainntnfs.HistoricalSpendDispatch) {
defer b.wg.Done()

View File

@ -366,7 +366,7 @@ out:
// TODO(wilmer): add retry logic if rescan fails?
b.wg.Add(1)
//nolint:lll
//nolint:ll
go func(msg *chainntnfs.HistoricalConfDispatch) {
defer b.wg.Done()

View File

@ -439,7 +439,7 @@ func (n *NeutrinoNotifier) notificationDispatcher() {
// potentially long rescans.
n.wg.Add(1)
//nolint:lll
//nolint:ll
go func(msg *chainntnfs.HistoricalConfDispatch) {
defer n.wg.Done()

View File

@ -222,7 +222,7 @@ type ChainControl struct {
// the parts that can be purely constructed from the passed in global
// configuration and doesn't need any wallet instance yet.
//
//nolint:lll
//nolint:ll
func NewPartialChainControl(cfg *Config) (*PartialChainControl, func(), error) {
cc := &PartialChainControl{
Cfg: cfg,

View File

@ -249,7 +249,7 @@ func (r *RPCAcceptor) sendAcceptRequests(errChan chan error,
acceptRequests := make(map[[32]byte]*chanAcceptInfo)
for {
//nolint:lll
//nolint:ll
select {
// Consume requests passed to us from our Accept() function and
// send them into our stream.

View File

@ -1691,7 +1691,7 @@ var (
// DeriveMusig2Shachain derives a shachain producer for the taproot channel
// from normal shachain revocation root.
func DeriveMusig2Shachain(revRoot shachain.Producer) (shachain.Producer, error) { //nolint:lll
func DeriveMusig2Shachain(revRoot shachain.Producer) (shachain.Producer, error) { //nolint:ll
// In order to obtain the revocation root hash to create the taproot
// revocation, we'll encode the producer into a buffer, then use that
// to derive the shachain root needed.

View File

@ -55,7 +55,7 @@ type FailCode uint16
// The currently defined onion failure types within this current version of the
// Lightning protocol.
//
//nolint:lll
//nolint:ll
const (
CodeNone FailCode = 0
CodeInvalidRealm = FlagBadOnion | 1

View File

@ -118,7 +118,7 @@ var (
},
}
//nolint:lll
//nolint:ll
resultNew1Hop1 = &mcHop{
channelID: tlv.NewPrimitiveRecord[tlv.TlvType0, uint64](100),
pubKeyBytes: tlv.NewRecordT[tlv.TlvType1](testPub),
@ -128,14 +128,14 @@ var (
),
}
//nolint:lll
//nolint:ll
resultNew1Hop2 = &mcHop{
channelID: tlv.NewPrimitiveRecord[tlv.TlvType0, uint64](800),
pubKeyBytes: tlv.NewRecordT[tlv.TlvType1](testPub),
amtToFwd: tlv.NewPrimitiveRecord[tlv.TlvType2, lnwire.MilliSatoshi](4),
}
//nolint:lll
//nolint:ll
resultNew1Hop3 = &mcHop{
channelID: tlv.NewPrimitiveRecord[tlv.TlvType0, uint64](800),
pubKeyBytes: tlv.NewRecordT[tlv.TlvType1](testPub),
@ -145,7 +145,7 @@ var (
),
}
//nolint:lll
//nolint:ll
resultNew1Hop4 = &mcHop{
channelID: tlv.NewPrimitiveRecord[tlv.TlvType0, uint64](800),
pubKeyBytes: tlv.NewRecordT[tlv.TlvType1](testPub),
@ -158,7 +158,7 @@ var (
),
}
//nolint:lll
//nolint:ll
resultNew2Hop1 = &mcHop{
channelID: tlv.NewPrimitiveRecord[tlv.TlvType0, uint64](800),
pubKeyBytes: tlv.NewRecordT[tlv.TlvType1](testPub),
@ -171,7 +171,7 @@ var (
),
}
//nolint:lll
//nolint:ll
resultNew1 = paymentResultNew{
id: 0,
timeFwd: tlv.NewPrimitiveRecord[tlv.TlvType0](
@ -200,7 +200,7 @@ var (
}),
}
//nolint:lll
//nolint:ll
resultNew2 = paymentResultNew{
id: 2,
timeFwd: tlv.NewPrimitiveRecord[tlv.TlvType0, uint64](

View File

@ -380,7 +380,7 @@ func (p *PaymentControl) RegisterAttempt(paymentHash lntypes.Hash,
if attempt.Route.FinalHop().TotalAmtMsat !=
h.Route.FinalHop().TotalAmtMsat {
//nolint:lll
//nolint:ll
return ErrBlindedPaymentTotalAmountMismatch
}

View File

@ -169,7 +169,7 @@ func estimateFeeRate(ctx *cli.Context) error {
SatPerKw int64 `json:"sat_per_kw"`
SatPerVByte int64 `json:"sat_per_vbyte"`
MinRelayFeeSatPerKw int64 `json:"min_relay_fee_sat_per_kw"`
//nolint:lll
//nolint:ll
MinRelayFeeSatPerVByte int64 `json:"min_relay_fee_sat_per_vbyte"`
}{
SatPerKw: int64(rateKW),

View File

@ -290,7 +290,7 @@ var (
// See LoadConfig for further details regarding the configuration
// loading+parsing process.
//
//nolint:lll
//nolint:ll
type Config struct {
ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
@ -523,7 +523,7 @@ type Config struct {
// for more details. Any value of 0 means we use the gRPC internal default
// values.
//
//nolint:lll
//nolint:ll
type GRPCConfig struct {
// ServerPingTime is a duration for the amount of time of no activity
// after which the server pings the client to see if the transport is
@ -549,7 +549,7 @@ type GRPCConfig struct {
// DefaultConfig returns all default values for the Config struct.
//
//nolint:lll
//nolint:ll
func DefaultConfig() Config {
return Config{
LndDir: DefaultLndDir,

View File

@ -1372,7 +1372,7 @@ func newRetributionInfo(chanPoint *wire.OutPoint,
// For taproot outputs, we also need to hold onto the second
// level tap tweak as well.
//nolint:lll
//nolint:ll
htlcOutput.secondLevelTapTweak = breachedHtlc.SecondLevelTapTweak
breachedOutputs = append(breachedOutputs, htlcOutput)
@ -1729,7 +1729,7 @@ func taprootBriefcaseFromRetInfo(retInfo *retributionInfo) *taprootBriefcase {
// For spending from our commitment output on the remote
// commitment, we'll need to stash the control block.
case input.TaprootRemoteCommitSpend:
//nolint:lll
//nolint:ll
tapCase.CtrlBlocks.Val.CommitSweepCtrlBlock = bo.signDesc.ControlBlock
bo.resolutionBlob.WhenSome(func(blob tlv.Blob) {
@ -1743,7 +1743,7 @@ func taprootBriefcaseFromRetInfo(retInfo *retributionInfo) *taprootBriefcase {
// To spend the revoked output again, we'll store the same
// control block value as above, but in a different place.
case input.TaprootCommitmentRevoke:
//nolint:lll
//nolint:ll
tapCase.CtrlBlocks.Val.RevokeSweepCtrlBlock = bo.signDesc.ControlBlock
bo.resolutionBlob.WhenSome(func(blob tlv.Blob) {
@ -1765,10 +1765,10 @@ func taprootBriefcaseFromRetInfo(retInfo *retributionInfo) *taprootBriefcase {
copy(firstLevelTweak[:], bo.signDesc.TapTweak)
secondLevelTweak := bo.secondLevelTapTweak
//nolint:lll
//nolint:ll
tapCase.TapTweaks.Val.BreachedHtlcTweaks[resID] = firstLevelTweak
//nolint:lll
//nolint:ll
tapCase.TapTweaks.Val.BreachedSecondLevelHltcTweaks[resID] = secondLevelTweak
}
}
@ -1788,7 +1788,7 @@ func applyTaprootRetInfo(tapCase *taprootBriefcase,
// For spending from our commitment output on the remote
// commitment, we'll apply the control block.
case input.TaprootRemoteCommitSpend:
//nolint:lll
//nolint:ll
bo.signDesc.ControlBlock = tapCase.CtrlBlocks.Val.CommitSweepCtrlBlock
tapCase.SettledCommitBlob.WhenSomeV(
@ -1800,7 +1800,7 @@ func applyTaprootRetInfo(tapCase *taprootBriefcase,
// To spend the revoked output again, we'll apply the same
// control block value as above, but to a different place.
case input.TaprootCommitmentRevoke:
//nolint:lll
//nolint:ll
bo.signDesc.ControlBlock = tapCase.CtrlBlocks.Val.RevokeSweepCtrlBlock
tapCase.BreachedCommitBlob.WhenSomeV(
@ -1816,7 +1816,7 @@ func applyTaprootRetInfo(tapCase *taprootBriefcase,
case input.TaprootHtlcOfferedRevoke:
resID := newResolverID(bo.OutPoint())
//nolint:lll
//nolint:ll
tap1, ok := tapCase.TapTweaks.Val.BreachedHtlcTweaks[resID]
if !ok {
return fmt.Errorf("unable to find taproot "+
@ -1824,7 +1824,7 @@ func applyTaprootRetInfo(tapCase *taprootBriefcase,
}
bo.signDesc.TapTweak = tap1[:]
//nolint:lll
//nolint:ll
tap2, ok := tapCase.TapTweaks.Val.BreachedSecondLevelHltcTweaks[resID]
if !ok {
return fmt.Errorf("unable to find taproot "+

View File

@ -1561,7 +1561,7 @@ func encodeTaprootAuxData(w io.Writer, c *ContractResolutions) error {
if c.CommitResolution != nil {
commitResolution := c.CommitResolution
commitSignDesc := commitResolution.SelfOutputSignDesc
//nolint:lll
//nolint:ll
tapCase.CtrlBlocks.Val.CommitSweepCtrlBlock = commitSignDesc.ControlBlock
c.CommitResolution.ResolutionBlob.WhenSome(func(b []byte) {
@ -1587,21 +1587,21 @@ func encodeTaprootAuxData(w io.Writer, c *ContractResolutions) error {
resID = newResolverID(
htlc.SignedSuccessTx.TxIn[0].PreviousOutPoint,
)
//nolint:lll
//nolint:ll
tapCase.CtrlBlocks.Val.SecondLevelCtrlBlocks[resID] = ctrlBlock
// For HTLCs we need to go to the second level for, we
// also need to store the control block needed to
// publish the second level transaction.
if htlc.SignDetails != nil {
//nolint:lll
//nolint:ll
bridgeCtrlBlock := htlc.SignDetails.SignDesc.ControlBlock
//nolint:lll
//nolint:ll
tapCase.CtrlBlocks.Val.IncomingHtlcCtrlBlocks[resID] = bridgeCtrlBlock
}
} else {
resID = newResolverID(htlc.ClaimOutpoint)
//nolint:lll
//nolint:ll
tapCase.CtrlBlocks.Val.IncomingHtlcCtrlBlocks[resID] = ctrlBlock
}
@ -1624,23 +1624,23 @@ func encodeTaprootAuxData(w io.Writer, c *ContractResolutions) error {
resID = newResolverID(
htlc.SignedTimeoutTx.TxIn[0].PreviousOutPoint,
)
//nolint:lll
//nolint:ll
tapCase.CtrlBlocks.Val.SecondLevelCtrlBlocks[resID] = ctrlBlock
// For HTLCs we need to go to the second level for, we
// also need to store the control block needed to
// publish the second level transaction.
//
//nolint:lll
//nolint:ll
if htlc.SignDetails != nil {
//nolint:lll
//nolint:ll
bridgeCtrlBlock := htlc.SignDetails.SignDesc.ControlBlock
//nolint:lll
//nolint:ll
tapCase.CtrlBlocks.Val.OutgoingHtlcCtrlBlocks[resID] = bridgeCtrlBlock
}
} else {
resID = newResolverID(htlc.ClaimOutpoint)
//nolint:lll
//nolint:ll
tapCase.CtrlBlocks.Val.OutgoingHtlcCtrlBlocks[resID] = ctrlBlock
}
@ -1689,11 +1689,11 @@ func decodeTapRootAuxData(r io.Reader, c *ContractResolutions) error {
htlc.SignedSuccessTx.TxIn[0].PreviousOutPoint,
)
//nolint:lll
//nolint:ll
ctrlBlock := tapCase.CtrlBlocks.Val.SecondLevelCtrlBlocks[resID]
htlc.SweepSignDesc.ControlBlock = ctrlBlock
//nolint:lll
//nolint:ll
if htlc.SignDetails != nil {
bridgeCtrlBlock := tapCase.CtrlBlocks.Val.IncomingHtlcCtrlBlocks[resID]
htlc.SignDetails.SignDesc.ControlBlock = bridgeCtrlBlock
@ -1701,7 +1701,7 @@ func decodeTapRootAuxData(r io.Reader, c *ContractResolutions) error {
} else {
resID = newResolverID(htlc.ClaimOutpoint)
//nolint:lll
//nolint:ll
ctrlBlock := tapCase.CtrlBlocks.Val.IncomingHtlcCtrlBlocks[resID]
htlc.SweepSignDesc.ControlBlock = ctrlBlock
}
@ -1722,11 +1722,11 @@ func decodeTapRootAuxData(r io.Reader, c *ContractResolutions) error {
htlc.SignedTimeoutTx.TxIn[0].PreviousOutPoint,
)
//nolint:lll
//nolint:ll
ctrlBlock := tapCase.CtrlBlocks.Val.SecondLevelCtrlBlocks[resID]
htlc.SweepSignDesc.ControlBlock = ctrlBlock
//nolint:lll
//nolint:ll
if htlc.SignDetails != nil {
bridgeCtrlBlock := tapCase.CtrlBlocks.Val.OutgoingHtlcCtrlBlocks[resID]
htlc.SignDetails.SignDesc.ControlBlock = bridgeCtrlBlock
@ -1734,7 +1734,7 @@ func decodeTapRootAuxData(r io.Reader, c *ContractResolutions) error {
} else {
resID = newResolverID(htlc.ClaimOutpoint)
//nolint:lll
//nolint:ll
ctrlBlock := tapCase.CtrlBlocks.Val.OutgoingHtlcCtrlBlocks[resID]
htlc.SweepSignDesc.ControlBlock = ctrlBlock
}

View File

@ -639,7 +639,7 @@ func (c *ChainArbitrator) Start() error {
// corresponding more restricted resolver, as we don't have to watch
// the chain any longer, only resolve the contracts on the confirmed
// commitment.
//nolint:lll
//nolint:ll
for _, closeChanInfo := range closingChannels {
// We can leave off the CloseContract and ForceCloseChan
// methods as the channel is already closed at this point.

View File

@ -431,7 +431,7 @@ func (c *chainWatcher) handleUnknownLocalState(
auxResult, err := fn.MapOptionZ(
c.cfg.auxLeafStore,
//nolint:lll
//nolint:ll
func(s lnwallet.AuxLeafStore) fn.Result[lnwallet.CommitDiffAuxResult] {
return s.FetchLeavesFromCommit(
lnwallet.NewAuxChanState(c.cfg.chanState),

View File

@ -593,11 +593,11 @@ func maybeAugmentTaprootResolvers(chanType channeldb.ChannelType,
switch r := resolver.(type) {
case *commitSweepResolver:
if contractResolutions.CommitResolution != nil {
//nolint:lll
//nolint:ll
r.commitResolution = *contractResolutions.CommitResolution
}
case *htlcOutgoingContestResolver:
//nolint:lll
//nolint:ll
htlcResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
for _, htlcRes := range htlcResolutions {
htlcRes := htlcRes
@ -610,7 +610,7 @@ func maybeAugmentTaprootResolvers(chanType channeldb.ChannelType,
}
case *htlcTimeoutResolver:
//nolint:lll
//nolint:ll
htlcResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
for _, htlcRes := range htlcResolutions {
htlcRes := htlcRes
@ -623,7 +623,7 @@ func maybeAugmentTaprootResolvers(chanType channeldb.ChannelType,
}
case *htlcIncomingContestResolver:
//nolint:lll
//nolint:ll
htlcResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
for _, htlcRes := range htlcResolutions {
htlcRes := htlcRes
@ -635,7 +635,7 @@ func maybeAugmentTaprootResolvers(chanType channeldb.ChannelType,
}
}
case *htlcSuccessResolver:
//nolint:lll
//nolint:ll
htlcResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
for _, htlcRes := range htlcResolutions {
htlcRes := htlcRes

View File

@ -694,7 +694,7 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) {
// Now notify about the local force close getting confirmed.
//
//nolint:lll
//nolint:ll
chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
SpendDetail: &chainntnfs.SpendDetail{},
LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
@ -991,7 +991,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
},
}
//nolint:lll
//nolint:ll
chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
SpendDetail: &chainntnfs.SpendDetail{},
LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
@ -1620,7 +1620,7 @@ func TestChannelArbitratorCommitFailure(t *testing.T) {
},
{
closeType: channeldb.LocalForceClose,
//nolint:lll
//nolint:ll
sendEvent: func(chanArb *ChannelArbitrator) {
chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
SpendDetail: &chainntnfs.SpendDetail{},
@ -1957,7 +1957,7 @@ func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
// resolutions sent since we have none on our
// commitment transaction.
//
//nolint:lll
//nolint:ll
uniCloseInfo := &LocalUnilateralCloseInfo{
SpendDetail: &chainntnfs.SpendDetail{},
LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
@ -2884,7 +2884,7 @@ func TestChannelArbitratorAnchors(t *testing.T) {
},
}
//nolint:lll
//nolint:ll
chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
SpendDetail: &chainntnfs.SpendDetail{},
LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{

View File

@ -28,7 +28,7 @@ const (
// BudgetConfig is a struct that holds the configuration when offering outputs
// to the sweeper.
//
//nolint:lll
//nolint:ll
type BudgetConfig struct {
ToLocal btcutil.Amount `long:"tolocal" description:"The amount in satoshis to allocate as the budget to pay fees when sweeping the to_local output. If set, the budget calculated using the ratio (if set) will be capped at this value."`
ToLocalRatio float64 `long:"tolocalratio" description:"The ratio of the value in to_local output to allocate as the budget to pay fees when sweeping it."`

View File

@ -220,7 +220,7 @@ func (h *htlcIncomingContestResolver) Resolve(
//
// So we'll insert it at the 3rd index of the witness.
case isTaproot:
//nolint:lll
//nolint:ll
h.htlcResolution.SignedSuccessTx.TxIn[0].Witness[2] = preimage[:]
// Within the witness for the success transaction, the

View File

@ -242,7 +242,7 @@ func (h *htlcSuccessResolver) broadcastReSignedSuccessTx(immediate bool) (
if !h.outputIncubating {
var secondLevelInput input.HtlcSecondLevelAnchorInput
if isTaproot {
//nolint:lll
//nolint:ll
secondLevelInput = input.MakeHtlcSecondLevelSuccessTaprootInput(
h.htlcResolution.SignedSuccessTx,
h.htlcResolution.SignDetails, h.htlcResolution.Preimage,
@ -252,7 +252,7 @@ func (h *htlcSuccessResolver) broadcastReSignedSuccessTx(immediate bool) (
),
)
} else {
//nolint:lll
//nolint:ll
secondLevelInput = input.MakeHtlcSecondLevelSuccessAnchorInput(
h.htlcResolution.SignedSuccessTx,
h.htlcResolution.SignDetails, h.htlcResolution.Preimage,

View File

@ -178,7 +178,7 @@ func (h *htlcTimeoutResolver) claimCleanUp(
// - <sender sig> <receiver sig> <preimage> <success_script>
// <control_block>
case h.isTaproot() && h.htlcResolution.SignedTimeoutTx == nil:
//nolint:lll
//nolint:ll
preimageBytes = spendingInput.Witness[taprootRemotePreimageIndex]
// The witness stack when the remote party sweeps the output on a
@ -269,7 +269,7 @@ func (h *htlcTimeoutResolver) chainDetailsToWatch() (*wire.OutPoint, []byte, err
// witness script (the last element of the witness stack) to
// re-construct the pkScript we need to watch.
//
//nolint:lll
//nolint:ll
outPointToWatch := h.htlcResolution.SignedTimeoutTx.TxIn[0].PreviousOutPoint
witness := h.htlcResolution.SignedTimeoutTx.TxIn[0].Witness
@ -825,7 +825,7 @@ func (h *htlcTimeoutResolver) handleCommitSpend(
var csvWitnessType input.StandardWitnessType
if h.isTaproot() {
//nolint:lll
//nolint:ll
csvWitnessType = input.TaprootHtlcOfferedTimeoutSecondLevel
} else {
csvWitnessType = input.HtlcOfferedTimeoutSecondLevel

View File

@ -290,7 +290,7 @@ func testHtlcTimeoutResolver(t *testing.T, testCase htlcTimeoutTestCase) {
resolutionChan := make(chan ResolutionMsg, 1)
reportChan := make(chan *channeldb.ResolverReport)
//nolint:lll
//nolint:ll
chainCfg := ChannelArbitratorConfig{
ChainArbitratorConfig: ChainArbitratorConfig{
Notifier: notifier,
@ -371,10 +371,10 @@ func testHtlcTimeoutResolver(t *testing.T, testCase htlcTimeoutTestCase) {
if testCase.timeout {
timeoutTxID := timeoutTx.TxHash()
report := &channeldb.ResolverReport{
OutPoint: timeoutTx.TxIn[0].PreviousOutPoint, //nolint:lll
OutPoint: timeoutTx.TxIn[0].PreviousOutPoint, //nolint:ll
Amount: testHtlcAmt.ToSatoshis(),
ResolverType: channeldb.ResolverTypeOutgoingHtlc, //nolint:lll
ResolverOutcome: channeldb.ResolverOutcomeFirstStage, //nolint:lll
ResolverType: channeldb.ResolverTypeOutgoingHtlc, //nolint:ll
ResolverOutcome: channeldb.ResolverOutcomeFirstStage, //nolint:ll
SpendTxID: &timeoutTxID,
}

View File

@ -555,7 +555,7 @@ func (u *UtxoNursery) NurseryReport(
// confirmation of the commitment transaction.
switch kid.WitnessType() {
//nolint:lll
//nolint:ll
case input.TaprootHtlcAcceptedSuccessSecondLevel:
fallthrough
case input.HtlcAcceptedSuccessSecondLevel:
@ -590,7 +590,7 @@ func (u *UtxoNursery) NurseryReport(
// it.
report.AddLimboDirectHtlc(&kid)
//nolint:lll
//nolint:ll
case input.TaprootHtlcAcceptedSuccessSecondLevel:
fallthrough
case input.TaprootHtlcOfferedTimeoutSecondLevel:
@ -611,7 +611,7 @@ func (u *UtxoNursery) NurseryReport(
// balance.
switch kid.WitnessType() {
//nolint:lll
//nolint:ll
case input.TaprootHtlcAcceptedSuccessSecondLevel:
fallthrough
case input.TaprootHtlcOfferedTimeoutSecondLevel:

View File

@ -1461,7 +1461,7 @@ func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
return lnwire.ShortChannelID{}, fmt.Errorf("no peer alias")
}
//nolint:lll
//nolint:ll
gossiper := New(Config{
Notifier: ctx.gossiper.cfg.Notifier,
Broadcast: ctx.gossiper.cfg.Broadcast,

View File

@ -178,6 +178,95 @@ be used for calls to formatting functions like `fmt.Errorf`,
But not for statements that are important for the flow or logic of the code,
like `require.NoErrorf()`.
#### Exceptions and additional styling for structured logging
When making use of structured logging calls (these are any `btclog.Logger`
methods ending in `S`), a few different rules and exceptions apply.
1) **Static messages:** Structured log calls take a `context.Context` as their
first parameter and a _static_ string as their second (the `msg` parameter).
Formatted strings should ideally not be used to construct the `msg` parameter;
instead, key-value pairs (or `slog` attributes) should be used to provide
additional variables to the log line.
**WRONG**
```go
log.DebugS(ctx, fmt.Sprintf("User %d just spent %.8f to open a channel", userID, 0.00154))
```
**RIGHT**
```go
log.InfoS(ctx, "Channel open performed",
slog.Int("user_id", userID),
btclog.Fmt("amount", "%.8f", 0.00154))
```
2) **Key-value attributes**: The third parameter of any structured log method
is a variadic list of type `any`, but the values must be provided as key-value
pairs so that an associated `slog.Attr` variable can be created for each pair.
The simplest way to do this is to pass the key-value pairs in as raw literals,
as follows:
```go
log.InfoS(ctx, "Channel open performed", "user_id", userID, "amount", 0.00154)
```
This does work, but it becomes easy to accidentally leave out the value for a
key, leading to a nonsensical log line. To avoid this, it is suggested to make
use of the various `slog.Attr` helper functions as
follows:
```go
log.InfoS(ctx, "Channel open performed",
slog.Int("user_id", userID),
btclog.Fmt("amount", "%.8f", 0.00154))
```
3) **Line wrapping**: Structured log lines are an exception to the 80-character
line wrapping rule. This is so that the key-value pairs can be easily read and
reasoned about. If there is only a single key-value pair and the entire log
line still fits within 80 characters, it is acceptable to keep the pair on the
same line as the log message. With multiple key-value pairs, however, use one
line per key-value pair; a single key-value pair line may then exceed 80
characters for the sake of readability.
**WRONG**
```go
// Example 1.
log.InfoS(ctx, "User connected",
"user_id", userID)
// Example 2.
log.InfoS(ctx, "Channel open performed", "user_id", userID,
btclog.Fmt("amount", "%.8f", 0.00154), "channel_id", channelID)
// Example 3.
log.InfoS(ctx, "Bytes received",
"user_id", userID,
btclog.Hex("peer_id", peerID.SerializeCompressed()),
btclog.Hex("message", []bytes{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
})))
```
**RIGHT**
```go
// Example 1.
log.InfoS(ctx, "User connected", "user_id", userID)
// Example 2.
log.InfoS(ctx, "Channel open performed",
slog.Int("user_id", userID),
btclog.Fmt("amount", "%.8f", 0.00154),
slog.String("channel_id", channelID))
// Example 3.
log.InfoS(ctx, "Bytes received",
"user_id", userID,
btclog.Hex("peer_id", peerID.SerializeCompressed()),
btclog.Hex("message", []bytes{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08})))
```
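Related (an illustrative sketch, not part of the guidelines): `btclog.WithCtx`, as used elsewhere in this PR, attaches attributes to a context once so that every subsequent structured log call carries them:

```go
// Attach a request-scoped attribute to the context once...
ctx = btclog.WithCtx(ctx, slog.Int("user_id", userID))

// ...and every later structured call on that context includes it.
log.InfoS(ctx, "Channel open performed",
	btclog.Fmt("amount", "%.8f", 0.00154))
```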
### Wrapping long function definitions
If one is forced to wrap lines of function arguments that exceed the

View File

@ -163,7 +163,13 @@
a log line. The options for this include "off" (default), "short" (source file
name and line number) and "long" (full path to source file and line number).
Finally, the new `--logging.console.style` option can be used under the `dev`
build tag to add styling to console logging.
* [Start adding a commit hash fingerprint to log lines by
default](https://github.com/lightningnetwork/lnd/pull/9314). This can be
disabled with the new `--logging.no-commit-hash` option. Note that this extra
info will currently only appear in a few log lines, but more will be added in
the future as the structured logging change is propagated throughout LND.
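  For example, a node operator can start `lnd --logging.no-commit-hash` (or
  set the equivalent option in `lnd.conf`) to drop the `rev` attribute from
  log output.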
* [Add max files and max file size](https://github.com/lightningnetwork/lnd/pull/9233)
options to the `logging` config namespace under new `--logging.file.max-files`

View File

@ -230,7 +230,7 @@ func (b *Batcher) BatchFund(ctx context.Context,
err)
}
//nolint:lll
//nolint:ll
fundingReq, err := b.cfg.RequestParser(&lnrpc.OpenChannelRequest{
SatPerVbyte: uint64(req.SatPerVbyte),
TargetConf: req.TargetConf,

View File

@ -39,7 +39,7 @@ func TestCommitmentTypeNegotiation(t *testing.T) {
lnwire.StaticRemoteKeyOptional,
lnwire.AnchorsZeroFeeHtlcTxOptional,
),
//nolint:lll
//nolint:ll
expectsCommitType: lnwallet.CommitmentTypeAnchorsZeroFeeHtlcTx,
expectsChanType: nil,
expectsErr: nil,

View File

@ -4028,7 +4028,7 @@ func (f *Manager) handleChannelReady(peer lnpeer.Peer, //nolint:funlen
channelReadyMsg.AliasScid = &alias
if firstVerNonce != nil {
channelReadyMsg.NextLocalNonce = lnwire.SomeMusig2Nonce( //nolint:lll
channelReadyMsg.NextLocalNonce = lnwire.SomeMusig2Nonce( //nolint:ll
firstVerNonce.PubNonce,
)
}

View File

@ -428,7 +428,7 @@ func (b *Builder) syncGraphWithChain() error {
// pruning the channel graph with each new block that hasn't yet been
// consumed by the channel graph.
var spentOutputs []*wire.OutPoint
for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ { //nolint:lll
for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ { //nolint:ll
// Break out of the rescan early if a shutdown has been
// requested, otherwise long rescans will block the daemon from
// shutting down promptly.
@ -1236,7 +1236,7 @@ func (b *Builder) processUpdate(msg interface{},
b.cfg.Chain, &channelID, b.quit,
)
if err != nil {
//nolint:lll
//nolint:ll
//
// In order to ensure we don't erroneously mark a
// channel as a zombie due to an RPC failure, we'll

View File

@ -1658,7 +1658,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) (
var keys [][]byte
cursor := edgeIndex.ReadWriteCursor()
//nolint:lll
//nolint:ll
for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
bytes.Compare(k, chanIDEnd[:]) < 0; k, v = cursor.Next() {
edgeInfoReader := bytes.NewReader(v)
@ -1705,7 +1705,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) (
// the keys in a second loop.
var pruneKeys [][]byte
pruneCursor := pruneBucket.ReadWriteCursor()
//nolint:lll
//nolint:ll
for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
pruneKeys = append(pruneKeys, k)
@ -2004,7 +2004,7 @@ func (c *ChannelGraph) ChanUpdatesInHorizon(startTime,
// the index collecting the info and policy of each update of
// each channel that has a last update within the time range.
//
//nolint:lll
//nolint:ll
for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
@ -2139,7 +2139,7 @@ func (c *ChannelGraph) NodeUpdatesInHorizon(startTime,
// the index collecting info for each node within the time
// range.
//
//nolint:lll
//nolint:ll
for indexKey, _ := updateCursor.Seek(startTimeBytes[:]); indexKey != nil &&
bytes.Compare(indexKey, endTimeBytes[:]) <= 0; indexKey, _ = updateCursor.Next() {
@ -2377,7 +2377,7 @@ func (c *ChannelGraph) FilterChannelRange(startHeight,
// We'll now iterate through the database, and find each
// channel ID that resides within the specified range.
//
//nolint:lll
//nolint:ll
for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
// Don't send alias SCIDs during gossip sync.
@ -3163,7 +3163,7 @@ func nodeTraversal(tx kvdb.RTx, nodePub []byte, db kvdb.Backend,
// as its prefix. This indicates that we've stepped over into
// another node's edges, so we can terminate our scan.
edgeCursor := edges.ReadCursor()
for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:lll
for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { //nolint:ll
// If the prefix still matches, the channel id is
// returned in nodeEdge. Channel id is used to lookup
// the node at the other end of the channel and both

View File

@ -1124,7 +1124,7 @@ func TestGraphTraversalCacheable(t *testing.T) {
tx, func(tx kvdb.RTx,
info *models.ChannelEdgeInfo,
policy *models.ChannelEdgePolicy,
policy2 *models.ChannelEdgePolicy) error { //nolint:lll
policy2 *models.ChannelEdgePolicy) error { //nolint:ll
delete(chanIndex, info.ChannelID)
return nil
@ -2584,7 +2584,7 @@ func TestFilterChannelRange(t *testing.T) {
)
require.NoError(t, err)
expRes := channelRanges[test.expStartIndex:test.expEndIndex] //nolint:lll
expRes := channelRanges[test.expStartIndex:test.expEndIndex] //nolint:ll
if len(expRes) == 0 {
require.Nil(t, resp)
@ -2598,7 +2598,7 @@ func TestFilterChannelRange(t *testing.T) {
)
require.NoError(t, err)
expRes = channelRangesWithTimestamps[test.expStartIndex:test.expEndIndex] //nolint:lll
expRes = channelRangesWithTimestamps[test.expStartIndex:test.expEndIndex] //nolint:ll
if len(expRes) == 0 {
require.Nil(t, resp)
@ -3898,7 +3898,7 @@ func BenchmarkForEachChannel(b *testing.B) {
cb := func(tx kvdb.RTx,
info *models.ChannelEdgeInfo,
policy *models.ChannelEdgePolicy,
policy2 *models.ChannelEdgePolicy) error { //nolint:lll
policy2 *models.ChannelEdgePolicy) error { //nolint:ll
// We need to do something with
// the data here, otherwise the

View File

@ -377,7 +377,7 @@ func peelBlindedPathDummyHop(r *sphinxHopIterator, cltvExpiryDelta uint32,
r.router, onionPkt, sphinxPacket, BlindingKit{
Processor: r.router,
UpdateAddBlinding: tlv.SomeRecordT(
tlv.NewPrimitiveRecord[lnwire.BlindingPointTlvType]( //nolint:lll
tlv.NewPrimitiveRecord[lnwire.BlindingPointTlvType]( //nolint:ll
nextEph.Val,
),
),
@ -606,7 +606,7 @@ func (b *BlindingKit) getBlindingPoint(payloadBlinding *btcec.PublicKey) (
//
// ceil(a/b) = (a + b - 1)/(b).
//
//nolint:lll,dupword
//nolint:ll,dupword
func calculateForwardingAmount(incomingAmount, baseFee lnwire.MilliSatoshi,
proportionalFee uint32) (lnwire.MilliSatoshi, error) {
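As an aside (not part of the diff), the ceiling-division identity quoted in the comment above can be sanity-checked in isolation:

```go
// ceilDiv computes ceil(a/b) with integer arithmetic only, using the
// identity ceil(a/b) = (a + b - 1) / b for positive integers a and b.
func ceilDiv(a, b uint64) uint64 {
	return (a + b - 1) / b
}

// For example: ceilDiv(7, 2) == 4 and ceilDiv(8, 2) == 4, matching
// ceil(3.5) == 4 and ceil(4.0) == 4.
```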

View File

@ -281,7 +281,7 @@ func TestParseAndValidateRecipientData(t *testing.T) {
if testCase.updateAddBlinding != nil {
kit.UpdateAddBlinding = tlv.SomeRecordT(
//nolint:lll
//nolint:ll
tlv.NewPrimitiveRecord[lnwire.BlindingPointTlvType](testCase.updateAddBlinding),
)
}

View File

@ -15,7 +15,7 @@ import (
)
var (
//nolint:lll
//nolint:ll
testPrivKeyBytes, _ = hex.DecodeString("e126f68f7eafcc8b74f54d269fe206be715000f94dac067d1c04a8ca3b2db734")
_, testPubKey = btcec.PrivKeyFromBytes(testPrivKeyBytes)
)

View File

@ -942,7 +942,7 @@ func (l *channelLink) syncChanStates() error {
// very same nonce that we sent above, as they should
// take the latest verification nonce we send.
if chanState.ChanType.IsTaproot() {
//nolint:lll
//nolint:ll
channelReadyMsg.NextLocalNonce = localChanSyncMsg.LocalNonce
}
@ -3724,7 +3724,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg) {
// parse the payload we have no way of knowing whether
// we were the introduction node or not.
//
//nolint:lll
//nolint:ll
obfuscator, failCode := chanIterator.ExtractErrorEncrypter(
l.cfg.ExtractErrorEncrypter,
// We need our route role here because we
@ -3885,7 +3885,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg) {
inboundFee := l.cfg.FwrdingPolicy.InboundFee
//nolint:lll
//nolint:ll
updatePacket := &htlcPacket{
incomingChanID: l.ShortChanID(),
incomingHTLCID: add.ID,
@ -3936,7 +3936,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg) {
l.log.Errorf("unable to encode the "+
"remaining route %v", err)
cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage { //nolint:lll
cb := func(upd *lnwire.ChannelUpdate1) lnwire.FailureMessage { //nolint:ll
return lnwire.NewTemporaryChannelFailure(upd)
}
@ -3962,7 +3962,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg) {
if fwdPkg.State == channeldb.FwdStateLockedIn {
inboundFee := l.cfg.FwrdingPolicy.InboundFee
//nolint:lll
//nolint:ll
updatePacket := &htlcPacket{
incomingChanID: l.ShortChanID(),
incomingHTLCID: add.ID,

View File

@ -4889,7 +4889,7 @@ func (h *persistentLinkHarness) restartLink(
// the firing via force feeding.
bticker := ticker.NewForce(time.Hour)
//nolint:lll
//nolint:ll
aliceCfg := ChannelLinkConfig{
FwrdingPolicy: globalPolicy,
Peer: alicePeer,

View File

@ -1137,7 +1137,7 @@ func (h *hopNetwork) createChannelLink(server, peer *mockServer,
return server.htlcSwitch.ForwardPackets(linkQuit, packets...)
}
//nolint:lll
//nolint:ll
link := NewChannelLink(
ChannelLinkConfig{
BestHeight: server.htlcSwitch.BestHeight,

View File

@ -699,14 +699,14 @@ const (
// - number_of_witness_elements: 1 byte
// - sig_len: 1 byte
// - sweep_sig: 65 bytes (worst case w/o sighash default)
//nolint:lll
//nolint:ll
TaprootSecondLevelRevokeWitnessSize = TaprootKeyPathCustomSighashWitnessSize
// TaprootAcceptedRevokeWitnessSize:
// - number_of_witness_elements: 1 byte
// - sig_len: 1 byte
// - sweep_sig: 65 bytes (worst case w/o sighash default)
//nolint:lll
//nolint:ll
TaprootAcceptedRevokeWitnessSize = TaprootKeyPathCustomSighashWitnessSize
// TaprootOfferedRevokeWitnessSize:

View File

@ -861,11 +861,11 @@ var witnessSizeTests = []witnessSizeTest{
KeyDesc: keychain.KeyDescriptor{
PubKey: testKey.PubKey(),
},
//nolint:lll
//nolint:ll
WitnessScript: commitScriptTree.SettleLeaf.Script,
HashType: txscript.SigHashAll,
InputIndex: 0,
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:lll
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:ll
}
witness, err := input.TaprootCommitSpendSuccess(
@ -895,11 +895,11 @@ var witnessSizeTests = []witnessSizeTest{
KeyDesc: keychain.KeyDescriptor{
PubKey: testKey.PubKey(),
},
//nolint:lll
//nolint:ll
WitnessScript: commitScriptTree.RevocationLeaf.Script,
HashType: txscript.SigHashAll,
InputIndex: 0,
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:lll
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:ll
}
witness, err := input.TaprootCommitSpendRevoke(
@ -919,7 +919,7 @@ var witnessSizeTests = []witnessSizeTest{
require.NoError(t, err)
signer := &dummySigner{}
//nolint:lll
//nolint:ll
commitScriptTree, err := input.NewRemoteCommitScriptTree(
testKey.PubKey(), input.NoneTapLeaf(),
)
@ -929,11 +929,11 @@ var witnessSizeTests = []witnessSizeTest{
KeyDesc: keychain.KeyDescriptor{
PubKey: testKey.PubKey(),
},
//nolint:lll
//nolint:ll
WitnessScript: commitScriptTree.SettleLeaf.Script,
HashType: txscript.SigHashAll,
InputIndex: 0,
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:lll
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:ll
}
witness, err := input.TaprootCommitRemoteSpend(
@ -1174,7 +1174,7 @@ var witnessSizeTests = []witnessSizeTest{
WitnessScript: timeoutLeaf.Script,
HashType: txscript.SigHashAll,
InputIndex: 0,
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:lll
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:ll
}
witness, err := input.ReceiverHTLCScriptTaprootTimeout(
@ -1222,7 +1222,7 @@ var witnessSizeTests = []witnessSizeTest{
WitnessScript: timeoutLeaf.Script,
HashType: txscript.SigHashAll,
InputIndex: 0,
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:lll
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:ll
}
receiverSig, err := signer.SignOutputRaw(
testTx, receiverDesc,
@ -1236,7 +1236,7 @@ var witnessSizeTests = []witnessSizeTest{
WitnessScript: timeoutLeaf.Script,
HashType: txscript.SigHashAll,
InputIndex: 0,
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:lll
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:ll
}
witness, err := input.SenderHTLCScriptTaprootTimeout(
@ -1283,7 +1283,7 @@ var witnessSizeTests = []witnessSizeTest{
WitnessScript: successLeaf.Script,
HashType: txscript.SigHashAll,
InputIndex: 0,
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:lll
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:ll
}
witness, err := input.SenderHTLCScriptTaprootRedeem(
@ -1329,7 +1329,7 @@ var witnessSizeTests = []witnessSizeTest{
WitnessScript: successsLeaf.Script,
HashType: txscript.SigHashAll,
InputIndex: 0,
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:lll
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:ll
}
senderSig, err := signer.SignOutputRaw(
testTx, senderDesc,
@ -1343,7 +1343,7 @@ var witnessSizeTests = []witnessSizeTest{
WitnessScript: successsLeaf.Script,
HashType: txscript.SigHashAll,
InputIndex: 0,
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:lll
SignMethod: input.TaprootScriptSpendSignMethod, //nolint:ll
}
witness, err := input.ReceiverHTLCScriptTaprootRedeem(

View File

@ -1841,7 +1841,7 @@ func (i *InvoiceRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) {
// copySingleClients copies i.SingleInvoiceSubscription inside a lock. This is
// useful when we need to iterate the map to send notifications.
func (i *InvoiceRegistry) copySingleClients() map[uint32]*SingleInvoiceSubscription { //nolint:lll
func (i *InvoiceRegistry) copySingleClients() map[uint32]*SingleInvoiceSubscription { //nolint:ll
i.notificationClientMux.RLock()
defer i.notificationClientMux.RUnlock()

View File

@ -1054,7 +1054,7 @@ func (s *sqlInvoiceUpdater) AddHtlc(circuitKey models.CircuitKey,
)
if err != nil {
mappedSQLErr := sqldb.MapSQLError(err)
var uniqueConstraintErr *sqldb.ErrSQLUniqueConstraintViolation //nolint:lll
var uniqueConstraintErr *sqldb.ErrSQLUniqueConstraintViolation //nolint:ll
if errors.As(mappedSQLErr, &uniqueConstraintErr) {
return ErrDuplicateSetID{
SetID: setID,

View File

@ -382,7 +382,7 @@ func testChannelBackupRestoreBasic(ht *lntest.HarnessTest) {
// create a new nodeRestorer that will restore
// using the on-disk channel.backup.
//
//nolint:lll
//nolint:ll
backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
MultiChanBackup: multi,
}

View File

@ -609,7 +609,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// We expect alice to have a timeout tx resolution with
// an amount equal to the payment amount.
//nolint:lll
//nolint:ll
aliceReports[outpoint.String()] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_OUTGOING_HTLC,
Outcome: lnrpc.ResolutionOutcome_FIRST_STAGE,
@ -622,7 +622,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// incoming htlc timeout which reflects the full amount
// of the htlc. It has no spend tx, because carol stops
// monitoring the htlc once it has timed out.
//nolint:lll
//nolint:ll
carolReports[outpoint.String()] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_INCOMING_HTLC,
Outcome: lnrpc.ResolutionOutcome_TIMEOUT,

View File

@ -19,7 +19,7 @@ var (
probeAmt = int64(probeAmount) * 1_000
failureReasonNone = lnrpc.PaymentFailureReason_FAILURE_REASON_NONE
failureReasonNoRoute = lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE //nolint:lll
failureReasonNoRoute = lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE //nolint:ll
)
const (

View File

@ -144,7 +144,7 @@ func testBasicChannelFunding(ht *lntest.HarnessTest) {
chansCommitType == lnrpc.CommitmentType_ANCHORS:
case expType == lnrpc.CommitmentType_STATIC_REMOTE_KEY &&
chansCommitType == lnrpc.CommitmentType_STATIC_REMOTE_KEY: //nolint:lll
chansCommitType == lnrpc.CommitmentType_STATIC_REMOTE_KEY: //nolint:ll
case expType == lnrpc.CommitmentType_LEGACY &&
chansCommitType == lnrpc.CommitmentType_LEGACY:

View File

@ -21,7 +21,7 @@ func testHoldInvoicePersistence(ht *lntest.HarnessTest) {
const (
chanAmt = btcutil.Amount(1000000)
numPayments = 10
reason = lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS //nolint:lll
reason = lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS //nolint:ll
)
// Create carol, and clean up when the test finishes.

View File

@ -114,7 +114,7 @@ func (h *holdSubscription) cancel(ht *lntest.HarnessTest) {
)
require.Equal(ht, lnrpc.Payment_FAILED, payUpdate.Status,
"expected payment failed")
require.Equal(ht, lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, //nolint:lll
require.Equal(ht, lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, //nolint:ll
payUpdate.FailureReason, "expected unknown details")
}

View File

@ -138,7 +138,7 @@ func testHtlcErrorPropagation(ht *lntest.HarnessTest) {
}
ht.SendPaymentAssertFail(
alice, sendReq,
lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, //nolint:lll
lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, //nolint:ll
)
ht.AssertLastHTLCError(
alice, lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS,
@ -207,7 +207,7 @@ func testHtlcErrorPropagation(ht *lntest.HarnessTest) {
}
ht.SendPaymentAssertFail(
alice, sendReq,
lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, //nolint:lll
lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, //nolint:ll
)
ht.AssertLastHTLCError(
alice, lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS,

View File

@ -1137,7 +1137,7 @@ func testSweepHTLCs(ht *lntest.HarnessTest) {
return incoming, outgoing
}
//nolint:lll
//nolint:ll
// For neutrino backend, we need to give it more time to sync the
// blocks. There's a potential bug we need to fix:
// 2024-04-18 23:36:07.046 [ERR] NTFN: unable to get missed blocks: starting height 487 is greater than ending height 486

View File

@ -4,7 +4,7 @@ import "fmt"
// Config holds etcd configuration alongside with configuration related to our higher level interface.
//
//nolint:lll
//nolint:ll
type Config struct {
Embedded bool `long:"embedded" description:"Use embedded etcd instance instead of the external one. Note: use for testing only."`

View File

@ -4,7 +4,7 @@ import "time"
// Config holds postgres configuration data.
//
//nolint:lll
//nolint:ll
type Config struct {
Dsn string `long:"dsn" description:"Database connection string."`
Timeout time.Duration `long:"timeout" description:"Database connection timeout. Set to zero to disable."`

View File

@ -4,7 +4,7 @@ import "time"
// Config holds sqlite configuration data.
//
//nolint:lll
//nolint:ll
type Config struct {
Timeout time.Duration `long:"timeout" description:"The time after which a database query should be timed out."`
BusyTimeout time.Duration `long:"busytimeout" description:"The maximum amount of time to wait for a database connection to become available for a query."`

View File

@ -2,7 +2,7 @@ package lncfg
// AutoPilot holds the configuration options for the daemon's autopilot.
//
//nolint:lll
//nolint:ll
type AutoPilot struct {
Active bool `long:"active" description:"If the autopilot agent should be active or not."`
Heuristic map[string]float64 `long:"heuristic" description:"Heuristic to activate, and the weight to give it during scoring."`

View File

@ -11,7 +11,7 @@ const (
// Bitcoind holds the configuration options for the daemon's connection to
// bitcoind.
//
//nolint:lll
//nolint:ll
type Bitcoind struct {
Dir string `long:"dir" description:"The base directory that contains the node's data, logs, configuration file, etc."`
ConfigPath string `long:"config" description:"Configuration filepath. If not set, will default to the default filename under 'dir'."`

View File

@ -2,7 +2,7 @@ package lncfg
// Btcd holds the configuration options for the daemon's connection to btcd.
//
//nolint:lll
//nolint:ll
type Btcd struct {
Dir string `long:"dir" description:"The base directory that contains the node's data, logs, configuration file, etc."`
RPCHost string `long:"rpchost" description:"The daemon's rpc listening address. If a port is omitted, then the default port for the selected chain parameters will be used."`

View File

@ -21,7 +21,7 @@ const (
// Caches holds the configuration for various caches within lnd.
//
//nolint:lll
//nolint:ll
type Caches struct {
// RejectCacheSize is the maximum number of entries stored in lnd's
// reject cache, which is used for efficiently rejecting gossip updates.

View File

@ -8,7 +8,7 @@ import (
// Chain holds the configuration options for the daemon's chain settings.
//
//nolint:lll
//nolint:ll
type Chain struct {
Active bool `long:"active" description:"DEPRECATED: If the chain should be active or not. This field is now ignored since only the Bitcoin chain is supported" hidden:"true"`
ChainDir string `long:"chaindir" description:"The directory to store the chain's data within."`

View File

@ -71,7 +71,7 @@ const (
// DB holds database configuration for LND.
//
//nolint:lll
//nolint:ll
type DB struct {
Backend string `long:"backend" description:"The selected database backend."`

View File

@ -19,7 +19,7 @@ func IsDevBuild() bool {
// DevConfig specifies configs used for integration tests. These configs can
// only be used in tests and must NOT be exported for production usage.
//
//nolint:lll
//nolint:ll
type DevConfig struct {
ProcessChannelReadyWait time.Duration `long:"processchannelreadywait" description:"Time to sleep before processing remote node's channel_ready message."`
ReservationTimeout time.Duration `long:"reservationtimeout" description:"The maximum time we keep a pending channel open flow in memory."`

View File

@ -12,7 +12,7 @@ const DefaultMaxUpdateTimeout = 20 * time.Minute
// Fee holds the configuration options for fee estimation.
//
//nolint:lll
//nolint:ll
type Fee struct {
URL string `long:"url" description:"Optional URL for external fee estimation. If no URL is specified, the method for fee estimation will depend on the chosen backend and network. Must be set for neutrino on mainnet."`
MinUpdateTimeout time.Duration `long:"min-update-timeout" description:"The minimum interval in which fees will be updated from the specified fee URL."`

View File

@ -7,7 +7,7 @@ import (
"github.com/lightningnetwork/lnd/routing/route"
)
//nolint:lll
//nolint:ll
type Gossip struct {
PinnedSyncersRaw []string `long:"pinned-syncers" description:"A set of peers that should always remain in an active sync state, which can be used to closely synchronize the routing tables of two nodes. The value should be a hex-encoded pubkey, the flag can be specified multiple times to add multiple peers. Connected peers matching this pubkey will remain active for the duration of the connection and not count towards the NumActiveSyncer count."`

View File

@ -23,7 +23,7 @@ var (
// HealthCheckConfig contains the configuration for the different health checks
// the lnd runs.
//
//nolint:lll
//nolint:ll
type HealthCheckConfig struct {
ChainCheck *CheckConfig `group:"chainbackend" namespace:"chainbackend"`

View File

@ -13,7 +13,7 @@ var (
MaxMailboxDeliveryTimeout = 2 * time.Minute
)
//nolint:lll
//nolint:ll
type Htlcswitch struct {
MailboxDeliveryTimeout time.Duration `long:"mailboxdeliverytimeout" description:"The timeout value when delivering HTLCs to a channel link. Setting this value too small will result in local payment failures if large number of payments are sent over a short period."`
}

View File

@ -36,7 +36,7 @@ const (
// Invoices holds the configuration options for invoices.
//
//nolint:lll
//nolint:ll
type Invoices struct {
HoldExpiryDelta uint32 `long:"holdexpirydelta" description:"The number of blocks before a hold invoice's htlc expires that the invoice should be canceled to prevent a force close. Force closes will not be prevented if this value is not greater than DefaultIncomingBroadcastDelta."`
}

View File

@ -6,7 +6,7 @@ package lncfg
// Prometheus is the set of configuration data that specifies the listening
// address of the Prometheus exporter.
//
//nolint:lll
//nolint:ll
type Prometheus struct {
// Listen is the listening address that we should use to allow the main
// Prometheus server to scrape our metrics.

View File

@ -5,7 +5,7 @@ import "time"
// Neutrino holds the configuration options for the daemon's connection to
// neutrino.
//
//nolint:lll
//nolint:ll
type Neutrino struct {
AddPeers []string `short:"a" long:"addpeer" description:"Add a peer to connect with at startup"`
ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at startup"`

View File

@ -7,7 +7,7 @@ import (
// Pprof holds the configuration options for LND's built-in pprof server.
//
//nolint:lll
//nolint:ll
type Pprof struct {
CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"`

View File

@ -11,7 +11,7 @@ import (
// compatibility of protocol additions, while defaulting to the latest within
// lnd, or to enable experimental protocol changes.
//
//nolint:lll
//nolint:ll
type ProtocolOptions struct {
// LegacyProtocol is a sub-config that houses all the legacy protocol
// options. These are mostly used for integration tests as most modern

View File

@ -13,7 +13,7 @@ import (
//
// TODO(yy): delete this build flag to unify with `lncfg/protocol.go`.
//
//nolint:lll
//nolint:ll
type ProtocolOptions struct {
// LegacyProtocol is a sub-config that houses all the legacy protocol
// options. These are mostly used for integration tests as most modern

View File

@ -7,7 +7,7 @@ package lncfg
// are mostly used for integration tests as most modern nodes should always run
// with them on by default.
//
//nolint:lll
//nolint:ll
type LegacyProtocol struct {
// LegacyOnionFormat if set to true, then we won't signal
// TLVOnionPayloadOptional. As a result, nodes that include us in the

View File

@ -13,7 +13,7 @@ const (
// RemoteSigner holds the configuration options for a remote RPC signer.
//
//nolint:lll
//nolint:ll
type RemoteSigner struct {
Enable bool `long:"enable" description:"Use a remote signer for signing any on-chain related transactions or messages. Only recommended if local wallet is initialized as watch-only. Remote signer must use the same seed/root key as the local watch-only wallet but must have private keys."`
RPCHost string `long:"rpchost" description:"The remote signer's RPC host:port"`

View File

@ -4,7 +4,7 @@ import "fmt"
// Routing holds the configuration options for routing.
//
//nolint:lll
//nolint:ll
type Routing struct {
AssumeChannelValid bool `long:"assumechanvalid" description:"DEPRECATED: Skip checking channel spentness during graph validation. This speedup comes at the risk of using an unvalidated view of the network for routing. (default: false)" hidden:"true"`
@ -15,7 +15,7 @@ type Routing struct {
// BlindedPaths holds the configuration options for blinded path construction.
//
//nolint:lll
//nolint:ll
type BlindedPaths struct {
MinNumRealHops uint8 `long:"min-num-real-hops" description:"The minimum number of real hops to include in a blinded path. This doesn't include our node, so if the minimum is 1, then the path will contain at minimum our node along with an introduction node hop. If it is zero then the shortest path will use our node as an introduction node."`
NumHops uint8 `long:"num-hops" description:"The number of hops to include in a blinded path. This doesn't include our node, so if it is 1, then the path will contain our node along with an introduction node or dummy node hop. If paths shorter than NumHops is found, then they will be padded using dummy hops."`

View File

@ -16,7 +16,7 @@ const (
// RPCMiddleware holds the configuration for RPC interception middleware.
//
//nolint:lll
//nolint:ll
type RPCMiddleware struct {
Enable bool `long:"enable" description:"Enable the RPC middleware interceptor functionality."`
InterceptTimeout time.Duration `long:"intercepttimeout" description:"Time after which a RPC middleware intercept request will time out and return an error if it hasn't yet received a response."`

View File

@ -19,7 +19,7 @@ const (
MaxAllowedFeeRate = 10_000
)
//nolint:lll
//nolint:ll
type Sweeper struct {
BatchWindowDuration time.Duration `long:"batchwindowduration" description:"Duration of the sweep batch window. The sweep is held back during the batch window to allow more inputs to be added and thereby lower the fee per input." hidden:"true"`
MaxFeeRate chainfee.SatPerVByte `long:"maxfeerate" description:"Maximum fee rate in sat/vb that the sweeper is allowed to use when sweeping funds, the fee rate derived from budgets are capped at this value. Setting this value too low can result in transactions not being confirmed in time, causing HTLCs to expire hence potentially losing funds."`

View File

@ -2,7 +2,7 @@ package lncfg
// Tor holds the configuration options for the daemon's connection to tor.
//
//nolint:lll
//nolint:ll
type Tor struct {
Active bool `long:"active" description:"Allow outbound and inbound connections to be routed through Tor"`
SOCKS string `long:"socks" description:"The host:port that Tor's exposed SOCKS5 proxy is listening on"`

View File

@ -5,7 +5,7 @@ import "github.com/lightningnetwork/lnd/watchtower"
// Watchtower holds the daemon-specific configuration parameters for running a
// watchtower that shares resources with the daemon.
//
//nolint:lll
//nolint:ll
type Watchtower struct {
Active bool `long:"active" description:"If the watchtower should be active or not"`

View File

@ -19,7 +19,7 @@ const (
// Workers exposes CLI configuration for tuning resources consumed by worker
// pools.
//
//nolint:lll
//nolint:ll
type Workers struct {
// Read is the maximum number of concurrent read pool workers.
Read int `long:"read" description:"Maximum number of concurrent read pool workers. This number should be proportional to the number of peers."`

View File

@ -9,7 +9,7 @@ import (
// WtClient holds the configuration options for the daemon's watchtower client.
//
//nolint:lll
//nolint:ll
type WtClient struct {
// Active determines whether a watchtower client should be created to
// back up channel states with registered watchtowers.

190
lnd.go
View File

@ -8,6 +8,7 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
"net/http/pprof"
@ -148,23 +149,45 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
interceptor signal.Interceptor) error {
defer func() {
ltndLog.Info("Shutdown complete\n")
ltndLog.Info("Shutdown complete")
err := cfg.LogRotator.Close()
if err != nil {
ltndLog.Errorf("Could not close log rotator: %v", err)
}
}()
mkErr := func(format string, args ...interface{}) error {
ltndLog.Errorf("Shutting down because error in main "+
"method: "+format, args...)
return fmt.Errorf(format, args...)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, err := build.WithBuildInfo(ctx, cfg.LogConfig)
if err != nil {
return fmt.Errorf("unable to add build info to context: %w",
err)
}
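
`build.WithBuildInfo` attaches build metadata (notably the commit hash) to the context so that loggers receiving that context can stamp it onto every line. The sketch below shows only the general context-attribute pattern with the standard library, not lnd's actual implementation; the handler, key type, and hash value are invented:

	package main

	import (
		"context"
		"log/slog"
		"os"
	)

	type attrsKey struct{}

	// withAttrs stashes attributes on the context so a handler can append
	// them to every record logged with that context.
	func withAttrs(ctx context.Context, attrs ...slog.Attr) context.Context {
		return context.WithValue(ctx, attrsKey{}, attrs)
	}

	// ctxHandler wraps another handler and injects context attributes.
	type ctxHandler struct{ slog.Handler }

	func (h ctxHandler) Handle(ctx context.Context, r slog.Record) error {
		if attrs, ok := ctx.Value(attrsKey{}).([]slog.Attr); ok {
			r.AddAttrs(attrs...)
		}
		return h.Handler.Handle(ctx, r)
	}

	func main() {
		logger := slog.New(ctxHandler{slog.NewTextHandler(os.Stdout, nil)})
		ctx := withAttrs(context.Background(),
			slog.String("commit_hash", "a1b2c3d"))

		// The emitted line includes commit_hash=a1b2c3d.
		logger.InfoContext(ctx, "Version Info")
	}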
mkErr := func(msg string, err error, attrs ...any) error {
ltndLog.ErrorS(ctx, "Shutting down due to error in main "+
"method", err, attrs...)
var (
params = []any{err}
fmtStr = msg + ": %w"
)
for _, attr := range attrs {
fmtStr += " %s"
params = append(params, attr)
}
return fmt.Errorf(fmtStr, params...)
}
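
The reworked `mkErr` logs through `ErrorS` and also returns a wrapped error, appending each structured attribute to the error text via the attribute's `key=value` string form. A self-contained sketch of that shape, minus the logging side effect; the endpoint value and error are made up:

	package main

	import (
		"errors"
		"fmt"
		"log/slog"
	)

	// mkErr mirrors the helper above: the message wraps err with %w, and
	// each slog attribute is rendered by its String method as "key=value".
	func mkErr(msg string, err error, attrs ...any) error {
		params := []any{err}
		fmtStr := msg + ": %w"
		for _, attr := range attrs {
			fmtStr += " %s"
			params = append(params, attr)
		}
		return fmt.Errorf(fmtStr, params...)
	}

	func main() {
		err := mkErr("unable to listen on grpc endpoint",
			errors.New("address already in use"),
			slog.String("endpoint", "127.0.0.1:10009"))

		// Prints: unable to listen on grpc endpoint: address already in
		// use endpoint=127.0.0.1:10009
		fmt.Println(err)
	}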
// Show version at startup.
ltndLog.Infof("Version: %s commit=%s, build=%s, logging=%s, "+
"debuglevel=%s", build.Version(), build.Commit,
build.Deployment, build.LoggingType, cfg.DebugLevel)
ltndLog.InfoS(ctx, "Version Info",
slog.String("version", build.Version()),
slog.String("commit", build.Commit),
slog.Any("build", build.Deployment),
slog.Any("logging", build.LoggingType),
slog.String("debuglevel", cfg.DebugLevel))
var network string
switch {
@ -184,13 +207,9 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
network = "signet"
}
ltndLog.Infof("Active chain: %v (network=%v)",
strings.Title(BitcoinChainName), network,
)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ltndLog.InfoS(ctx, "Network Info",
"active_chain", strings.Title(BitcoinChainName),
"network", network)
// Enable http profiling server if requested.
if cfg.Pprof.Profile != "" {
@ -216,7 +235,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
"/debug/pprof/", http.StatusSeeOther,
))
ltndLog.Infof("Pprof listening on %v", cfg.Pprof.Profile)
ltndLog.InfoS(ctx, "Pprof listening", "addr", cfg.Pprof.Profile)
// Create the pprof server.
pprofServer := &http.Server{
@ -227,11 +246,10 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
// Shut the server down when lnd is shutting down.
defer func() {
ltndLog.Info("Stopping pprof server...")
ltndLog.InfoS(ctx, "Stopping pprof server...")
err := pprofServer.Shutdown(ctx)
if err != nil {
ltndLog.Errorf("Stop pprof server got err: %v",
err)
ltndLog.ErrorS(ctx, "Stop pprof server", err)
}
}()
@ -239,7 +257,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
go func() {
err := pprofServer.ListenAndServe()
if err != nil && !errors.Is(err, http.ErrServerClosed) {
ltndLog.Errorf("Serving pprof got err: %v", err)
ltndLog.ErrorS(ctx, "Could not serve pprof "+
"server", err)
}
}()
}
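
The pprof block combines three standard-library pieces: handler registration on a mux, serving in a goroutine where `http.ErrServerClosed` counts as a clean exit, and graceful `Shutdown` on teardown. A condensed runnable sketch of the same pattern; the address and the timer standing in for lnd's shutdown signal are illustrative:

	package main

	import (
		"context"
		"errors"
		"log"
		"net/http"
		"net/http/pprof"
		"time"
	)

	func main() {
		mux := http.NewServeMux()
		mux.HandleFunc("/debug/pprof/", pprof.Index)

		srv := &http.Server{Addr: "localhost:6060", Handler: mux}

		ctx, cancel := context.WithCancel(context.Background())
		time.AfterFunc(2*time.Second, cancel) // stand-in for a shutdown signal

		go func() {
			// ErrServerClosed is the expected result of a graceful
			// Shutdown and is therefore not treated as a failure.
			err := srv.ListenAndServe()
			if err != nil && !errors.Is(err, http.ErrServerClosed) {
				log.Printf("pprof server error: %v", err)
			}
		}()

		<-ctx.Done()
		_ = srv.Shutdown(context.Background())
	}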
@ -248,7 +267,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
if cfg.Pprof.CPUProfile != "" {
f, err := os.Create(cfg.Pprof.CPUProfile)
if err != nil {
return mkErr("unable to create CPU profile: %v", err)
return mkErr("unable to create CPU profile", err)
}
_ = runtimePprof.StartCPUProfile(f)
defer func() {
@ -261,7 +280,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
// needs to be done early and once during the startup process, before
// any DB access.
if err := cfg.DB.Init(ctx, cfg.graphDatabaseDir()); err != nil {
return mkErr("error initializing DBs: %v", err)
return mkErr("error initializing DBs", err)
}
tlsManagerCfg := &TLSManagerCfg{
@ -286,7 +305,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
serverOpts, restDialOpts, restListen, cleanUp,
err := tlsManager.SetCertificateBeforeUnlock()
if err != nil {
return mkErr("error setting cert before unlock: %v", err)
return mkErr("error setting cert before unlock", err)
}
if cleanUp != nil {
defer cleanUp()
@ -303,8 +322,12 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
// connections.
lis, err := lncfg.ListenOnAddress(grpcEndpoint)
if err != nil {
return mkErr("unable to listen on %s: %v",
grpcEndpoint, err)
return mkErr("unable to listen on grpc "+
"endpoint", err,
slog.String(
"endpoint",
grpcEndpoint.String(),
))
}
defer lis.Close()
@ -323,7 +346,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
rpcsLog, cfg.NoMacaroons, cfg.RPCMiddleware.Mandatory,
)
if err := interceptorChain.Start(); err != nil {
return mkErr("error starting interceptor chain: %v", err)
return mkErr("error starting interceptor chain", err)
}
defer func() {
err := interceptorChain.Stop()
@ -364,14 +387,14 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
rpcServer := newRPCServer(cfg, interceptorChain, implCfg, interceptor)
err = rpcServer.RegisterWithGrpcServer(grpcServer)
if err != nil {
return mkErr("error registering gRPC server: %v", err)
return mkErr("error registering gRPC server", err)
}
// Now that both the WalletUnlocker and LightningService have been
// registered with the GRPC server, we can start listening.
err = startGrpcListen(cfg, grpcServer, grpcListeners)
if err != nil {
return mkErr("error starting gRPC listener: %v", err)
return mkErr("error starting gRPC listener", err)
}
// Now start the REST proxy for our gRPC server above. We'll ensure
@ -379,10 +402,10 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
// wildcard to prevent certificate issues when accessing the proxy
// externally.
stopProxy, err := startRestProxy(
cfg, rpcServer, restDialOpts, restListen,
ctx, cfg, rpcServer, restDialOpts, restListen,
)
if err != nil {
return mkErr("error starting REST proxy: %v", err)
return mkErr("error starting REST proxy", err)
}
defer stopProxy()
@ -399,8 +422,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
cancelElection()
}()
ltndLog.Infof("Using %v leader elector",
cfg.Cluster.LeaderElector)
ltndLog.InfoS(ctx, "Using leader elector",
"elector", cfg.Cluster.LeaderElector)
leaderElector, err = cfg.Cluster.MakeLeaderElector(
electionCtx, cfg.DB,
@ -414,8 +437,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
return
}
ltndLog.Infof("Attempting to resign from leader role "+
"(%v)", cfg.Cluster.ID)
ltndLog.InfoS(ctx, "Attempting to resign from "+
"leader role", "cluster_id", cfg.Cluster.ID)
// Ensure that we don't block the shutdown process if
// the leader resigning process takes too long. The
@ -433,24 +456,26 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
}
}()
ltndLog.Infof("Starting leadership campaign (%v)",
cfg.Cluster.ID)
ltndLog.InfoS(ctx, "Starting leadership campaign",
"cluster_id", cfg.Cluster.ID)
if err := leaderElector.Campaign(electionCtx); err != nil {
return mkErr("leadership campaign failed: %v", err)
return mkErr("leadership campaign failed", err)
}
elected = true
ltndLog.Infof("Elected as leader (%v)", cfg.Cluster.ID)
ltndLog.InfoS(ctx, "Elected as leader",
"cluster_id", cfg.Cluster.ID)
}
dbs, cleanUp, err := implCfg.DatabaseBuilder.BuildDatabase(ctx)
switch {
case err == channeldb.ErrDryRunMigrationOK:
ltndLog.Infof("%v, exiting", err)
case errors.Is(err, channeldb.ErrDryRunMigrationOK):
ltndLog.InfoS(ctx, "Exiting due to BuildDatabase error",
slog.Any("err", err))
return nil
case err != nil:
return mkErr("unable to open databases: %v", err)
return mkErr("unable to open databases", err)
}
defer cleanUp()
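
The dry-run case now matches with `errors.Is` rather than `==`, which matters once errors pass through wrapping helpers such as the new `mkErr`: a sentinel wrapped with `%w` no longer compares equal directly. A runnable illustration using a stand-in sentinel:

	package main

	import (
		"errors"
		"fmt"
	)

	// errDryRun stands in for channeldb.ErrDryRunMigrationOK.
	var errDryRun = errors.New("dry run migration successful")

	func main() {
		wrapped := fmt.Errorf("unable to open databases: %w", errDryRun)

		fmt.Println(wrapped == errDryRun)          // false: wrapping breaks ==
		fmt.Println(errors.Is(wrapped, errDryRun)) // true: Is walks the chain
	}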
@ -460,7 +485,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
grpcListeners,
)
if err != nil {
return mkErr("error creating wallet config: %v", err)
return mkErr("error creating wallet config", err)
}
defer cleanUp()
@ -469,7 +494,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
partialChainControl, walletConfig,
)
if err != nil {
return mkErr("error loading chain control: %v", err)
return mkErr("error loading chain control", err)
}
defer cleanUp()
@ -482,7 +507,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
},
)
if err != nil {
return mkErr("error deriving node key: %v", err)
return mkErr("error deriving node key", err)
}
if cfg.Tor.StreamIsolation && cfg.Tor.SkipProxyForClearNetTargets {
@ -491,14 +516,14 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
if cfg.Tor.Active {
if cfg.Tor.SkipProxyForClearNetTargets {
srvrLog.Info("Onion services are accessible via Tor! " +
"NOTE: Traffic to clearnet services is not " +
"routed via Tor.")
srvrLog.InfoS(ctx, "Onion services are accessible "+
"via Tor! NOTE: Traffic to clearnet services "+
"is not routed via Tor.")
} else {
srvrLog.Infof("Proxying all network traffic via Tor "+
"(stream_isolation=%v)! NOTE: Ensure the "+
"backend node is proxying over Tor as well",
cfg.Tor.StreamIsolation)
srvrLog.InfoS(ctx, "Proxying all network traffic "+
"via Tor! NOTE: Ensure the backend node is "+
"proxying over Tor as well",
"stream_isolation", cfg.Tor.StreamIsolation)
}
}
@ -515,13 +540,13 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
// Start the tor controller before giving it to any other
// subsystems.
if err := torController.Start(); err != nil {
return mkErr("unable to initialize tor controller: %v",
return mkErr("unable to initialize tor controller",
err)
}
defer func() {
if err := torController.Stop(); err != nil {
ltndLog.Errorf("error stopping tor "+
"controller: %v", err)
ltndLog.ErrorS(ctx, "Error stopping tor "+
"controller", err)
}
}()
}
@ -535,7 +560,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
},
)
if err != nil {
return mkErr("error deriving tower key: %v", err)
return mkErr("error deriving tower key", err)
}
wtCfg := &watchtower.Config{
@ -576,12 +601,12 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
wtCfg, lncfg.NormalizeAddresses,
)
if err != nil {
return mkErr("unable to configure watchtower: %v", err)
return mkErr("unable to configure watchtower", err)
}
tower, err = watchtower.New(wtConfig)
if err != nil {
return mkErr("unable to create watchtower: %v", err)
return mkErr("unable to create watchtower", err)
}
}
@ -604,7 +629,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
implCfg,
)
if err != nil {
return mkErr("unable to create server: %v", err)
return mkErr("unable to create server", err)
}
// Set up an autopilot manager from the current config. This will be
@ -615,22 +640,21 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
cfg.ActiveNetParams,
)
if err != nil {
return mkErr("unable to initialize autopilot: %v", err)
return mkErr("unable to initialize autopilot", err)
}
atplManager, err := autopilot.NewManager(atplCfg)
if err != nil {
return mkErr("unable to create autopilot manager: %v", err)
return mkErr("unable to create autopilot manager", err)
}
if err := atplManager.Start(); err != nil {
return mkErr("unable to start autopilot manager: %v", err)
return mkErr("unable to start autopilot manager", err)
}
defer atplManager.Stop()
err = tlsManager.LoadPermanentCertificate(activeChainControl.KeyRing)
if err != nil {
return mkErr("unable to load permanent TLS certificate: %v",
err)
return mkErr("unable to load permanent TLS certificate", err)
}
// Now we have created all dependencies necessary to populate and
@ -641,10 +665,10 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
server.invoiceHtlcModifier,
)
if err != nil {
return mkErr("unable to add deps to RPC server: %v", err)
return mkErr("unable to add deps to RPC server", err)
}
if err := rpcServer.Start(); err != nil {
return mkErr("unable to start RPC server: %v", err)
return mkErr("unable to start RPC server", err)
}
defer rpcServer.Stop()
@ -652,7 +676,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
interceptorChain.SetRPCActive()
if err := interceptor.Notifier.NotifyReady(true); err != nil {
return mkErr("error notifying ready: %v", err)
return mkErr("error notifying ready", err)
}
// We'll wait until we're fully synced to continue the start up of the
@ -661,11 +685,11 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
// funds.
_, bestHeight, err := activeChainControl.ChainIO.GetBestBlock()
if err != nil {
return mkErr("unable to determine chain tip: %v", err)
return mkErr("unable to determine chain tip", err)
}
ltndLog.Infof("Waiting for chain backend to finish sync, "+
"start_height=%v", bestHeight)
ltndLog.InfoS(ctx, "Waiting for chain backend to finish sync",
slog.Int64("start_height", int64(bestHeight)))
type syncResult struct {
synced bool
@ -692,12 +716,12 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
case res := <-syncedResChan:
if res.err != nil {
return mkErr("unable to determine if wallet "+
"is synced: %v", res.err)
"is synced", res.err)
}
ltndLog.Debugf("Syncing to block timestamp: %v, is "+
"synced=%v", time.Unix(res.bestBlockTime, 0),
res.synced)
ltndLog.DebugS(ctx, "Syncing to block chain",
"best_block_time", time.Unix(res.bestBlockTime, 0),
"is_synced", res.synced)
if res.synced {
break
@ -719,11 +743,11 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
_, bestHeight, err = activeChainControl.ChainIO.GetBestBlock()
if err != nil {
return mkErr("unable to determine chain tip: %v", err)
return mkErr("unable to determine chain tip", err)
}
ltndLog.Infof("Chain backend is fully synced (end_height=%v)!",
bestHeight)
ltndLog.InfoS(ctx, "Chain backend is fully synced!",
"end_height", bestHeight)
// With all the relevant chains initialized, we can finally start the
// server itself. We start the server in an asynchronous goroutine so
@ -737,8 +761,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
defer func() {
err := server.Stop()
if err != nil {
ltndLog.Warnf("Stopping the server including all "+
"its subsystems failed with %v", err)
ltndLog.WarnS(ctx, "Stopping the server including all "+
"its subsystems failed", err)
}
}()
@ -748,7 +772,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
break
}
return mkErr("unable to start server: %v", err)
return mkErr("unable to start server", err)
case <-interceptor.ShutdownChannel():
return nil
@ -762,13 +786,13 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
// stopped together with the autopilot service.
if cfg.Autopilot.Active {
if err := atplManager.StartAgent(); err != nil {
return mkErr("unable to start autopilot agent: %v", err)
return mkErr("unable to start autopilot agent", err)
}
}
if cfg.Watchtower.Active {
if err := tower.Start(); err != nil {
return mkErr("unable to start watchtower: %v", err)
return mkErr("unable to start watchtower", err)
}
defer tower.Stop()
}
@ -921,7 +945,8 @@ func startGrpcListen(cfg *Config, grpcServer *grpc.Server,
// startRestProxy starts the given REST proxy on the listeners found in the
// config.
func startRestProxy(cfg *Config, rpcServer *rpcServer, restDialOpts []grpc.DialOption,
func startRestProxy(ctx context.Context, cfg *Config, rpcServer *rpcServer,
restDialOpts []grpc.DialOption,
restListen func(net.Addr) (net.Listener, error)) (func(), error) {
// We use the first RPC listener as the destination for our REST proxy.
@ -948,7 +973,6 @@ func startRestProxy(cfg *Config, rpcServer *rpcServer, restDialOpts []grpc.DialO
}
// Start a REST proxy for our gRPC server.
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
shutdownFuncs = append(shutdownFuncs, cancel)

View File
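
`startRestProxy` now receives the caller's context instead of minting its own `context.Background()`, so cancelling the daemon's root context also tears down the REST proxy. Schematically, as a fragment rather than a full program:

	// Before: the proxy's lifetime was detached from the daemon's.
	ctx, cancel := context.WithCancel(context.Background())

	// After: derived from the ctx passed into startRestProxy, so both the
	// shutdownFuncs path and the daemon-wide cancel stop the proxy.
	ctx, cancel := context.WithCancel(ctx)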

@ -298,7 +298,7 @@ func (s *Server) ImportGraph(ctx context.Context,
rpcEdge.ChanPoint, err)
}
makePolicy := func(rpcPolicy *lnrpc.RoutingPolicy) *models.ChannelEdgePolicy { //nolint:lll
makePolicy := func(rpcPolicy *lnrpc.RoutingPolicy) *models.ChannelEdgePolicy { //nolint:ll
policy := &models.ChannelEdgePolicy{
ChannelID: rpcEdge.ChannelId,
LastUpdate: time.Unix(

View File

@ -519,7 +519,7 @@ func AddInvoice(ctx context.Context, cfg *AddInvoiceConfig,
finalCLTVDelta := uint32(cltvExpiryDelta)
finalCLTVDelta += uint32(routing.BlockPadding)
//nolint:lll
//nolint:ll
paths, err := blindedpath.BuildBlindedPaymentPaths(
&blindedpath.BuildBlindedPathCfg{
FindRoutes: cfg.QueryBlindedRoutes,
@ -534,7 +534,7 @@ func AddInvoice(ctx context.Context, cfg *AddInvoiceConfig,
p *blindedpath.BlindedHopPolicy) (
*blindedpath.BlindedHopPolicy, error) {
//nolint:lll
//nolint:ll
return blindedpath.AddPolicyBuffer(
p, blindCfg.RoutePolicyIncrMultiplier,
blindCfg.RoutePolicyDecrMultiplier,

View File

@ -12,7 +12,7 @@ import (
// options, while if able to be populated, the latter fields MUST also be
// specified.
//
//nolint:lll
//nolint:ll
type Config struct {
RoutingConfig

View File

@ -154,7 +154,7 @@ func (r *forwardInterceptor) resolveFromClient(
outWireCustomRecords = fn.Some[lnwire.CustomRecords](cr)
}
//nolint:lll
//nolint:ll
return r.htlcSwitch.Resolve(&htlcswitch.FwdResolution{
Key: circuitKey,
Action: htlcswitch.FwdActionResumeModified,

View File

@ -600,7 +600,7 @@ func (s *Server) probePaymentRequest(ctx context.Context, paymentRequest string,
// If the payment probe failed we only return the failure reason and
// leave the probe result params unaltered.
if resp.FailureReason != lnrpc.PaymentFailureReason_FAILURE_REASON_NONE { //nolint:lll
if resp.FailureReason != lnrpc.PaymentFailureReason_FAILURE_REASON_NONE { //nolint:ll
return resp, nil
}
@ -786,7 +786,7 @@ func (s *Server) sendProbePayment(ctx context.Context,
case lnrpc.Payment_FAILED:
// Incorrect payment details point to a
// successful probe.
//nolint:lll
//nolint:ll
if payment.FailureReason == lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS {
return paymentDetails(payment)
}
@ -1031,7 +1031,7 @@ func (s *Server) SetMissionControlConfig(ctx context.Context,
req.Config.HopProbability,
),
AprioriWeight: float64(req.Config.Weight),
CapacityFraction: routing.DefaultCapacityFraction, //nolint:lll
CapacityFraction: routing.DefaultCapacityFraction, //nolint:ll
}
}

View File

@ -276,7 +276,7 @@ func TestIsLsp(t *testing.T) {
bobExpensiveCopy.FeeProportionalMillionths = 1_000_000
bobExpensiveCopy.CLTVExpiryDelta = bobHopHint.CLTVExpiryDelta - 1
//nolint:lll
//nolint:ll
lspTestCases := []struct {
name string
routeHints [][]zpay32.HopHint

View File

@ -8,7 +8,7 @@ import (
// RoutingConfig contains the configurable parameters that control routing.
//
//nolint:lll
//nolint:ll
type RoutingConfig struct {
// ProbabilityEstimatorType sets the estimator to use.
ProbabilityEstimatorType string `long:"estimator" choice:"apriori" choice:"bimodal" description:"Probability estimator used for pathfinding." `
@ -48,7 +48,7 @@ type RoutingConfig struct {
// AprioriConfig defines parameters for the apriori probability.
//
//nolint:lll
//nolint:ll
type AprioriConfig struct {
// HopProbability is the assumed success probability of a hop in a route
// when no other information is available.
@ -73,7 +73,7 @@ type AprioriConfig struct {
// BimodalConfig defines parameters for the bimodal probability.
//
//nolint:lll
//nolint:ll
type BimodalConfig struct {
// Scale describes the scale over which channels still have some
// liquidity left on both channel ends. A value of 0 means that we

View File

@ -199,7 +199,7 @@ var (
// and the native enum cannot be renumbered because it is stored in the
// watchtower and BreachArbitrator databases.
//
//nolint:lll
//nolint:ll
allWitnessTypes = map[input.WitnessType]WitnessType{
input.CommitmentTimeLock: WitnessType_COMMITMENT_TIME_LOCK,
input.CommitmentNoDelay: WitnessType_COMMITMENT_NO_DELAY,

View File

@ -203,6 +203,7 @@ func (cfg *BaseNodeConfig) GenArgs() []string {
"--bitcoin.defaultchanconfs=1",
"--accept-keysend",
"--keep-failed-payment-attempts",
"--logging.no-commit-hash",
fmt.Sprintf("--db.batch-commit-interval=%v", commitInterval),
fmt.Sprintf("--bitcoin.defaultremotedelay=%v", DefaultCSV),
fmt.Sprintf("--rpclisten=%v", cfg.RPCAddr()),

View File
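
Harness nodes pass `--logging.no-commit-hash` so integration-test log output stays stable across builds. Outside the harness the option should also be settable in lnd.conf; the stanza below is inferred from the flag's `logging.` namespace and the usual go-flags layout, so treat it as an assumption:

	[logging]
	logging.no-commit-hash=true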

@ -705,7 +705,7 @@ func (h *HarnessRPC) GetChanInfo(
// LookupHtlcResolution makes an RPC call to the node's LookupHtlcResolution and
// returns the response.
//
//nolint:lll
//nolint:ll
func (h *HarnessRPC) LookupHtlcResolution(
req *lnrpc.LookupHtlcResolutionRequest) *lnrpc.LookupHtlcResolutionResponse {

View File

@ -15,7 +15,7 @@ import (
// UpdateChanStatus makes an UpdateChanStatus RPC call to the node's
// RouterClient and asserts.
//
//nolint:lll
//nolint:ll
func (h *HarnessRPC) UpdateChanStatus(
req *routerrpc.UpdateChanStatusRequest) *routerrpc.UpdateChanStatusResponse {
@ -76,7 +76,7 @@ func (h *HarnessRPC) SubscribeHtlcEvents() HtlcEventsClient {
// GetMissionControlConfig makes an RPC call to the node's
// GetMissionControlConfig and asserts.
//
//nolint:lll
//nolint:ll
func (h *HarnessRPC) GetMissionControlConfig() *routerrpc.GetMissionControlConfigResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
@ -142,7 +142,7 @@ func (h *HarnessRPC) SendToRouteV2(
// QueryProbability makes an RPC call to the node's QueryProbability and
// asserts.
//
//nolint:lll
//nolint:ll
func (h *HarnessRPC) QueryProbability(
req *routerrpc.QueryProbabilityRequest) *routerrpc.QueryProbabilityResponse {

View File

@ -88,7 +88,7 @@ func (h *HarnessRPC) MuSig2CreateSessionErr(
// MuSig2CombineKeys makes a RPC call to the node's SignerClient and asserts.
//
//nolint:lll
//nolint:ll
func (h *HarnessRPC) MuSig2CombineKeys(
req *signrpc.MuSig2CombineKeysRequest) *signrpc.MuSig2CombineKeysResponse {
@ -117,7 +117,7 @@ func (h *HarnessRPC) MuSig2CombineKeysErr(
// MuSig2RegisterNonces makes a RPC call to the node's SignerClient and asserts.
//
//nolint:lll
//nolint:ll
func (h *HarnessRPC) MuSig2RegisterNonces(
req *signrpc.MuSig2RegisterNoncesRequest) *signrpc.MuSig2RegisterNoncesResponse {

Some files were not shown because too many files have changed in this diff.