graph+routing: address linter errors

This is done in a separate commit so as to keep the original code-move
commit mostly a pure code move.
Elle Mouton 2024-06-18 12:34:25 -07:00
parent 743502f99d
commit fe34d62eb1
10 changed files with 216 additions and 192 deletions
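
Most of the changes below replace direct sentinel-error comparisons (err == channeldb.ErrX) with errors.Is, which Go error linters require because an error that has been wrapped (for example via fmt.Errorf with %w) no longer compares equal under ==. A minimal, self-contained sketch of the distinction (the sentinel name errNeverPruned is illustrative, not from this commit):

package main

import (
	"errors"
	"fmt"
)

var errNeverPruned = errors.New("graph never pruned")

func main() {
	// Wrapping with %w keeps the sentinel in the error chain.
	wrapped := fmt.Errorf("prune tip lookup: %w", errNeverPruned)

	// Direct equality misses the wrapped sentinel...
	fmt.Println(wrapped == errNeverPruned) // false

	// ...while errors.Is unwraps the chain and matches it.
	fmt.Println(errors.Is(wrapped, errNeverPruned)) // true
}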


@@ -2200,7 +2200,9 @@ func (d *AuthenticatedGossiper) updateChannel(info *models.ChannelEdgeInfo,
// To ensure that our signature is valid, we'll verify it ourself
// before committing it to the slice returned.
-err = graph.ValidateChannelUpdateAnn(d.selfKey, info.Capacity, chanUpdate)
+err = graph.ValidateChannelUpdateAnn(
+d.selfKey, info.Capacity, chanUpdate,
+)
if err != nil {
return nil, nil, fmt.Errorf("generated invalid channel "+
"update sig: %v", err)


@@ -222,7 +222,7 @@ func (b *Builder) Start() error {
// channels from the graph based on their spentness, but whether they
// are considered zombies or not. We will start zombie pruning after a
// small delay, to avoid slowing down startup of lnd.
-if b.cfg.AssumeChannelValid {
+if b.cfg.AssumeChannelValid { //nolint:nestif
time.AfterFunc(b.cfg.FirstTimePruneDelay, func() {
select {
case <-b.quit:
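
The delayed-prune shape referenced in the comment above (start zombie pruning only after FirstTimePruneDelay, and skip it if the daemon is already shutting down) can be sketched on its own; quit and the literal delay here are stand-ins, not the Builder's actual configuration:

package main

import (
	"fmt"
	"time"
)

func main() {
	quit := make(chan struct{})

	// Defer the first zombie prune so it does not slow down startup.
	time.AfterFunc(50*time.Millisecond, func() {
		// If a shutdown was requested in the meantime, skip the
		// prune rather than delaying the shutdown.
		select {
		case <-quit:
			return
		default:
		}

		fmt.Println("pruning zombie channels")
	})

	time.Sleep(100 * time.Millisecond)
	close(quit)
}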
@@ -256,6 +256,7 @@ func (b *Builder) Start() error {
if err != nil && !errors.Is(
err, channeldb.ErrGraphNoEdgesFound,
) {
return err
}
@@ -290,7 +291,9 @@ func (b *Builder) Start() error {
// from the graph in order to ensure we maintain a tight graph
// of "useful" nodes.
err = b.cfg.Graph.PruneGraphNodes()
-if err != nil && err != channeldb.ErrGraphNodesNotFound {
+if err != nil &&
+!errors.Is(err, channeldb.ErrGraphNodesNotFound) {
return err
}
}
@@ -344,8 +347,8 @@ func (b *Builder) syncGraphWithChain() error {
switch {
// If the graph has never been pruned, or hasn't fully been
// created yet, then we don't treat this as an explicit error.
-case err == channeldb.ErrGraphNeverPruned:
-case err == channeldb.ErrGraphNotFound:
+case errors.Is(err, channeldb.ErrGraphNeverPruned):
+case errors.Is(err, channeldb.ErrGraphNotFound):
default:
return err
}
@@ -355,7 +358,6 @@ func (b *Builder) syncGraphWithChain() error {
pruneHeight, pruneHash)
switch {
// If the graph has never been pruned, then we can exit early as this
// entails it's being created for the first time and hasn't seen any
// block or created channels.
@@ -388,34 +390,40 @@ func (b *Builder) syncGraphWithChain() error {
}
pruneHash, pruneHeight, err = b.cfg.Graph.PruneTip()
-if err != nil {
-switch {
-// If at this point the graph has never been pruned, we
-// can exit as this entails we are back to the point
-// where it hasn't seen any block or created channels,
-// alas there's nothing left to prune.
-case err == channeldb.ErrGraphNeverPruned:
-return nil
-case err == channeldb.ErrGraphNotFound:
-return nil
-default:
-return err
-}
-}
+switch {
+// If at this point the graph has never been pruned, we can exit
+// as this entails we are back to the point where it hasn't seen
+// any block or created channels, alas there's nothing left to
+// prune.
+case errors.Is(err, channeldb.ErrGraphNeverPruned):
+return nil
+case errors.Is(err, channeldb.ErrGraphNotFound):
+return nil
+case err != nil:
+return err
+default:
+}
-mainBlockHash, err = b.cfg.Chain.GetBlockHash(int64(pruneHeight))
+mainBlockHash, err = b.cfg.Chain.GetBlockHash(
+int64(pruneHeight),
+)
if err != nil {
return err
}
}
}
log.Infof("Syncing channel graph from height=%v (hash=%v) to height=%v "+
"(hash=%v)", pruneHeight, pruneHash, bestHeight, bestHash)
log.Infof("Syncing channel graph from height=%v (hash=%v) to "+
"height=%v (hash=%v)", pruneHeight, pruneHash, bestHeight,
bestHash)
// If we're not yet caught up, then we'll walk forward in the chain
// pruning the channel graph with each new block that hasn't yet been
// consumed by the channel graph.
var spentOutputs []*wire.OutPoint
-for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ {
+for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ { //nolint:lll
// Break out of the rescan early if a shutdown has been
// requested, otherwise long rescans will block the daemon from
// shutting down promptly.
@@ -462,6 +470,7 @@ func (b *Builder) syncGraphWithChain() error {
log.Infof("Graph pruning complete: %v channels were closed since "+
"height %v", len(closedChans), pruneHeight)
return nil
}
@@ -615,7 +624,11 @@ func (b *Builder) pruneZombieChans() error {
}
for _, u := range oldEdges {
-filterPruneChans(u.Info, u.Policy1, u.Policy2)
+err = filterPruneChans(u.Info, u.Policy1, u.Policy2)
+if err != nil {
+return fmt.Errorf("error filtering channels to "+
+"prune: %w", err)
+}
}
log.Infof("Pruning %v zombie channels", len(chansToPrune))
@@ -640,7 +653,7 @@ func (b *Builder) pruneZombieChans() error {
// With the channels pruned, we'll also attempt to prune any nodes that
// were a part of them.
err = b.cfg.Graph.PruneGraphNodes()
-if err != nil && err != channeldb.ErrGraphNodesNotFound {
+if err != nil && !errors.Is(err, channeldb.ErrGraphNodesNotFound) {
return fmt.Errorf("unable to prune graph nodes: %w", err)
}
@@ -761,7 +774,6 @@ func (b *Builder) networkHandler() {
}
for {
// If there are stats, resume the statTicker.
if !b.stats.Empty() {
b.statTicker.Resume()
@@ -793,12 +805,14 @@ func (b *Builder) networkHandler() {
// Since this block is stale, we update our best height
// to the previous block.
-blockHeight := uint32(chainUpdate.Height)
+blockHeight := chainUpdate.Height
atomic.StoreUint32(&b.bestHeight, blockHeight-1)
// Update the channel graph to reflect that this block
// was disconnected.
-_, err := b.cfg.Graph.DisconnectBlockAtHeight(blockHeight)
+_, err := b.cfg.Graph.DisconnectBlockAtHeight(
+blockHeight,
+)
if err != nil {
log.Errorf("unable to prune graph with stale "+
"block: %v", err)
@@ -836,7 +850,9 @@ func (b *Builder) networkHandler() {
"height=%v, got height=%v",
currentHeight+1, chainUpdate.Height)
-err := b.getMissingBlocks(currentHeight, chainUpdate)
+err := b.getMissingBlocks(
+currentHeight, chainUpdate,
+)
if err != nil {
log.Errorf("unable to retrieve missing"+
"blocks: %v", err)
@@ -1136,6 +1152,8 @@ func makeFundingScript(bitcoinKey1, bitcoinKey2 []byte,
// channel/edge update network update. If the update didn't affect the internal
// state of the draft due to either being out of date, invalid, or redundant,
// then error is returned.
+//
+//nolint:funlen
func (b *Builder) processUpdate(msg interface{},
op ...batch.SchedulerOption) error {
@@ -1166,7 +1184,9 @@ func (b *Builder) processUpdate(msg interface{},
_, _, exists, isZombie, err := b.cfg.Graph.HasChannelEdge(
msg.ChannelID,
)
-if err != nil && err != channeldb.ErrGraphNoEdgesFound {
+if err != nil &&
+!errors.Is(err, channeldb.ErrGraphNoEdgesFound) {
return errors.Errorf("unable to check for edge "+
"existence: %v", err)
}
@@ -1188,7 +1208,8 @@ func (b *Builder) processUpdate(msg interface{},
// ChannelAnnouncement from the gossiper.
scid := lnwire.NewShortChanIDFromInt(msg.ChannelID)
if b.cfg.AssumeChannelValid || b.cfg.IsAlias(scid) {
-if err := b.cfg.Graph.AddChannelEdge(msg, op...); err != nil {
+err := b.cfg.Graph.AddChannelEdge(msg, op...)
+if err != nil {
return fmt.Errorf("unable to add edge: %w", err)
}
log.Tracef("New channel discovered! Link "+
@@ -1206,6 +1227,8 @@ func (b *Builder) processUpdate(msg interface{},
channelID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
fundingTx, err := b.fetchFundingTxWrapper(&channelID)
if err != nil {
+//nolint:lll
+//
// In order to ensure we don't erroneously mark a
// channel as a zombie due to an RPC failure, we'll
// attempt to string match for the relevant errors.
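
The comment above describes classifying a failed funding-transaction fetch by its message text: once an error has crossed the backend's RPC boundary it arrives as a plain string, so typed checks are unavailable. A rough sketch of the idea (the substrings here are illustrative, not the exact patterns lnd matches):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// isTransientFetchErr reports whether a funding-tx lookup failure looks
// like a backend hiccup rather than evidence the output never existed.
func isTransientFetchErr(err error) bool {
	msg := strings.ToLower(err.Error())

	// Only the message text survives the RPC boundary, so substring
	// matching is the only available signal.
	return strings.Contains(msg, "connection refused") ||
		strings.Contains(msg, "timed out")
}

func main() {
	err := errors.New("rpc error: connection refused")
	fmt.Println(isTransientFetchErr(err)) // true
}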
@@ -1253,13 +1276,15 @@ func (b *Builder) processUpdate(msg interface{},
// formed. If this check fails, then this channel either
// doesn't exist, or isn't the one that was meant to be created
// according to the passed channel proofs.
-fundingPoint, err := chanvalidate.Validate(&chanvalidate.Context{
-Locator: &chanvalidate.ShortChanIDChanLocator{
-ID: channelID,
-},
-MultiSigPkScript: fundingPkScript,
-FundingTx: fundingTx,
-})
+fundingPoint, err := chanvalidate.Validate(
+&chanvalidate.Context{
+Locator: &chanvalidate.ShortChanIDChanLocator{
+ID: channelID,
+},
+MultiSigPkScript: fundingPkScript,
+FundingTx: fundingTx,
+},
+)
if err != nil {
// Mark the edge as a zombie so we won't try to
// re-validate it on start up.
@@ -1336,16 +1361,20 @@ func (b *Builder) processUpdate(msg interface{},
edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
b.cfg.Graph.HasChannelEdge(msg.ChannelID)
-if err != nil && err != channeldb.ErrGraphNoEdgesFound {
+if err != nil && !errors.Is(
+err, channeldb.ErrGraphNoEdgesFound,
+) {
return errors.Errorf("unable to check for edge "+
"existence: %v", err)
}
// If the channel is marked as a zombie in our database, and
// we consider this a stale update, then we should not apply the
// policy.
-isStaleUpdate := time.Since(msg.LastUpdate) > b.cfg.ChannelPruneExpiry
+isStaleUpdate := time.Since(msg.LastUpdate) >
+b.cfg.ChannelPruneExpiry
if isZombie && isStaleUpdate {
return newErrf(ErrIgnored, "ignoring stale update "+
"(flags=%v|%v) for zombie chan_id=%v",
@@ -1368,7 +1397,6 @@ func (b *Builder) processUpdate(msg interface{},
// that edge. If this message has a timestamp not strictly
// newer than what we already know of we can exit early.
switch {
// A flag set of 0 indicates this is an announcement for the
// "first" node in the channel.
case msg.ChannelFlags&lnwire.ChanUpdateDirection == 0:
@@ -1448,7 +1476,7 @@ func (b *Builder) fetchFundingTxWrapper(chanID *lnwire.ShortChannelID) (
// short channel ID.
//
// TODO(roasbeef): replace with call to GetBlockTransaction? (would allow to
-// later use getblocktxn)
+// later use getblocktxn).
func (b *Builder) fetchFundingTx(
chanID *lnwire.ShortChannelID) (*wire.MsgTx, error) {
@@ -1702,6 +1730,7 @@ func (b *Builder) AddProof(chanID lnwire.ShortChannelID,
}
info.AuthProof = proof
return b.cfg.Graph.UpdateChannelEdge(info)
}
@@ -1739,6 +1768,7 @@ func (b *Builder) IsKnownEdge(chanID lnwire.ShortChannelID) bool {
_, _, exists, isZombie, _ := b.cfg.Graph.HasChannelEdge(
chanID.ToUint64(),
)
return exists || isZombie
}
@@ -1754,7 +1784,6 @@ func (b *Builder) IsStaleEdgePolicy(chanID lnwire.ShortChannelID,
if err != nil {
log.Debugf("Check stale edge policy got error: %v", err)
return false
}
// If we know of the edge as a zombie, then we'll make some additional


@@ -182,7 +182,7 @@ func TestIgnoreChannelEdgePolicyForUnknownChannel(t *testing.T) {
}
// TestWakeUpOnStaleBranch tests that upon startup of the ChannelRouter, if the
-// the chain previously reflected in the channel graph is stale (overtaken by a
+// chain previously reflected in the channel graph is stale (overtaken by a
// longer chain), the channel router will prune the graph for any channels
// confirmed on the stale chain, and resync to the main chain.
func TestWakeUpOnStaleBranch(t *testing.T) {
@@ -216,7 +216,6 @@ func TestWakeUpOnStaleBranch(t *testing.T) {
block.Transactions = append(block.Transactions,
fundingTx)
chanID1 = chanID.ToUint64()
}
ctx.chain.addBlock(block, height, rand.Uint32())
ctx.chain.setBestBlock(int32(height))
@@ -418,7 +417,6 @@ func TestDisconnectedBlocks(t *testing.T) {
block.Transactions = append(block.Transactions,
fundingTx)
chanID1 = chanID.ToUint64()
}
ctx.chain.addBlock(block, height, rand.Uint32())
ctx.chain.setBestBlock(int32(height))
@@ -633,7 +631,9 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) {
}
// The router should now be aware of the channel we created above.
-_, _, hasChan, isZombie, err := ctx.graph.HasChannelEdge(chanID1.ToUint64())
+_, _, hasChan, isZombie, err := ctx.graph.HasChannelEdge(
+chanID1.ToUint64(),
+)
if err != nil {
t.Fatalf("error looking for edge: %v", chanID1)
}
@@ -713,7 +713,9 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) {
// At this point, the channel that was pruned should no longer be known
// by the router.
-_, _, hasChan, isZombie, err = ctx.graph.HasChannelEdge(chanID1.ToUint64())
+_, _, hasChan, isZombie, err = ctx.graph.HasChannelEdge(
+chanID1.ToUint64(),
+)
if err != nil {
t.Fatalf("error looking for edge: %v", chanID1)
}
@@ -833,20 +835,23 @@ func TestPruneChannelGraphStaleEdges(t *testing.T) {
// All of the channels should exist before pruning them.
assertChannelsPruned(t, ctx.graph, testChannels)
-// Proceed to prune the channels - only the last one should be pruned.
+// Proceed to prune the channels - only the last one should be
+// pruned.
if err := ctx.builder.pruneZombieChans(); err != nil {
t.Fatalf("unable to prune zombie channels: %v", err)
}
-// We expect channels that have either both edges stale, or one edge
-// stale with both known.
+// We expect channels that have either both edges stale, or one
+// edge stale with both known.
var prunedChannels []uint64
if strictPruning {
prunedChannels = []uint64{2, 5, 7}
} else {
prunedChannels = []uint64{2, 7}
}
-assertChannelsPruned(t, ctx.graph, testChannels, prunedChannels...)
+assertChannelsPruned(
+t, ctx.graph, testChannels, prunedChannels...,
+)
}
}
@@ -1387,7 +1392,9 @@ func TestBlockDifferenceFix(t *testing.T) {
err := wait.NoError(func() error {
// Then router height should be updated to the latest block.
-if atomic.LoadUint32(&ctx.builder.bestHeight) != newBlockHeight {
+if atomic.LoadUint32(&ctx.builder.bestHeight) !=
+newBlockHeight {
return fmt.Errorf("height should have been updated "+
"to %v, instead got %v", newBlockHeight,
ctx.builder.bestHeight)
@@ -1589,7 +1596,10 @@ func parseTestGraph(t *testing.T, useCache bool, path string) (
}
err = graph.AddChannelEdge(&edgeInfo)
-if err != nil && err != channeldb.ErrEdgeAlreadyExist {
+if err != nil && !errors.Is(
+err, channeldb.ErrEdgeAlreadyExist,
+) {
return nil, err
}
@@ -1601,17 +1611,27 @@ func parseTestGraph(t *testing.T, useCache bool, path string) (
}
edgePolicy := &models.ChannelEdgePolicy{
-SigBytes: testSig.Serialize(),
-MessageFlags: lnwire.ChanUpdateMsgFlags(edge.MessageFlags),
-ChannelFlags: channelFlags,
-ChannelID: edge.ChannelID,
-LastUpdate: testTime,
-TimeLockDelta: edge.Expiry,
-MinHTLC: lnwire.MilliSatoshi(edge.MinHTLC),
-MaxHTLC: lnwire.MilliSatoshi(edge.MaxHTLC),
-FeeBaseMSat: lnwire.MilliSatoshi(edge.FeeBaseMsat),
-FeeProportionalMillionths: lnwire.MilliSatoshi(edge.FeeRate),
-ToNode: targetNode,
+SigBytes: testSig.Serialize(),
+MessageFlags: lnwire.ChanUpdateMsgFlags(
+edge.MessageFlags,
+),
+ChannelFlags: channelFlags,
+ChannelID: edge.ChannelID,
+LastUpdate: testTime,
+TimeLockDelta: edge.Expiry,
+MinHTLC: lnwire.MilliSatoshi(
+edge.MinHTLC,
+),
+MaxHTLC: lnwire.MilliSatoshi(
+edge.MaxHTLC,
+),
+FeeBaseMSat: lnwire.MilliSatoshi(
+edge.FeeBaseMsat,
+),
+FeeProportionalMillionths: lnwire.MilliSatoshi(
+edge.FeeRate,
+),
+ToNode: targetNode,
}
if err := graph.UpdateEdgePolicy(edgePolicy); err != nil {
return nil, err
@@ -1652,7 +1672,7 @@ func parseTestGraph(t *testing.T, useCache bool, path string) (
// testGraph is the struct which corresponds to the JSON format used to encode
// graphs within the files in the testdata directory.
//
-// TODO(roasbeef): add test graph auto-generator
+// TODO(roasbeef): add test graph auto-generator.
type testGraph struct {
Info []string `json:"info"`
Nodes []testNode `json:"nodes"`
@@ -1788,13 +1808,14 @@ type testChannelPolicy struct {
Features *lnwire.FeatureVector
}
-// createTestGraphFromChannels returns a fully populated ChannelGraph based on a set of
-// test channels. Additional required information like keys are derived in
-// a deterministic way and added to the channel graph. A list of nodes is
-// not required and derived from the channel data. The goal is to keep
-// instantiating a test channel graph as light weight as possible.
+// createTestGraphFromChannels returns a fully populated ChannelGraph based on a
+// set of test channels. Additional required information like keys are derived
+// in a deterministic way and added to the channel graph. A list of nodes is not
+// required and derived from the channel data. The goal is to keep instantiating
+// a test channel graph as light weight as possible.
func createTestGraphFromChannels(t *testing.T, useCache bool,
-testChannels []*testChannel, source string) (*testGraphInstance, error) {
+testChannels []*testChannel, source string) (*testGraphInstance,
+error) {
// We'll use this fake address for the IP address of all the nodes in
// our tests. This value isn't needed for path finding so it doesn't
@@ -1940,7 +1961,9 @@ func createTestGraphFromChannels(t *testing.T, useCache bool,
}
err = graph.AddChannelEdge(&edgeInfo)
-if err != nil && err != channeldb.ErrEdgeAlreadyExist {
+if err != nil &&
+!errors.Is(err, channeldb.ErrEdgeAlreadyExist) {
return nil, err
}
@@ -1981,7 +2004,8 @@ func createTestGraphFromChannels(t *testing.T, useCache bool,
ToNode: node2Vertex,
ExtraOpaqueData: getExtraData(node1),
}
-if err := graph.UpdateEdgePolicy(edgePolicy); err != nil {
+err := graph.UpdateEdgePolicy(edgePolicy)
+if err != nil {
return nil, err
}
}
@@ -2011,12 +2035,13 @@ func createTestGraphFromChannels(t *testing.T, useCache bool,
ToNode: node1Vertex,
ExtraOpaqueData: getExtraData(node2),
}
-if err := graph.UpdateEdgePolicy(edgePolicy); err != nil {
+err := graph.UpdateEdgePolicy(edgePolicy)
+if err != nil {
return nil, err
}
}
-channelID++
+channelID++ //nolint:ineffassign
}
return &testGraphInstance{


@@ -18,7 +18,7 @@ func init() {
}
// DisableLog disables all library log output. Logging output is disabled by
-// by default until UseLogger is called.
+// default until UseLogger is called.
func DisableLog() {
UseLogger(btclog.Disabled)
}


@@ -117,7 +117,6 @@ type topologyClient struct {
// notifyTopologyChange notifies all registered clients of a new change in
// graph topology in a non-blocking.
func (b *Builder) notifyTopologyChange(topologyDiff *TopologyChange) {
// notifyClient is a helper closure that will send topology updates to
// the given client.
notifyClient := func(clientID uint64, client *topologyClient) bool {
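
For context on the "non-blocking" wording above: the usual shape is to hand each registered client its update on its own goroutine, selecting against a shutdown signal so one slow consumer can neither block the caller nor the other clients. A hedged sketch of that delivery pattern (the channel layout is a stand-in, not the actual topologyClient fields):

package main

import (
	"fmt"
	"sync"
)

type topologyUpdate struct{ summary string }

// notifyAll fans an update out to every client without letting a slow
// client block the others.
func notifyAll(clients []chan topologyUpdate, quit chan struct{},
	u topologyUpdate) {

	var wg sync.WaitGroup
	for _, c := range clients {
		wg.Add(1)
		go func(c chan topologyUpdate) {
			defer wg.Done()
			select {
			case c <- u: // Client accepted the update.
			case <-quit: // Shutting down; drop it instead.
			}
		}(c)
	}

	// Waiting here keeps the demo deterministic; a real notifier would
	// return immediately and let the goroutines drain on their own.
	wg.Wait()
}

func main() {
	quit := make(chan struct{})
	c := make(chan topologyUpdate, 1)
	notifyAll([]chan topologyUpdate{c}, quit, topologyUpdate{"new chan"})
	fmt.Println((<-c).summary)
}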


@@ -55,13 +55,19 @@ var (
timeout = time.Second * 5
-testRBytes, _ = hex.DecodeString("8ce2bc69281ce27da07e6683571319d18e949ddfa2965fb6caa1bf0314f882d7")
-testSBytes, _ = hex.DecodeString("299105481d63e0f4bc2a88121167221b6700d72a0ead154c03be696a292d24ae")
-testRScalar = new(btcec.ModNScalar)
-testSScalar = new(btcec.ModNScalar)
-_ = testRScalar.SetByteSlice(testRBytes)
-_ = testSScalar.SetByteSlice(testSBytes)
-testSig = ecdsa.NewSignature(testRScalar, testSScalar)
+testRBytes, _ = hex.DecodeString(
+"8ce2bc69281ce27da07e6683571319d18e949ddfa2965fb6caa1bf03" +
+"14f882d7",
+)
+testSBytes, _ = hex.DecodeString(
+"299105481d63e0f4bc2a88121167221b6700d72a0ead154c03be696a2" +
+"92d24ae",
+)
+testRScalar = new(btcec.ModNScalar)
+testSScalar = new(btcec.ModNScalar)
+_ = testRScalar.SetByteSlice(testRBytes)
+_ = testSScalar.SetByteSlice(testSBytes)
+testSig = ecdsa.NewSignature(testRScalar, testSScalar)
testAuthProof = models.ChannelAuthProof{
NodeSig1Bytes: testSig.Serialize(),
@@ -1027,22 +1033,6 @@ type testCtx struct {
notifier *lnmock.ChainNotifier
}
-func (c *testCtx) getChannelIDFromAlias(t *testing.T, a, b string) uint64 {
-vertexA, ok := c.aliases[a]
-require.True(t, ok, "cannot find aliases for %s", a)
-vertexB, ok := c.aliases[b]
-require.True(t, ok, "cannot find aliases for %s", b)
-channelIDMap, ok := c.channelIDs[vertexA]
-require.True(t, ok, "cannot find channelID map %s(%s)", vertexA, a)
-channelID, ok := channelIDMap[vertexB]
-require.True(t, ok, "cannot find channelID using %s(%s)", vertexB, b)
-return channelID
-}
func createTestCtxSingleNode(t *testing.T,
startingHeight uint32) *testCtx {
@@ -1127,8 +1117,8 @@ type testGraphInstance struct {
graphBackend kvdb.Backend
// aliasMap is a map from a node's alias to its public key. This type is
-// provided in order to allow easily look up from the human memorable alias
-// to an exact node's public key.
+// provided in order to allow easily look up from the human memorable
+// alias to an exact node's public key.
aliasMap map[string]route.Vertex
@@ -1201,7 +1191,7 @@ func createTestCtxFromGraphInstanceAssumeValid(t *testing.T,
}
t.Cleanup(func() {
-graphBuilder.Stop()
+require.NoError(t, graphBuilder.Stop())
})
return ctx


@@ -2288,7 +2288,8 @@ func TestPathFindSpecExample(t *testing.T) {
// parameters.
lastHop := route.Hops[1]
require.EqualValues(t, amt, lastHop.AmtToForward)
-require.EqualValues(t, startingHeight+MinCLTVDelta, lastHop.OutgoingTimeLock)
+require.EqualValues(t, startingHeight+MinCLTVDelta,
+lastHop.OutgoingTimeLock)
}
func assertExpectedPath(t *testing.T, aliasMap map[string]route.Vertex,
@@ -2297,7 +2298,8 @@ func assertExpectedPath(t *testing.T, aliasMap map[string]route.Vertex,
require.Len(t, path, len(nodeAliases))
for i, hop := range path {
-require.Equal(t, aliasMap[nodeAliases[i]], hop.policy.ToNodePubKey())
+require.Equal(t, aliasMap[nodeAliases[i]],
+hop.policy.ToNodePubKey())
}
}


@@ -59,8 +59,6 @@ var (
priv2, _ = btcec.NewPrivateKey()
bitcoinKey2 = priv2.PubKey()
timeout = time.Second * 5
)
type testCtx struct {
@@ -194,7 +192,7 @@ func createTestNode() (*channeldb.LightningNode, error) {
LastUpdate: time.Unix(updateTime, 0),
Addresses: testAddrs,
Color: color.RGBA{1, 2, 3, 0},
-Alias: "kek" + string(pub[:]),
+Alias: "kek" + string(pub),
AuthSigBytes: testSig.Serialize(),
Features: testFeatures,
}
@@ -308,7 +306,6 @@ func TestSendPaymentRouteFailureFallback(t *testing.T) {
// the more costly path (through pham nuwen).
ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcherOld).setPaymentResult(
func(firstHop lnwire.ShortChannelID) ([32]byte, error) {
if firstHop == roasbeefSongoku {
return [32]byte{}, htlcswitch.NewForwardingError(
// TODO(roasbeef): temp node failure
@@ -607,26 +604,29 @@ func TestSendPaymentErrorRepeatedFeeInsufficient(t *testing.T) {
// We'll now modify the SendToSwitch method to return an error for the
// outgoing channel to Son goku. This will be a fee related error, so
// it should only cause the edge to be pruned after the second attempt.
-ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcherOld).setPaymentResult(
-func(firstHop lnwire.ShortChannelID) ([32]byte, error) {
-roasbeefSongoku := lnwire.NewShortChanIDFromInt(
-roasbeefSongokuChanID,
-)
-if firstHop == roasbeefSongoku {
-return [32]byte{}, htlcswitch.NewForwardingError(
-// Within our error, we'll add a
-// channel update which is meant to
-// reflect the new fee schedule for the
-// node/channel.
-&lnwire.FailFeeInsufficient{
-Update: errChanUpdate,
-}, 1,
-)
-}
-return preImage, nil
-})
+dispatcher, ok := ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcherOld) //nolint:lll
+require.True(t, ok)
+dispatcher.setPaymentResult(func(firstHop lnwire.ShortChannelID) (
+[32]byte, error) {
+roasbeefSongoku := lnwire.NewShortChanIDFromInt(
+roasbeefSongokuChanID,
+)
+if firstHop == roasbeefSongoku {
+return [32]byte{}, htlcswitch.NewForwardingError(
+// Within our error, we'll add a
+// channel update which is meant to
+// reflect the new fee schedule for the
+// node/channel.
+&lnwire.FailFeeInsufficient{
+Update: errChanUpdate,
+}, 1,
+)
+}
+return preImage, nil
+})
// Send off the payment request to the router, route through phamnuwen
// should've been selected as a fall back and succeeded correctly.
@@ -1211,12 +1211,8 @@ func TestFindPathFeeWeighting(t *testing.T) {
// The route that was chosen should be exactly one hop, and should be
// directly to luoji.
-if len(path) != 1 {
-t.Fatalf("expected path length of 1, instead was: %v", len(path))
-}
-if path[0].policy.ToNodePubKey() != ctx.aliases["luoji"] {
-t.Fatalf("wrong node: %v", path[0].policy.ToNodePubKey())
-}
+require.Len(t, path, 1)
+require.Equal(t, ctx.aliases["luoji"], path[0].policy.ToNodePubKey())
}
// TestEmptyRoutesGenerateSphinxPacket tests that the generateSphinxPacket
@@ -1228,9 +1224,7 @@ func TestEmptyRoutesGenerateSphinxPacket(t *testing.T) {
sessionKey, _ := btcec.NewPrivateKey()
emptyRoute := &route.Route{}
_, _, err := generateSphinxPacket(emptyRoute, testHash[:], sessionKey)
-if err != route.ErrNoRouteHopsProvided {
-t.Fatalf("expected empty hops error: instead got: %v", err)
-}
+require.ErrorIs(t, err, route.ErrNoRouteHopsProvided)
}
// TestUnknownErrorSource tests that if the source of an error is unknown, all
@@ -1270,7 +1264,9 @@ func TestUnknownErrorSource(t *testing.T) {
}, 4),
}
-testGraph, err := createTestGraphFromChannels(t, true, testChannels, "a")
+testGraph, err := createTestGraphFromChannels(
+t, true, testChannels, "a",
+)
require.NoError(t, err, "unable to create graph")
const startingBlockHeight = 101
@@ -1284,20 +1280,23 @@ func TestUnknownErrorSource(t *testing.T) {
// We'll modify the SendToSwitch method so that it simulates hop b as a
// node that returns an unparsable failure if approached via the a->b
// channel.
-ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcherOld).setPaymentResult(
-func(firstHop lnwire.ShortChannelID) ([32]byte, error) {
-// If channel a->b is used, return an error without
-// source and message. The sender won't know the origin
-// of the error.
-if firstHop.ToUint64() == 1 {
-return [32]byte{},
-htlcswitch.ErrUnreadableFailureMessage
-}
-// Otherwise the payment succeeds.
-return lntypes.Preimage{}, nil
-})
+dispatcher, ok := ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcherOld) //nolint:lll
+require.True(t, ok)
+dispatcher.setPaymentResult(func(firstHop lnwire.ShortChannelID) (
+[32]byte, error) {
+// If channel a->b is used, return an error without
+// source and message. The sender won't know the origin
+// of the error.
+if firstHop.ToUint64() == 1 {
+return [32]byte{},
+htlcswitch.ErrUnreadableFailureMessage
+}
+// Otherwise the payment succeeds.
+return lntypes.Preimage{}, nil
+})
// Send off the payment request to the router. The expectation is that
// the route a->b->c is tried first. An unreadable faiure is returned
@@ -1308,19 +1307,22 @@ func TestUnknownErrorSource(t *testing.T) {
payment.paymentHash)
// Next we modify payment result to return an unknown failure.
-ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcherOld).setPaymentResult(
-func(firstHop lnwire.ShortChannelID) ([32]byte, error) {
-// If channel a->b is used, simulate that the failure
-// couldn't be decoded (FailureMessage is nil).
-if firstHop.ToUint64() == 2 {
-return [32]byte{},
-htlcswitch.NewUnknownForwardingError(1)
-}
-// Otherwise the payment succeeds.
-return lntypes.Preimage{}, nil
-})
+dispatcher, ok = ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcherOld) //nolint:lll
+require.True(t, ok)
+dispatcher.setPaymentResult(func(firstHop lnwire.ShortChannelID) (
+[32]byte, error) {
+// If channel a->b is used, simulate that the failure
+// couldn't be decoded (FailureMessage is nil).
+if firstHop.ToUint64() == 2 {
+return [32]byte{},
+htlcswitch.NewUnknownForwardingError(1)
+}
+// Otherwise the payment succeeds.
+return lntypes.Preimage{}, nil
+})
// Send off the payment request to the router. We expect the payment to
// fail because both routes have been pruned.
@@ -2353,7 +2355,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
)
node1Bytes := priv1.PubKey().SerializeCompressed()
node2Bytes := connectNode
-if bytes.Compare(node1Bytes[:], node2Bytes[:]) == -1 {
+if bytes.Compare(node1Bytes, node2Bytes[:]) == -1 {
pubKey1 = priv1.PubKey()
pubKey2 = connectNodeKey
} else {
@@ -2558,35 +2560,6 @@ func (m *mockChain) GetBestBlock() (*chainhash.Hash, int32, error) {
return &blockHash, m.bestHeight, nil
}
-func (m *mockChain) setBestBlock(height int32) {
-m.Lock()
-defer m.Unlock()
-m.bestHeight = height
-}
-func (m *mockChain) addUtxo(op wire.OutPoint, out *wire.TxOut) {
-m.Lock()
-m.utxos[op] = *out
-m.Unlock()
-}
-func (m *mockChain) delUtxo(op wire.OutPoint) {
-m.Lock()
-delete(m.utxos, op)
-m.Unlock()
-}
-func (m *mockChain) addBlock(block *wire.MsgBlock, height uint32, nonce uint32) {
-m.Lock()
-block.Header.Nonce = nonce
-hash := block.Header.BlockHash()
-m.blocks[hash] = block
-m.blockIndex[height] = hash
-m.blockHeightIndex[hash] = height
-m.Unlock()
-}
func createChannelEdge(bitcoinKey1, bitcoinKey2 []byte,
chanValue btcutil.Amount, fundingHeight uint32) (*wire.MsgTx,
*wire.OutPoint, *lnwire.ShortChannelID, error) {


@@ -6666,7 +6666,8 @@ func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription,
// marshallTopologyChange performs a mapping from the topology change struct
// returned by the router to the form of notifications expected by the current
// gRPC service.
-func marshallTopologyChange(topChange *graph.TopologyChange) *lnrpc.GraphTopologyUpdate {
+func marshallTopologyChange(
+topChange *graph.TopologyChange) *lnrpc.GraphTopologyUpdate {
// encodeKey is a simple helper function that converts a live public
// key into a hex-encoded version of the compressed serialization for
@@ -6677,7 +6678,9 @@ func marshallTopologyChange(topChange *graph.TopologyChange) *lnrpc.GraphTopolog
nodeUpdates := make([]*lnrpc.NodeUpdate, len(topChange.NodeUpdates))
for i, nodeUpdate := range topChange.NodeUpdates {
-nodeAddrs := make([]*lnrpc.NodeAddress, 0, len(nodeUpdate.Addresses))
+nodeAddrs := make(
+[]*lnrpc.NodeAddress, 0, len(nodeUpdate.Addresses),
+)
for _, addr := range nodeUpdate.Addresses {
nodeAddr := &lnrpc.NodeAddress{
Network: addr.Network(),


@@ -1060,6 +1060,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
IsStillZombieChannel: s.graphBuilder.IsZombieChannel,
}, nodeKeyDesc)
+//nolint:lll
s.localChanMgr = &localchans.Manager{
ForAllOutgoingChannels: s.graphBuilder.ForAllOutgoingChannels,
PropagateChanPolicyUpdate: s.authGossiper.PropagateChanPolicyUpdate,