Mirror of https://github.com/lightningnetwork/lnd.git, synced 2024-11-19 01:43:16 +01:00.
blindedpath: move blinded path logic to own pkg
This commit is contained in:
parent c62a9c235e
commit 398623bde5
@@ -680,6 +680,8 @@ func DefaultConfig() Config {
},
Invoices: &lncfg.Invoices{
HoldExpiryDelta: lncfg.DefaultHoldInvoiceExpiryDelta,
},
Routing: &lncfg.Routing{
BlindedPaths: lncfg.BlindedPaths{
MinNumRealHops: lncfg.DefaultMinNumRealBlindedPathHops,
NumHops: lncfg.DefaultNumBlindedPathHops,
@@ -1686,6 +1688,7 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
cfg.Sweeper,
cfg.Htlcswitch,
cfg.Invoices,
cfg.Routing,
)
if err != nil {
return nil, err
@@ -367,8 +367,8 @@ func (b *blindedForwardTest) setupNetwork(ctx context.Context,
// Bob to himself.
b.dave = b.ht.NewNode("Dave", []string{
"--bitcoin.timelockdelta=18",
"--invoices.blinding.min-num-real-hops=2",
"--invoices.blinding.num-hops=2",
"--routing.blinding.min-num-real-hops=2",
"--routing.blinding.num-hops=2",
})

b.channels = setupFourHopNetwork(b.ht, b.carol, b.dave)
@@ -625,8 +625,8 @@ func testBlindedRouteInvoices(ht *lntest.HarnessTest) {
// Restart Dave with blinded path restrictions that will result in him
// creating a blinded path that uses himself as the introduction node.
ht.RestartNodeWithExtraArgs(testCase.dave, []string{
"--invoices.blinding.min-num-real-hops=0",
"--invoices.blinding.num-hops=0",
"--routing.blinding.min-num-real-hops=0",
"--routing.blinding.num-hops=0",
})
ht.EnsureConnected(testCase.dave, testCase.carol)

@@ -901,8 +901,8 @@ func testMPPToSingleBlindedPath(ht *lntest.HarnessTest) {
// Restrict Dave so that he only ever chooses the Carol->Dave path for
// a blinded route.
dave := ht.NewNode("dave", []string{
"--invoices.blinding.min-num-real-hops=1",
"--invoices.blinding.num-hops=1",
"--routing.blinding.min-num-real-hops=1",
"--routing.blinding.num-hops=1",
})
carol := ht.NewNode("carol", nil)
eve := ht.NewNode("eve", nil)
@@ -1098,8 +1098,8 @@ func testBlindedRouteDummyHops(ht *lntest.HarnessTest) {
// Configure Dave so that all blinded paths always contain 2 hops and
// so that there is no minimum number of real hops.
dave := ht.NewNode("dave", []string{
"--invoices.blinding.min-num-real-hops=0",
"--invoices.blinding.num-hops=2",
"--routing.blinding.min-num-real-hops=0",
"--routing.blinding.num-hops=2",
})

ht.EnsureConnected(alice, bob)
@@ -1183,8 +1183,8 @@ func testBlindedRouteDummyHops(ht *lntest.HarnessTest) {
// of hops to 2 meaning that one dummy hop should be added.
ht.RestartNodeWithExtraArgs(carol, nil)
ht.RestartNodeWithExtraArgs(dave, []string{
"--invoices.blinding.min-num-real-hops=1",
"--invoices.blinding.num-hops=2",
"--routing.blinding.min-num-real-hops=1",
"--routing.blinding.num-hops=2",
})
ht.EnsureConnected(bob, carol)
ht.EnsureConnected(carol, dave)
@@ -1,7 +1,5 @@
package lncfg

import "fmt"

const (
// DefaultHoldInvoiceExpiryDelta defines the number of blocks before the
// expiry height of a hold invoice's htlc that lnd will automatically
@@ -41,20 +39,6 @@ const (
//nolint:lll
type Invoices struct {
HoldExpiryDelta uint32 `long:"holdexpirydelta" description:"The number of blocks before a hold invoice's htlc expires that the invoice should be canceled to prevent a force close. Force closes will not be prevented if this value is not greater than DefaultIncomingBroadcastDelta."`

BlindedPaths BlindedPaths `group:"blinding" namespace:"blinding"`
}

// BlindedPaths holds the configuration options for blinded paths added to
// invoices.
//
//nolint:lll
type BlindedPaths struct {
MinNumRealHops uint8 `long:"min-num-real-hops" description:"The minimum number of real hops to include in a blinded path. This doesn't include our node, so if the minimum is 1, then the path will contain at minimum our node along with an introduction node hop. If it is zero then the shortest path will use our node as an introduction node."`
NumHops uint8 `long:"num-hops" description:"The number of hops to include in a blinded path. This doesn't include our node, so if it is 1, then the path will contain our node along with an introduction node or dummy node hop. If paths shorter than NumHops is found, then they will be padded using dummy hops."`
MaxNumPaths uint8 `long:"max-num-paths" description:"The maximum number of blinded paths to select and add to an invoice."`
PolicyIncreaseMultiplier float64 `long:"policy-increase-multiplier" description:"The amount by which to increase certain policy values of hops on a blinded path in order to add a probing buffer."`
PolicyDecreaseMultiplier float64 `long:"policy-decrease-multiplier" description:"The amount by which to decrease certain policy values of hops on a blinded path in order to add a probing buffer."`
}

// Validate checks that the various invoice config options are sane.
@@ -72,23 +56,5 @@ func (i *Invoices) Validate() error {
i.HoldExpiryDelta, DefaultIncomingBroadcastDelta)
}

if i.BlindedPaths.MinNumRealHops > i.BlindedPaths.NumHops {
return fmt.Errorf("the minimum number of real hops in a " +
"blinded path must be smaller than or equal to the " +
"number of hops expected to be included in each path")
}

if i.BlindedPaths.PolicyIncreaseMultiplier < 1 {
return fmt.Errorf("the blinded route policy increase " +
"multiplier must be greater than or equal to 1")
}

if i.BlindedPaths.PolicyDecreaseMultiplier > 1 ||
i.BlindedPaths.PolicyDecreaseMultiplier < 0 {

return fmt.Errorf("the blinded route policy decrease " +
"multiplier must be in the range (0,1]")
}

return nil
}
@@ -1,5 +1,7 @@
package lncfg

import "fmt"

// Routing holds the configuration options for routing.
//
//nolint:lll
@@ -7,4 +9,42 @@ type Routing struct {
AssumeChannelValid bool `long:"assumechanvalid" description:"DEPRECATED: Skip checking channel spentness during graph validation. This speedup comes at the risk of using an unvalidated view of the network for routing. (default: false)" hidden:"true"`

StrictZombiePruning bool `long:"strictgraphpruning" description:"If true, then the graph will be pruned more aggressively for zombies. In practice this means that edges with a single stale edge will be considered a zombie."`

BlindedPaths BlindedPaths `group:"blinding" namespace:"blinding"`
}

// BlindedPaths holds the configuration options for blinded path construction.
//
//nolint:lll
type BlindedPaths struct {
MinNumRealHops uint8 `long:"min-num-real-hops" description:"The minimum number of real hops to include in a blinded path. This doesn't include our node, so if the minimum is 1, then the path will contain at minimum our node along with an introduction node hop. If it is zero then the shortest path will use our node as an introduction node."`
NumHops uint8 `long:"num-hops" description:"The number of hops to include in a blinded path. This doesn't include our node, so if it is 1, then the path will contain our node along with an introduction node or dummy node hop. If paths shorter than NumHops is found, then they will be padded using dummy hops."`
MaxNumPaths uint8 `long:"max-num-paths" description:"The maximum number of blinded paths to select and add to an invoice."`
PolicyIncreaseMultiplier float64 `long:"policy-increase-multiplier" description:"The amount by which to increase certain policy values of hops on a blinded path in order to add a probing buffer."`
PolicyDecreaseMultiplier float64 `long:"policy-decrease-multiplier" description:"The amount by which to decrease certain policy values of hops on a blinded path in order to add a probing buffer."`
}

// Validate checks that the various routing config options are sane.
//
// NOTE: this is part of the Validator interface.
func (r *Routing) Validate() error {
if r.BlindedPaths.MinNumRealHops > r.BlindedPaths.NumHops {
return fmt.Errorf("the minimum number of real hops in a " +
"blinded path must be smaller than or equal to the " +
"number of hops expected to be included in each path")
}

if r.BlindedPaths.PolicyIncreaseMultiplier < 1 {
return fmt.Errorf("the blinded route policy increase " +
"multiplier must be greater than or equal to 1")
}

if r.BlindedPaths.PolicyDecreaseMultiplier > 1 ||
r.BlindedPaths.PolicyDecreaseMultiplier < 0 {

return fmt.Errorf("the blinded route policy decrease " +
"multiplier must be in the range (0,1]")
}

return nil
}
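For reference, a minimal sketch of how these options could be set in lnd.conf, assuming the usual lnd.conf section naming; the key names are taken from the `long`/`namespace` struct tags above, and the values are purely illustrative (the shipped defaults come from DefaultMinNumRealBlindedPathHops and DefaultNumBlindedPathHops referenced in DefaultConfig, not from this snippet):

[routing]
routing.blinding.min-num-real-hops=1
routing.blinding.num-hops=2
routing.blinding.max-num-paths=3
routing.blinding.policy-increase-multiplier=1.1
routing.blinding.policy-decrease-multiplier=0.9

The same knobs show up as command line flags in the itest changes earlier in this diff, e.g. --routing.blinding.min-num-real-hops=2.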
@@ -8,7 +8,6 @@ import (
"fmt"
"math"
mathRand "math/rand"
"slices"
"sort"
"time"

@@ -18,7 +17,6 @@ import (
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
"github.com/lightningnetwork/lnd/invoices"
@@ -26,10 +24,9 @@ import (
"github.com/lightningnetwork/lnd/lnutils"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/netann"
"github.com/lightningnetwork/lnd/record"
"github.com/lightningnetwork/lnd/routing"
"github.com/lightningnetwork/lnd/routing/blindedpath"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/tlv"
"github.com/lightningnetwork/lnd/zpay32"
)

@@ -50,16 +47,8 @@ const (
// maxHopHints is the maximum number of hint paths that will be included
// in an invoice.
maxHopHints = 20

// oneMillion is a constant used frequently in fee rate calculations.
oneMillion = uint32(1_000_000)
)

// errInvalidBlindedPath indicates that the chosen real path is not usable as
// a blinded path.
var errInvalidBlindedPath = errors.New("the chosen path results in an " +
"unusable blinded path")

// AddInvoiceConfig contains dependencies for invoice creation.
type AddInvoiceConfig struct {
// AddInvoice is called to add the invoice to the registry.
@@ -515,32 +504,36 @@ func AddInvoice(ctx context.Context, cfg *AddInvoiceConfig,
finalCLTVDelta += uint32(routing.BlockPadding)

//nolint:lll
paths, err := buildBlindedPaymentPaths(&buildBlindedPathCfg{
findRoutes: cfg.QueryBlindedRoutes,
fetchChannelEdgesByID: cfg.Graph.FetchChannelEdgesByID,
pathID: paymentAddr[:],
valueMsat: invoice.Value,
bestHeight: cfg.BestHeight,
minFinalCLTVExpiryDelta: finalCLTVDelta,
blocksUntilExpiry: blindedPathExpiry,
addPolicyBuffer: func(p *blindedHopPolicy) (
*blindedHopPolicy, error) {
paths, err := blindedpath.BuildBlindedPaymentPaths(
&blindedpath.BuildBlindedPathCfg{
FindRoutes: cfg.QueryBlindedRoutes,
FetchChannelEdgesByID: cfg.Graph.FetchChannelEdgesByID,
PathID: paymentAddr[:],
ValueMsat: invoice.Value,
BestHeight: cfg.BestHeight,
MinFinalCLTVExpiryDelta: finalCLTVDelta,
BlocksUntilExpiry: blindedPathExpiry,
AddPolicyBuffer: func(
p *blindedpath.BlindedHopPolicy) (
*blindedpath.BlindedHopPolicy, error) {

return addPolicyBuffer(
p, cfg.BlindedRoutePolicyIncrMultiplier,
cfg.BlindedRoutePolicyDecrMultiplier,
)
//nolint:lll
return blindedpath.AddPolicyBuffer(
p, cfg.BlindedRoutePolicyIncrMultiplier,
cfg.BlindedRoutePolicyDecrMultiplier,
)
},
MinNumHops: cfg.MinNumHops,
// TODO: make configurable
DummyHopPolicy: &blindedpath.BlindedHopPolicy{
CLTVExpiryDelta: 80,
FeeRate: 100,
BaseFee: 100,
MinHTLCMsat: 0,
MaxHTLCMsat: lnwire.MaxMilliSatoshi,
},
},
minNumHops: cfg.MinNumHops,
// TODO: make configurable
dummyHopPolicy: &blindedHopPolicy{
cltvExpiryDelta: 80,
feeRate: 100,
baseFee: 100,
minHTLCMsat: 0,
maxHTLCMsat: lnwire.MaxMilliSatoshi,
},
})
)
if err != nil {
return nil, nil, err
}
@@ -972,813 +965,3 @@ func PopulateHopHints(cfg *SelectHopHintsCfg, amtMSat lnwire.MilliSatoshi,
hopHints = append(hopHints, selectedHints...)
return hopHints, nil
}

// buildBlindedPathCfg defines the various resources and configuration values
// required to build a blinded payment path to this node.
type buildBlindedPathCfg struct {
// findRoutes returns a set of routes to us that can be used for the
// construction of blinded paths. These routes will consist of real
// nodes advertising the route blinding feature bit. They may be of
// various lengths and may even contain only a single hop. Any route
// shorter than minNumHops will be padded with dummy hops during route
// construction.
findRoutes func(value lnwire.MilliSatoshi) ([]*route.Route, error)

// fetchChannelEdgesByID attempts to look up the two directed edges for
// the channel identified by the channel ID.
fetchChannelEdgesByID func(chanID uint64) (*models.ChannelEdgeInfo,
*models.ChannelEdgePolicy, *models.ChannelEdgePolicy, error)

// bestHeight can be used to fetch the best block height that this node
// is aware of.
bestHeight func() (uint32, error)

// addPolicyBuffer is a function that can be used to alter the policy
// values of the given channel edge. The main reason for doing this is
// to add a safety buffer so that if the node makes small policy changes
// during the lifetime of the blinded path, then the path remains valid
// and so probing is more difficult. Note that this will only be called
// for the policies of real nodes and won't be applied to
// dummyHopPolicy.
addPolicyBuffer func(policy *blindedHopPolicy) (*blindedHopPolicy,
error)

// pathID is the secret data to embed in the blinded path data that we
// will receive back as the recipient. This is the equivalent of the
// payment address used in normal payments. It lets the recipient check
// that the path is being used in the correct context.
pathID []byte

// valueMsat is the payment amount in milli-satoshis that must be
// routed. This will be used for selecting appropriate routes to use for
// the blinded path.
valueMsat lnwire.MilliSatoshi

// minFinalCLTVExpiryDelta is the minimum CLTV delta that the recipient
// requires for the final hop of the payment.
//
// NOTE that the caller is responsible for adding additional block
// padding to this value to account for blocks being mined while the
// payment is in-flight.
minFinalCLTVExpiryDelta uint32

// blocksUntilExpiry is the number of blocks that this blinded path
// should remain valid for.
blocksUntilExpiry uint32

// minNumHops is the minimum number of hops that each blinded path
// should be. If the number of hops in a path returned by findRoutes is
// less than this number, then dummy hops will be post-fixed to the
// route.
minNumHops uint8

// dummyHopPolicy holds the policy values that should be used for dummy
// hops. Note that these will _not_ be buffered via addPolicyBuffer.
dummyHopPolicy *blindedHopPolicy
}
// buildBlindedPaymentPaths uses the passed config to construct a set of blinded
// payment paths that can be added to the invoice.
func buildBlindedPaymentPaths(cfg *buildBlindedPathCfg) (
[]*zpay32.BlindedPaymentPath, error) {

if cfg.minFinalCLTVExpiryDelta >= cfg.blocksUntilExpiry {
return nil, fmt.Errorf("blinded path CLTV expiry delta (%d) "+
"must be greater than the minimum final CLTV expiry "+
"delta (%d)", cfg.blocksUntilExpiry,
cfg.minFinalCLTVExpiryDelta)
}

// Find some appropriate routes for the value to be routed. This will
// return a set of routes made up of real nodes.
routes, err := cfg.findRoutes(cfg.valueMsat)
if err != nil {
return nil, err
}

if len(routes) == 0 {
return nil, fmt.Errorf("could not find any routes to self to " +
"use for blinded route construction")
}

// Not every route returned will necessarily result in a usable blinded
// path and so the number of paths returned might be less than the
// number of real routes returned by findRoutes above.
paths := make([]*zpay32.BlindedPaymentPath, 0, len(routes))

// For each route returned, we will construct the associated blinded
// payment path.
for _, route := range routes {
path, err := buildBlindedPaymentPath(
cfg, extractCandidatePath(route),
)
if errors.Is(err, errInvalidBlindedPath) {
log.Debugf("Not using route (%s) as a blinded path "+
"since it resulted in an invalid blinded path",
route)

continue
}

if err != nil {
return nil, err
}

paths = append(paths, path)
}

if len(paths) == 0 {
return nil, fmt.Errorf("could not build any blinded paths")
}

return paths, nil
}
// buildBlindedPaymentPath takes a route from an introduction node to this node
// and uses the given config to convert it into a blinded payment path.
func buildBlindedPaymentPath(cfg *buildBlindedPathCfg, path *candidatePath) (
*zpay32.BlindedPaymentPath, error) {

// Pad the given route with dummy hops until the minimum number of hops
// is met.
err := path.padWithDummyHops(cfg.minNumHops)
if err != nil {
return nil, err
}

hops, minHTLC, maxHTLC, err := collectRelayInfo(cfg, path)
if err != nil {
return nil, fmt.Errorf("could not collect blinded path relay "+
"info: %w", err)
}

relayInfo := make([]*record.PaymentRelayInfo, len(hops))
for i, hop := range hops {
relayInfo[i] = hop.relayInfo
}

// Using the collected relay info, we can calculate the aggregated
// policy values for the route.
baseFee, feeRate, cltvDelta := calcBlindedPathPolicies(
relayInfo, uint16(cfg.minFinalCLTVExpiryDelta),
)

currentHeight, err := cfg.bestHeight()
if err != nil {
return nil, err
}

// The next step is to calculate the payment constraints to communicate
// to each hop and to package up the hop info for each hop. We will
// handle the final hop first since its payload looks a bit different,
// and then we will iterate backwards through the remaining hops.
//
// Note that the +1 here is required because the route won't have the
// introduction node included in the "Hops". But since we want to create
// payloads for all the hops as well as the introduction node, we add 1
// here to get the full hop length along with the introduction node.
hopDataSet := make([]*hopData, 0, len(path.hops)+1)

// Determine the maximum CLTV expiry for the destination node.
cltvExpiry := currentHeight + cfg.blocksUntilExpiry +
cfg.minFinalCLTVExpiryDelta

constraints := &record.PaymentConstraints{
MaxCltvExpiry: cltvExpiry,
HtlcMinimumMsat: minHTLC,
}

// If the blinded route has only a source node (introduction node) and
// no hops, then the destination node is also the source node.
finalHopPubKey := path.introNode
if len(path.hops) > 0 {
finalHopPubKey = path.hops[len(path.hops)-1].pubKey
}

// For the final hop, we only send it the path ID and payment
// constraints.
info, err := buildFinalHopRouteData(
finalHopPubKey, cfg.pathID, constraints,
)
if err != nil {
return nil, err
}

hopDataSet = append(hopDataSet, info)

// Iterate through the remaining (non-final) hops, back to front.
for i := len(hops) - 1; i >= 0; i-- {
hop := hops[i]

cltvExpiry += uint32(hop.relayInfo.CltvExpiryDelta)

constraints = &record.PaymentConstraints{
MaxCltvExpiry: cltvExpiry,
HtlcMinimumMsat: minHTLC,
}

var info *hopData
if hop.nextHopIsDummy {
info, err = buildDummyRouteData(
hop.hopPubKey, hop.relayInfo, constraints,
)
} else {
info, err = buildHopRouteData(
hop.hopPubKey, hop.nextSCID, hop.relayInfo,
constraints,
)
}
if err != nil {
return nil, err
}

hopDataSet = append(hopDataSet, info)
}

// Sort the hop info list in reverse order so that the data for the
// introduction node is first.
slices.Reverse(hopDataSet)

// Add padding to each route data instance until the encrypted data
// blobs are all the same size.
paymentPath, _, err := padHopInfo(hopDataSet, true)
if err != nil {
return nil, err
}

// Derive an ephemeral session key.
sessionKey, err := btcec.NewPrivateKey()
if err != nil {
return nil, err
}

// Encrypt the hop info.
blindedPath, err := sphinx.BuildBlindedPath(sessionKey, paymentPath)
if err != nil {
return nil, err
}

if len(blindedPath.BlindedHops) < 1 {
return nil, fmt.Errorf("blinded path must have at least one " +
"hop")
}

// Overwrite the introduction point's blinded pub key with the real
// pub key since then we can use this more compact format in the
// invoice without needing to encode the un-used blinded node pub key of
// the intro node.
blindedPath.BlindedHops[0].BlindedNodePub =
blindedPath.IntroductionPoint

// Now construct a z32 blinded path.
return &zpay32.BlindedPaymentPath{
FeeBaseMsat: uint32(baseFee),
FeeRate: feeRate,
CltvExpiryDelta: cltvDelta,
HTLCMinMsat: uint64(minHTLC),
HTLCMaxMsat: uint64(maxHTLC),
Features: lnwire.EmptyFeatureVector(),
FirstEphemeralBlindingPoint: blindedPath.BlindingPoint,
Hops: blindedPath.BlindedHops,
}, nil
}
// hopRelayInfo packages together the relay info to send to hop on a blinded
// path along with the pub key of that hop and the SCID that the hop should
// forward the payment on to.
type hopRelayInfo struct {
hopPubKey route.Vertex
nextSCID lnwire.ShortChannelID
relayInfo *record.PaymentRelayInfo
nextHopIsDummy bool
}

// collectRelayInfo collects the relay policy rules for each relay hop on the
// route and applies any policy buffers.
//
// For the blinded route:
//
// C --chan(CB)--> B --chan(BA)--> A
//
// where C is the introduction node, the route.Route struct we are given will
// have SourcePubKey set to C's pub key, and then it will have the following
// route.Hops:
//
// - PubKeyBytes: B, ChannelID: chan(CB)
// - PubKeyBytes: A, ChannelID: chan(BA)
//
// We, however, want to collect the channel policies for the following PubKey
// and ChannelID pairs:
//
// - PubKey: C, ChannelID: chan(CB)
// - PubKey: B, ChannelID: chan(BA)
//
// Therefore, when we go through the route and its hops to collect policies, our
// index for collecting public keys will be trailing that of the channel IDs by
// 1.
func collectRelayInfo(cfg *buildBlindedPathCfg, path *candidatePath) (
[]*hopRelayInfo, lnwire.MilliSatoshi, lnwire.MilliSatoshi, error) {

var (
hops = make([]*hopRelayInfo, 0, len(path.hops))
minHTLC lnwire.MilliSatoshi
maxHTLC lnwire.MilliSatoshi
)

var (
// The first pub key is that of the introduction node.
hopSource = path.introNode
)
for _, hop := range path.hops {
var (
// For dummy hops, we use pre-configured policy values.
policy = cfg.dummyHopPolicy
err error
)
if !hop.isDummy {
// For real hops, retrieve the channel policy for this
// hop's channel ID in the direction pointing away from
// the hopSource node.
policy, err = getNodeChannelPolicy(
cfg, hop.channelID, hopSource,
)
if err != nil {
return nil, 0, 0, err
}

// Apply any policy changes now before caching the
// policy.
policy, err = cfg.addPolicyBuffer(policy)
if err != nil {
return nil, 0, 0, err
}
}

// If this is the first policy we are collecting, then use this
// policy to set the base values for min/max htlc.
if len(hops) == 0 {
minHTLC = policy.minHTLCMsat
maxHTLC = policy.maxHTLCMsat
} else {
if policy.minHTLCMsat > minHTLC {
minHTLC = policy.minHTLCMsat
}

if policy.maxHTLCMsat < maxHTLC {
maxHTLC = policy.maxHTLCMsat
}
}

// From the policy values for this hop, we can collect the
// payment relay info that we will send to this hop.
hops = append(hops, &hopRelayInfo{
hopPubKey: hopSource,
nextSCID: lnwire.NewShortChanIDFromInt(hop.channelID),
relayInfo: &record.PaymentRelayInfo{
FeeRate: policy.feeRate,
BaseFee: policy.baseFee,
CltvExpiryDelta: policy.cltvExpiryDelta,
},
nextHopIsDummy: hop.isDummy,
})

// This hop's pub key will be the policy creator for the next
// hop.
hopSource = hop.pubKey
}

// It can happen that there is no HTLC-range overlap between the various
// hops along the path. We return errInvalidBlindedPath to indicate that
// this route was not usable
if minHTLC > maxHTLC {
return nil, 0, 0, fmt.Errorf("%w: resulting blinded path min "+
"HTLC value is larger than the resulting max HTLC "+
"value", errInvalidBlindedPath)
}

return hops, minHTLC, maxHTLC, nil
}
// buildDummyRouteData constructs the record.BlindedRouteData struct for the
// given hop in a blinded route where the following hop is a dummy hop.
func buildDummyRouteData(node route.Vertex, relayInfo *record.PaymentRelayInfo,
constraints *record.PaymentConstraints) (*hopData, error) {

nodeID, err := btcec.ParsePubKey(node[:])
if err != nil {
return nil, err
}

return &hopData{
data: record.NewDummyHopRouteData(
nodeID, *relayInfo, *constraints,
),
nodeID: nodeID,
}, nil
}

// buildHopRouteData constructs the record.BlindedRouteData struct for the given
// non-final hop on a blinded path and packages it with the node's ID.
func buildHopRouteData(node route.Vertex, scid lnwire.ShortChannelID,
relayInfo *record.PaymentRelayInfo,
constraints *record.PaymentConstraints) (*hopData, error) {

// Wrap up the data we want to send to this hop.
blindedRouteHopData := record.NewNonFinalBlindedRouteData(
scid, nil, *relayInfo, constraints, nil,
)

nodeID, err := btcec.ParsePubKey(node[:])
if err != nil {
return nil, err
}

return &hopData{
data: blindedRouteHopData,
nodeID: nodeID,
}, nil
}

// buildFinalHopRouteData constructs the record.BlindedRouteData struct for the
// final hop and packages it with the real node ID of the node it is intended
// for.
func buildFinalHopRouteData(node route.Vertex, pathID []byte,
constraints *record.PaymentConstraints) (*hopData, error) {

blindedRouteHopData := record.NewFinalHopBlindedRouteData(
constraints, pathID,
)
nodeID, err := btcec.ParsePubKey(node[:])
if err != nil {
return nil, err
}

return &hopData{
data: blindedRouteHopData,
nodeID: nodeID,
}, nil
}

// getNodeChanPolicy fetches the routing policy info for the given channel and
// node pair.
func getNodeChannelPolicy(cfg *buildBlindedPathCfg, chanID uint64,
nodeID route.Vertex) (*blindedHopPolicy, error) {

// Attempt to fetch channel updates for the given channel. We will have
// at most two updates for a given channel.
_, update1, update2, err := cfg.fetchChannelEdgesByID(chanID)
if err != nil {
return nil, err
}

// Now we need to determine which of the updates was created by the
// node in question. We know the update is the correct one if the
// "ToNode" for the fetched policy is _not_ equal to the node ID in
// question.
var policy *models.ChannelEdgePolicy
switch {
case update1 != nil && !bytes.Equal(update1.ToNode[:], nodeID[:]):
policy = update1

case update2 != nil && !bytes.Equal(update2.ToNode[:], nodeID[:]):
policy = update2

default:
return nil, fmt.Errorf("no channel updates found from node "+
"%s for channel %d", nodeID, chanID)
}

return &blindedHopPolicy{
cltvExpiryDelta: policy.TimeLockDelta,
feeRate: uint32(policy.FeeProportionalMillionths),
baseFee: policy.FeeBaseMSat,
minHTLCMsat: policy.MinHTLC,
maxHTLCMsat: policy.MaxHTLC,
}, nil
}

// candidatePath holds all the information about a route to this node that we
// need in order to build a blinded route.
type candidatePath struct {
introNode route.Vertex
finalNodeID route.Vertex
hops []*blindedPathHop
}

// padWithDummyHops will append n dummy hops to the candidatePath hop set. The
// pub key for the dummy hop will be the same as the pub key for the final hop
// of the path. That way, the final hop will be able to decrypt the data
// encrypted for each dummy hop.
func (c *candidatePath) padWithDummyHops(n uint8) error {
for len(c.hops) < int(n) {
c.hops = append(c.hops, &blindedPathHop{
pubKey: c.finalNodeID,
isDummy: true,
})
}

return nil
}

// blindedPathHop holds the information we need to know about a hop in a route
// in order to use it in the construction of a blinded path.
type blindedPathHop struct {
// pubKey is the real pub key of a node on a blinded path.
pubKey route.Vertex

// channelID is the channel along which the previous hop should forward
// their HTLC in order to reach this hop.
channelID uint64

// isDummy is true if this hop is an appended dummy hop.
isDummy bool
}

// extractCandidatePath extracts the data it needs from the given route.Route in
// order to construct a candidatePath.
func extractCandidatePath(path *route.Route) *candidatePath {
var (
hops = make([]*blindedPathHop, len(path.Hops))
finalNode = path.SourcePubKey
)
for i, hop := range path.Hops {
hops[i] = &blindedPathHop{
pubKey: hop.PubKeyBytes,
channelID: hop.ChannelID,
}

if i == len(path.Hops)-1 {
finalNode = hop.PubKeyBytes
}
}

return &candidatePath{
introNode: path.SourcePubKey,
finalNodeID: finalNode,
hops: hops,
}
}

// blindedHopPolicy holds the set of relay policy values to use for a channel
// in a blinded path.
type blindedHopPolicy struct {
cltvExpiryDelta uint16
feeRate uint32
baseFee lnwire.MilliSatoshi
minHTLCMsat lnwire.MilliSatoshi
maxHTLCMsat lnwire.MilliSatoshi
}
// addPolicyBuffer constructs the bufferedChanPolicies for a path hop by taking
// its actual policy values and multiplying them by the given multipliers.
// The base fee, fee rate and minimum HTLC msat values are adjusted via the
// incMultiplier while the maximum HTLC msat value is adjusted via the
// decMultiplier. If adjustments of the HTLC values no longer make sense
// then the original HTLC value is used.
func addPolicyBuffer(policy *blindedHopPolicy, incMultiplier,
decMultiplier float64) (*blindedHopPolicy, error) {

if incMultiplier < 1 {
return nil, fmt.Errorf("blinded path policy increase " +
"multiplier must be greater than or equal to 1")
}

if decMultiplier < 0 || decMultiplier > 1 {
return nil, fmt.Errorf("blinded path policy decrease " +
"multiplier must be in the range [0;1]")
}

var (
minHTLCMsat = lnwire.MilliSatoshi(
float64(policy.minHTLCMsat) * incMultiplier,
)
maxHTLCMsat = lnwire.MilliSatoshi(
float64(policy.maxHTLCMsat) * decMultiplier,
)
)

// Make sure the new minimum is not more than the original maximum.
// If it is, then just stick to the original minimum.
if minHTLCMsat > policy.maxHTLCMsat {
minHTLCMsat = policy.minHTLCMsat
}

// Make sure the new maximum is not less than the original minimum.
// If it is, then just stick to the original maximum.
if maxHTLCMsat < policy.minHTLCMsat {
maxHTLCMsat = policy.maxHTLCMsat
}

// Also ensure that the new htlc bounds make sense. If the new minimum
// is greater than the new maximum, then just set both back to their
// original values.
if minHTLCMsat > maxHTLCMsat {
minHTLCMsat = policy.minHTLCMsat
maxHTLCMsat = policy.maxHTLCMsat
}

return &blindedHopPolicy{
cltvExpiryDelta: uint16(
float64(policy.cltvExpiryDelta) * incMultiplier,
),
feeRate: uint32(float64(policy.feeRate) * incMultiplier),
baseFee: lnwire.MilliSatoshi(
float64(policy.baseFee) * incMultiplier,
),
minHTLCMsat: minHTLCMsat,
maxHTLCMsat: maxHTLCMsat,
}, nil
}
// calcBlindedPathPolicies computes the accumulated policy values for the path.
// These values include the total base fee, the total proportional fee and the
// total CLTV delta. This function assumes that all the passed relay infos have
// already been adjusted with a buffer to account for easy probing attacks.
func calcBlindedPathPolicies(relayInfo []*record.PaymentRelayInfo,
ourMinFinalCLTVDelta uint16) (lnwire.MilliSatoshi, uint32, uint16) {

var (
totalFeeBase lnwire.MilliSatoshi
totalFeeProp uint32
totalCLTV = ourMinFinalCLTVDelta
)
// Use the algorithms defined in BOLT 4 to calculate the accumulated
// relay fees for the route:
//nolint:lll
// https://github.com/lightning/bolts/blob/db278ab9b2baa0b30cfe79fb3de39280595938d3/04-onion-routing.md?plain=1#L255
for i := len(relayInfo) - 1; i >= 0; i-- {
info := relayInfo[i]

totalFeeBase = calcNextTotalBaseFee(
totalFeeBase, info.BaseFee, info.FeeRate,
)

totalFeeProp = calcNextTotalFeeRate(totalFeeProp, info.FeeRate)

totalCLTV += info.CltvExpiryDelta
}

return totalFeeBase, totalFeeProp, totalCLTV
}

// calcNextTotalBaseFee takes the current total accumulated base fee of a
// blinded path at hop `n` along with the fee rate and base fee of the hop at
// `n-1` and uses these to calculate the accumulated base fee at hop `n-1`.
func calcNextTotalBaseFee(currentTotal, hopBaseFee lnwire.MilliSatoshi,
hopFeeRate uint32) lnwire.MilliSatoshi {

numerator := (uint32(hopBaseFee) * oneMillion) +
(uint32(currentTotal) * (oneMillion + hopFeeRate)) +
oneMillion - 1

return lnwire.MilliSatoshi(numerator / oneMillion)
}

// calculateNextTotalFeeRate takes the current total accumulated fee rate of a
// blinded path at hop `n` along with the fee rate of the hop at `n-1` and uses
// these to calculate the accumulated fee rate at hop `n-1`.
func calcNextTotalFeeRate(currentTotal, hopFeeRate uint32) uint32 {
numerator := (currentTotal+hopFeeRate)*oneMillion +
currentTotal*hopFeeRate + oneMillion - 1

return numerator / oneMillion
}
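The two helpers above implement the BOLT 4 ceiling-division formulas for aggregating relay fees across the blinded hops. The following standalone Go sketch (hypothetical, not part of lnd; it simply mirrors the arithmetic with plain uint32 values) reproduces the spec example asserted in TestBlindedPathAccumulatedPolicyCalc further down: two hops, each charging a base fee of 100 msat and a proportional fee of 500 ppm.

// A standalone sketch mirroring calcNextTotalBaseFee and calcNextTotalFeeRate
// above, applied to the spec example used in the tests below.
package main

import "fmt"

const oneMillion = uint32(1_000_000)

func nextTotalBaseFee(currentTotal, hopBaseFee, hopFeeRate uint32) uint32 {
	// Ceiling division: add oneMillion-1 before dividing.
	numerator := hopBaseFee*oneMillion +
		currentTotal*(oneMillion+hopFeeRate) +
		oneMillion - 1

	return numerator / oneMillion
}

func nextTotalFeeRate(currentTotal, hopFeeRate uint32) uint32 {
	numerator := (currentTotal+hopFeeRate)*oneMillion +
		currentTotal*hopFeeRate + oneMillion - 1

	return numerator / oneMillion
}

func main() {
	var totalBase, totalRate uint32

	// Fold in the two relay hops back to front, as the code above does.
	for i := 0; i < 2; i++ {
		totalBase = nextTotalBaseFee(totalBase, 100, 500)
		totalRate = nextTotalFeeRate(totalRate, 500)
	}

	// Prints 201 and 1001, matching the assertions in
	// TestBlindedPathAccumulatedPolicyCalc.
	fmt.Println(totalBase, totalRate)
}

Folding back to front takes the base fee from 0 to 100 to 201 msat and the proportional fee from 0 to 500 to 1001 ppm, which is where the 201/1001 expectations in the test come from.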
// hopData packages the record.BlindedRouteData for a hop on a blinded path with
// the real node ID of that hop.
type hopData struct {
data *record.BlindedRouteData
nodeID *btcec.PublicKey
}

// padStats can be used to keep track of various pieces of data that we collect
// during a call to padHopInfo. This is useful for logging and for test
// assertions.
type padStats struct {
minPayloadSize int
maxPayloadSize int
finalPaddedSize int
numIterations int
}

// padHopInfo iterates over a set of record.BlindedRouteData and adds padding
// where needed until the resulting encrypted data blobs are all the same size.
// This may take a few iterations due to the fact that a TLV field is used to
// add this padding. For example, if we want to add a 1 byte padding to a
// record.BlindedRouteData when it does not yet have any padding, then adding
// a 1 byte padding will actually add 3 bytes due to the bytes required when
// adding the initial type and length bytes. However, on the next iteration if
// we again add just 1 byte, then only a single byte will be added. The same
// iteration is required for padding values on the BigSize encoding bucket
// edges. The number of iterations that this function takes is also returned for
// testing purposes. If prePad is true, then zero byte padding is added to each
// payload that does not yet have padding. This will save some iterations for
// the majority of cases.
func padHopInfo(hopInfo []*hopData, prePad bool) ([]*sphinx.HopInfo, *padStats,
error) {

var (
paymentPath = make([]*sphinx.HopInfo, len(hopInfo))
stats padStats
)

// Pre-pad each payload with zero byte padding (if it does not yet have
// padding) to save a couple of iterations in the majority of cases.
if prePad {
for _, info := range hopInfo {
if info.data.Padding.IsSome() {
continue
}

info.data.PadBy(0)
}
}

for {
stats.numIterations++

// On each iteration of the loop, we first determine the
// current largest encoded data blob size. This will be the
// size we aim to get the others to match.
var (
maxLen int
minLen = math.MaxInt8
)
for i, hop := range hopInfo {
plainText, err := record.EncodeBlindedRouteData(
hop.data,
)
if err != nil {
return nil, nil, err
}

if len(plainText) > maxLen {
maxLen = len(plainText)

// Update the stats to take note of this new
// max since this may be the final max that all
// payloads will be padded to.
stats.finalPaddedSize = maxLen
}
if len(plainText) < minLen {
minLen = len(plainText)
}

paymentPath[i] = &sphinx.HopInfo{
NodePub: hop.nodeID,
PlainText: plainText,
}
}

// If this is our first iteration, then we take note of the min
// and max lengths of the payloads pre-padding for logging
// later.
if stats.numIterations == 1 {
stats.minPayloadSize = minLen
stats.maxPayloadSize = maxLen
}

// Now we iterate over them again and determine which ones we
// need to add padding to.
var numEqual int
for i, hop := range hopInfo {
plainText := paymentPath[i].PlainText

// If the plaintext length is equal to the desired
// length, then we can continue. We use numEqual to
// keep track of how many have the same length.
if len(plainText) == maxLen {
numEqual++

continue
}

// If we previously added padding to this hop, we keep
// the length of that initial padding too.
var existingPadding int
hop.data.Padding.WhenSome(
func(p tlv.RecordT[tlv.TlvType1, []byte]) {
existingPadding = len(p.Val)
},
)

// Add some padding bytes to the hop.
hop.data.PadBy(
existingPadding + maxLen - len(plainText),
)
}

// If all the payloads have the same length, we can exit the
// loop.
if numEqual == len(hopInfo) {
break
}
}

log.Debugf("Finished padding %d blinded path payloads to %d bytes "+
"each where the pre-padded min and max sizes were %d and %d "+
"bytes respectively", len(hopInfo), stats.finalPaddedSize,
stats.minPayloadSize, stats.maxPayloadSize)

return paymentPath, &stats, nil
}
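To see why the first padding pass can overshoot, the following standalone sketch (an illustration under the assumption that the record and tlv calls shown elsewhere in this diff behave as used there; errors are elided) encodes a BlindedRouteData before and after a 1-byte PadBy call. The encoding grows by 3 bytes because the Padding record brings its own TLV type and length bytes the first time it is added.

// A minimal sketch, not lnd code: shows the 3-byte growth that the
// padHopInfo doc comment above describes.
package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/record"
	"github.com/lightningnetwork/lnd/tlv"
)

func main() {
	data := &record.BlindedRouteData{
		PathID: tlv.SomeRecordT(
			tlv.NewPrimitiveRecord[tlv.TlvType6](make([]byte, 4)),
		),
	}

	before, _ := record.EncodeBlindedRouteData(data)

	// Add a single byte of padding to a payload that has no padding
	// field yet.
	data.PadBy(1)

	after, _ := record.EncodeBlindedRouteData(data)

	// Expect the encoding to grow by 3 bytes: a type byte, a length
	// byte, and the single padding byte itself.
	fmt.Println(len(before), len(after))
}

Once a payload already carries a Padding record, the same PadBy(1) call grows the encoding by a single byte, which is what lets the loop above converge after a few iterations.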
@@ -1,24 +1,15 @@
package invoicesrpc

import (
"bytes"
"encoding/hex"
"fmt"
"math/rand"
"reflect"
"testing"
"testing/quick"

"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/wire"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/record"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/tlv"
"github.com/lightningnetwork/lnd/zpay32"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
@@ -905,952 +896,3 @@ func TestPopulateHopHints(t *testing.T) {
})
}
}
// TestApplyBlindedPathPolicyBuffer tests blinded policy adjustments.
func TestApplyBlindedPathPolicyBuffer(t *testing.T) {
tests := []struct {
name string
policyIn *blindedHopPolicy
expectedOut *blindedHopPolicy
incMultiplier float64
decMultiplier float64
expectedError string
}{
{
name: "invalid increase multiplier",
incMultiplier: 0,
expectedError: "blinded path policy increase " +
"multiplier must be greater than or equal to 1",
},
{
name: "decrease multiplier too small",
incMultiplier: 1,
decMultiplier: -1,
expectedError: "blinded path policy decrease " +
"multiplier must be in the range [0;1]",
},
{
name: "decrease multiplier too big",
incMultiplier: 1,
decMultiplier: 2,
expectedError: "blinded path policy decrease " +
"multiplier must be in the range [0;1]",
},
{
name: "no change",
incMultiplier: 1,
decMultiplier: 1,
policyIn: &blindedHopPolicy{
cltvExpiryDelta: 1,
minHTLCMsat: 2,
maxHTLCMsat: 3,
baseFee: 4,
feeRate: 5,
},
expectedOut: &blindedHopPolicy{
cltvExpiryDelta: 1,
minHTLCMsat: 2,
maxHTLCMsat: 3,
baseFee: 4,
feeRate: 5,
},
},
{
name: "buffer up by 100% and down " +
"by 50%",
incMultiplier: 2,
decMultiplier: 0.5,
policyIn: &blindedHopPolicy{
cltvExpiryDelta: 10,
minHTLCMsat: 20,
maxHTLCMsat: 300,
baseFee: 40,
feeRate: 50,
},
expectedOut: &blindedHopPolicy{
cltvExpiryDelta: 20,
minHTLCMsat: 40,
maxHTLCMsat: 150,
baseFee: 80,
feeRate: 100,
},
},
{
name: "new HTLC minimum larger than OG " +
"maximum",
incMultiplier: 2,
decMultiplier: 1,
policyIn: &blindedHopPolicy{
cltvExpiryDelta: 10,
minHTLCMsat: 20,
maxHTLCMsat: 30,
baseFee: 40,
feeRate: 50,
},
expectedOut: &blindedHopPolicy{
cltvExpiryDelta: 20,
minHTLCMsat: 20,
maxHTLCMsat: 30,
baseFee: 80,
feeRate: 100,
},
},
{
name: "new HTLC maximum smaller than OG " +
"minimum",
incMultiplier: 1,
decMultiplier: 0.5,
policyIn: &blindedHopPolicy{
cltvExpiryDelta: 10,
minHTLCMsat: 20,
maxHTLCMsat: 30,
baseFee: 40,
feeRate: 50,
},
expectedOut: &blindedHopPolicy{
cltvExpiryDelta: 10,
minHTLCMsat: 20,
maxHTLCMsat: 30,
baseFee: 40,
feeRate: 50,
},
},
{
name: "new HTLC minimum and maximums are not " +
"compatible",
incMultiplier: 2,
decMultiplier: 0.5,
policyIn: &blindedHopPolicy{
cltvExpiryDelta: 10,
minHTLCMsat: 30,
maxHTLCMsat: 100,
baseFee: 40,
feeRate: 50,
},
expectedOut: &blindedHopPolicy{
cltvExpiryDelta: 20,
minHTLCMsat: 30,
maxHTLCMsat: 100,
baseFee: 80,
feeRate: 100,
},
},
}

for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()

bufferedPolicy, err := addPolicyBuffer(
test.policyIn, test.incMultiplier,
test.decMultiplier,
)
if test.expectedError != "" {
require.ErrorContains(
t, err, test.expectedError,
)

return
}

require.Equal(t, test.expectedOut, bufferedPolicy)
})
}
}
// TestBlindedPathAccumulatedPolicyCalc tests the logic for calculating the
// accumulated routing policies of a blinded route against an example mentioned
// in the spec document:
// https://github.com/lightning/bolts/blob/master/proposals/route-blinding.md
func TestBlindedPathAccumulatedPolicyCalc(t *testing.T) {
t.Parallel()

// In the spec example, the blinded route is:
// Carol -> Bob -> Alice
// And Alice chooses the following buffered policy for both the C->B
// and B->A edges.
nodePolicy := &record.PaymentRelayInfo{
FeeRate: 500,
BaseFee: 100,
CltvExpiryDelta: 144,
}

hopPolicies := []*record.PaymentRelayInfo{
nodePolicy,
nodePolicy,
}

// Alice's minimum final expiry delta is chosen to be 12.
aliceMinFinalExpDelta := uint16(12)

totalBase, totalRate, totalCLTVDelta := calcBlindedPathPolicies(
hopPolicies, aliceMinFinalExpDelta,
)

require.Equal(t, lnwire.MilliSatoshi(201), totalBase)
require.EqualValues(t, 1001, totalRate)
require.EqualValues(t, 300, totalCLTVDelta)
}
// TestPadBlindedHopInfo asserts that the padding of blinded hop data is done
// correctly and that it takes the expected number of iterations.
func TestPadBlindedHopInfo(t *testing.T) {
tests := []struct {
name string
expectedIterations int
expectedFinalSize int

// We will use the pathID field of BlindedRouteData to set an
// initial payload size. The ints in this list represent the
// size of each pathID.
pathIDs []int

// existingPadding is a map from entry index (based on the
// pathIDs set) to the number of pre-existing padding bytes to
// add.
existingPadding map[int]int

// prePad is true if all the hop payloads should be pre-padded
// with a zero length TLV Padding field.
prePad bool
}{
{
// If there is only one entry, then no padding is
// expected.
name: "single entry",
expectedIterations: 1,
pathIDs: []int{10},

// The final size will be 12 since the path ID is 10
// bytes, and it will be prefixed by type and value
// bytes.
expectedFinalSize: 12,
},
{
// All the payloads are the same size from the get go
// meaning that no padding is expected.
name: "all start equal",
expectedIterations: 1,
pathIDs: []int{10, 10, 10},

// The final size will be 12 since the path ID is 10
// bytes, and it will be prefixed by type and value
// bytes.
expectedFinalSize: 12,
},
{
// If the blobs differ by 1 byte it will take 4
// iterations:
// 1) padding of 1 is added to entry 2 which will
// increase its size by 3 bytes since padding does
// not yet exist for it.
// 2) Now entry 1 will be short 2 bytes. It will be
// padded by 2 bytes but again since it is a new
// padding field, 4 bytes are added.
// 3) Finally, entry 2 is padded by 1 extra. Since it
// already does have a padding field, this does end
// up adding only 1 extra byte.
// 4) The fourth iteration determines that all are now
// the same size.
name: "differ by 1 - no pre-padding",
expectedIterations: 4,
pathIDs: []int{4, 3},
expectedFinalSize: 10,
},
{
// By pre-padding the payloads with a zero byte padding,
// we can reduce the number of iterations quite a bit.
name: "differ by 1 - with pre-padding",
expectedIterations: 2,
pathIDs: []int{4, 3},
expectedFinalSize: 8,
prePad: true,
},
{
name: "existing padding and diff of 1",
expectedIterations: 2,
pathIDs: []int{10, 11},

// By adding some existing padding, the type and length
// field for the padding are already accounted for in
// the first iteration, and so we only expect two
// iterations to get the payloads to match size here:
// one for adding a single extra byte to the smaller
// payload and another for confirming the sizes match.
existingPadding: map[int]int{0: 1, 1: 1},
expectedFinalSize: 16,
},
{
// In this test, we test a BigSize bucket shift. We do
// this by setting the initial path ID's of both entries
// to a 0 size which means the total encoding of those
// will be 2 bytes (to encode the type and length). Then
// for the initial padding, we let the first entry be
// 253 bytes long which is just long enough to be in
// the second BigSize bucket which uses 3 bytes to
// encode the value length. We make the second entry
// 252 bytes which still puts it in the first bucket
// which uses 1 byte for the length. The difference in
// overall packet size will be 3 bytes (the first entry
// has 2 more length bytes and 1 more value byte). So
// the function will try to pad the second entry by 3
// bytes (iteration 1). This will however result in the
// second entry shifting to the second BigSize bucket
// meaning it will gain an additional 2 bytes for the
// new length encoding meaning that overall it gains 5
// bytes in size. This will result in another iteration
// which will result in padding the first entry with an
// extra 2 bytes to meet the second entry's new size
// (iteration 2). One more iteration (3) is then done
// to confirm that all entries are now the same size.
name: "big size bucket shift",
expectedIterations: 3,

// We make the path IDs large enough so that
pathIDs: []int{0, 0},
existingPadding: map[int]int{0: 253, 1: 252},
expectedFinalSize: 261,
},
}

for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()

// If the test includes existing padding, then make sure
// that the number of existing padding entries is equal
// to the number of pathID entries.
if test.existingPadding != nil {
require.Len(t, test.existingPadding,
len(test.pathIDs))
}

hopDataSet := make([]*hopData, len(test.pathIDs))
for i, l := range test.pathIDs {
pathID := tlv.SomeRecordT(
tlv.NewPrimitiveRecord[tlv.TlvType6](
make([]byte, l),
),
)
data := &record.BlindedRouteData{
PathID: pathID,
}

if test.existingPadding != nil {
//nolint:lll
padding := tlv.SomeRecordT(
tlv.NewPrimitiveRecord[tlv.TlvType1](
make([]byte, test.existingPadding[i]),
),
)

data.Padding = padding
}

hopDataSet[i] = &hopData{data: data}
}

hopInfo, stats, err := padHopInfo(
hopDataSet, test.prePad,
)
require.NoError(t, err)
require.Equal(t, test.expectedIterations,
stats.numIterations)
require.Equal(t, test.expectedFinalSize,
stats.finalPaddedSize)

// We expect all resulting blobs to be the same size.
for _, info := range hopInfo {
require.Len(
t, info.PlainText,
test.expectedFinalSize,
)
}
})
}
}
// TestPadBlindedHopInfoBlackBox tests the padHopInfo function via the
|
||||
// quick.Check testing function. It generates a random set of hopData and
|
||||
// asserts that the resulting padded set always has the same encoded length.
|
||||
func TestPadBlindedHopInfoBlackBox(t *testing.T) {
|
||||
fn := func(data hopDataList) bool {
|
||||
resultList, _, err := padHopInfo(data, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
// There should be a resulting sphinx.HopInfo struct for each
|
||||
// hopData passed to the padHopInfo function.
|
||||
if len(resultList) != len(data) {
|
||||
return false
|
||||
}
|
||||
|
||||
// There is nothing left to check if input set was empty to
|
||||
// start with.
|
||||
if len(data) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
// Now, assert that the encoded size of each item is the same.
|
||||
// Get the size of the first item as a base point.
|
||||
payloadSize := len(resultList[0].PlainText)
|
||||
|
||||
// All the other entries should have the same encoded size.
|
||||
for i := 1; i < len(resultList); i++ {
|
||||
if len(resultList[i].PlainText) != payloadSize {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
require.NoError(t, quick.Check(fn, nil))
|
||||
}
|
||||
|
||||
type hopDataList []*hopData
|
||||
|
||||
// Generate returns a random instance of the hopDataList type.
|
||||
//
|
||||
// NOTE: this is part of the quick.Generator interface.
|
||||
func (h hopDataList) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
data := make(hopDataList, rand.Intn(size))
|
||||
for i := 0; i < len(data); i++ {
|
||||
data[i] = &hopData{
|
||||
data: genBlindedRouteData(rand),
|
||||
nodeID: pubkey,
|
||||
}
|
||||
}
|
||||
|
||||
return reflect.ValueOf(data)
|
||||
}
|
||||
|
||||
// A compile-time check to ensure that hopDataList implements the
|
||||
// quick.Generator interface.
|
||||
var _ quick.Generator = (*hopDataList)(nil)
|
||||
|
||||
// sometimesDo calls the given function with a 50% probability.
|
||||
func sometimesDo(fn func(), rand *rand.Rand) {
|
||||
if rand.Intn(2) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
fn()
|
||||
}
|
||||
|
||||
// genBlindedRouteData generates a random record.BlindedRouteData object.
|
||||
func genBlindedRouteData(rand *rand.Rand) *record.BlindedRouteData {
|
||||
var data record.BlindedRouteData
|
||||
|
||||
sometimesDo(func() {
|
||||
data.Padding = tlv.SomeRecordT(
|
||||
tlv.NewPrimitiveRecord[tlv.TlvType1](
|
||||
make([]byte, rand.Intn(1000000)),
|
||||
),
|
||||
)
|
||||
}, rand)
|
||||
|
||||
sometimesDo(func() {
|
||||
data.ShortChannelID = tlv.SomeRecordT(
|
||||
tlv.NewRecordT[tlv.TlvType2](lnwire.ShortChannelID{
|
||||
BlockHeight: rand.Uint32(),
|
||||
TxIndex: rand.Uint32(),
|
||||
TxPosition: uint16(rand.Uint32()),
|
||||
}),
|
||||
)
|
||||
}, rand)
|
||||
|
||||
sometimesDo(func() {
|
||||
data.NextNodeID = tlv.SomeRecordT(
|
||||
tlv.NewPrimitiveRecord[tlv.TlvType4](pubkey),
|
||||
)
|
||||
}, rand)
|
||||
|
||||
sometimesDo(func() {
|
||||
data.PathID = tlv.SomeRecordT(
|
||||
tlv.NewPrimitiveRecord[tlv.TlvType6](
|
||||
make([]byte, rand.Intn(100)),
|
||||
),
|
||||
)
|
||||
}, rand)
|
||||
|
||||
sometimesDo(func() {
|
||||
data.NextBlindingOverride = tlv.SomeRecordT(
|
||||
tlv.NewPrimitiveRecord[tlv.TlvType8](pubkey),
|
||||
)
|
||||
}, rand)
|
||||
|
||||
sometimesDo(func() {
|
||||
data.RelayInfo = tlv.SomeRecordT(
|
||||
tlv.NewRecordT[tlv.TlvType10](record.PaymentRelayInfo{
|
||||
CltvExpiryDelta: uint16(rand.Uint32()),
|
||||
FeeRate: rand.Uint32(),
|
||||
BaseFee: lnwire.MilliSatoshi(
|
||||
rand.Uint32(),
|
||||
),
|
||||
}),
|
||||
)
|
||||
}, rand)
|
||||
|
||||
sometimesDo(func() {
|
||||
data.Constraints = tlv.SomeRecordT(
|
||||
tlv.NewRecordT[tlv.TlvType12](record.PaymentConstraints{
|
||||
MaxCltvExpiry: rand.Uint32(),
|
||||
HtlcMinimumMsat: lnwire.MilliSatoshi(
|
||||
rand.Uint32(),
|
||||
),
|
||||
}),
|
||||
)
|
||||
}, rand)
|
||||
|
||||
return &data
|
||||
}
|
||||
|
||||
// TestBuildBlindedPath tests the logic for constructing a blinded path against
|
||||
// an example mentioned in this spec document:
|
||||
// https://github.com/lightning/bolts/blob/master/proposals/route-blinding.md
|
||||
// This example does not use any dummy hops.
|
||||
func TestBuildBlindedPath(t *testing.T) {
|
||||
// Alice chooses the following path to herself for blinded path
|
||||
// construction:
|
||||
// Carol -> Bob -> Alice.
|
||||
// Let's construct the corresponding route.Route for this which will be
|
||||
// returned from the `findRoutes` config callback.
|
||||
var (
|
||||
privC, pkC = btcec.PrivKeyFromBytes([]byte{1})
|
||||
privB, pkB = btcec.PrivKeyFromBytes([]byte{2})
|
||||
privA, pkA = btcec.PrivKeyFromBytes([]byte{3})
|
||||
|
||||
carol = route.NewVertex(pkC)
|
||||
bob = route.NewVertex(pkB)
|
||||
alice = route.NewVertex(pkA)
|
||||
|
||||
chanCB = uint64(1)
|
||||
chanBA = uint64(2)
|
||||
)
|
||||
|
||||
realRoute := &route.Route{
|
||||
SourcePubKey: carol,
|
||||
Hops: []*route.Hop{
|
||||
{
|
||||
PubKeyBytes: bob,
|
||||
ChannelID: chanCB,
|
||||
},
|
||||
{
|
||||
PubKeyBytes: alice,
|
||||
ChannelID: chanBA,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
realPolicies := map[uint64]*models.ChannelEdgePolicy{
|
||||
chanCB: {
|
||||
ChannelID: chanCB,
|
||||
ToNode: bob,
|
||||
},
|
||||
chanBA: {
|
||||
ChannelID: chanBA,
|
||||
ToNode: alice,
|
||||
},
|
||||
}
|
||||
|
||||
paths, err := buildBlindedPaymentPaths(&buildBlindedPathCfg{
|
||||
findRoutes: func(_ lnwire.MilliSatoshi) ([]*route.Route,
|
||||
error) {
|
||||
|
||||
return []*route.Route{realRoute}, nil
|
||||
},
|
||||
fetchChannelEdgesByID: func(chanID uint64) (
|
||||
*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
|
||||
*models.ChannelEdgePolicy, error) {
|
||||
|
||||
return nil, realPolicies[chanID], nil, nil
|
||||
},
|
||||
bestHeight: func() (uint32, error) {
|
||||
return 1000, nil
|
||||
},
|
||||
// In the spec example, all the policies get replaced with
|
||||
// the same static values.
|
||||
addPolicyBuffer: func(_ *blindedHopPolicy) (
|
||||
*blindedHopPolicy, error) {
|
||||
|
||||
return &blindedHopPolicy{
|
||||
feeRate: 500,
|
||||
baseFee: 100,
|
||||
cltvExpiryDelta: 144,
|
||||
minHTLCMsat: 1000,
|
||||
maxHTLCMsat: lnwire.MaxMilliSatoshi,
|
||||
}, nil
|
||||
},
|
||||
pathID: []byte{1, 2, 3},
|
||||
valueMsat: 1000,
|
||||
minFinalCLTVExpiryDelta: 12,
|
||||
blocksUntilExpiry: 200,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, paths, 1)
|
||||
|
||||
path := paths[0]
|
||||
|
||||
// Check that all the accumulated policy values are correct.
|
||||
require.EqualValues(t, 201, path.FeeBaseMsat)
|
||||
require.EqualValues(t, 1001, path.FeeRate)
|
||||
require.EqualValues(t, 300, path.CltvExpiryDelta)
|
||||
require.EqualValues(t, 1000, path.HTLCMinMsat)
|
||||
require.EqualValues(t, lnwire.MaxMilliSatoshi, path.HTLCMaxMsat)
|
||||
|
||||
// Now we check the hops.
|
||||
require.Len(t, path.Hops, 3)
|
||||
|
||||
// Assert that all the encrypted recipient blobs have been padded such
|
||||
// that they are all the same size.
|
||||
require.Len(t, path.Hops[0].CipherText, len(path.Hops[1].CipherText))
|
||||
require.Len(t, path.Hops[1].CipherText, len(path.Hops[2].CipherText))
|
||||
|
||||
// The first hop should have the real pub key of the introduction
|
||||
// node: Carol.
|
||||
hop := path.Hops[0]
|
||||
require.True(t, hop.BlindedNodePub.IsEqual(pkC))
|
||||
|
||||
// As Carol, let's decode the hop data and assert that all expected
|
||||
// values have been included.
|
||||
var (
|
||||
blindingPoint = path.FirstEphemeralBlindingPoint
|
||||
data *record.BlindedRouteData
|
||||
)
|
||||
|
||||
// Check that Carol's info is correct.
|
||||
data, blindingPoint = decryptAndDecodeHopData(
|
||||
t, privC, blindingPoint, hop.CipherText,
|
||||
)
|
||||
|
||||
require.Equal(
|
||||
t, lnwire.NewShortChanIDFromInt(chanCB),
|
||||
data.ShortChannelID.UnwrapOrFail(t).Val,
|
||||
)
|
||||
|
||||
require.Equal(t, record.PaymentRelayInfo{
|
||||
CltvExpiryDelta: 144,
|
||||
FeeRate: 500,
|
||||
BaseFee: 100,
|
||||
}, data.RelayInfo.UnwrapOrFail(t).Val)
|
||||
|
||||
require.Equal(t, record.PaymentConstraints{
|
||||
MaxCltvExpiry: 1500,
|
||||
HtlcMinimumMsat: 1000,
|
||||
}, data.Constraints.UnwrapOrFail(t).Val)
|
||||
|
||||
// Check that all Bob's info is correct.
|
||||
hop = path.Hops[1]
|
||||
data, blindingPoint = decryptAndDecodeHopData(
|
||||
t, privB, blindingPoint, hop.CipherText,
|
||||
)
|
||||
|
||||
require.Equal(
|
||||
t, lnwire.NewShortChanIDFromInt(chanBA),
|
||||
data.ShortChannelID.UnwrapOrFail(t).Val,
|
||||
)
|
||||
|
||||
require.Equal(t, record.PaymentRelayInfo{
|
||||
CltvExpiryDelta: 144,
|
||||
FeeRate: 500,
|
||||
BaseFee: 100,
|
||||
}, data.RelayInfo.UnwrapOrFail(t).Val)
|
||||
|
||||
require.Equal(t, record.PaymentConstraints{
|
||||
MaxCltvExpiry: 1356,
|
||||
HtlcMinimumMsat: 1000,
|
||||
}, data.Constraints.UnwrapOrFail(t).Val)
|
||||
|
||||
// Check that all Alice's info is correct.
|
||||
hop = path.Hops[2]
|
||||
data, _ = decryptAndDecodeHopData(
|
||||
t, privA, blindingPoint, hop.CipherText,
|
||||
)
|
||||
require.True(t, data.ShortChannelID.IsNone())
|
||||
require.True(t, data.RelayInfo.IsNone())
|
||||
require.Equal(t, record.PaymentConstraints{
|
||||
MaxCltvExpiry: 1212,
|
||||
HtlcMinimumMsat: 1000,
|
||||
}, data.Constraints.UnwrapOrFail(t).Val)
|
||||
require.Equal(t, []byte{1, 2, 3}, data.PathID.UnwrapOrFail(t).Val)
|
||||
}
|
||||
|
||||
// TestBuildBlindedPathWithDummyHops tests the construction of a blinded path
|
||||
// which includes dummy hops.
|
||||
func TestBuildBlindedPathWithDummyHops(t *testing.T) {
|
||||
// Alice chooses the following path to herself for blinded path
|
||||
// construction:
|
||||
// Carol -> Bob -> Alice.
|
||||
// Let's construct the corresponding route.Route for this which will be
|
||||
// returned from the `findRoutes` config callback.
|
||||
var (
|
||||
privC, pkC = btcec.PrivKeyFromBytes([]byte{1})
|
||||
privB, pkB = btcec.PrivKeyFromBytes([]byte{2})
|
||||
privA, pkA = btcec.PrivKeyFromBytes([]byte{3})
|
||||
|
||||
carol = route.NewVertex(pkC)
|
||||
bob = route.NewVertex(pkB)
|
||||
alice = route.NewVertex(pkA)
|
||||
|
||||
chanCB = uint64(1)
|
||||
chanBA = uint64(2)
|
||||
)
|
||||
|
||||
realRoute := &route.Route{
|
||||
SourcePubKey: carol,
|
||||
Hops: []*route.Hop{
|
||||
{
|
||||
PubKeyBytes: bob,
|
||||
ChannelID: chanCB,
|
||||
},
|
||||
{
|
||||
PubKeyBytes: alice,
|
||||
ChannelID: chanBA,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
realPolicies := map[uint64]*models.ChannelEdgePolicy{
|
||||
chanCB: {
|
||||
ChannelID: chanCB,
|
||||
ToNode: bob,
|
||||
},
|
||||
chanBA: {
|
||||
ChannelID: chanBA,
|
||||
ToNode: alice,
|
||||
},
|
||||
}
|
||||
|
||||
paths, err := buildBlindedPaymentPaths(&buildBlindedPathCfg{
|
||||
findRoutes: func(_ lnwire.MilliSatoshi) ([]*route.Route,
|
||||
error) {
|
||||
|
||||
return []*route.Route{realRoute}, nil
|
||||
},
|
||||
fetchChannelEdgesByID: func(chanID uint64) (
|
||||
*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
|
||||
*models.ChannelEdgePolicy, error) {
|
||||
|
||||
policy, ok := realPolicies[chanID]
|
||||
if !ok {
|
||||
return nil, nil, nil,
|
||||
fmt.Errorf("edge not found")
|
||||
}
|
||||
|
||||
return nil, policy, nil, nil
|
||||
},
|
||||
bestHeight: func() (uint32, error) {
|
||||
return 1000, nil
|
||||
},
|
||||
// In the spec example, all the policies get replaced with
|
||||
// the same static values.
|
||||
addPolicyBuffer: func(_ *blindedHopPolicy) (
|
||||
*blindedHopPolicy, error) {
|
||||
|
||||
return &blindedHopPolicy{
|
||||
feeRate: 500,
|
||||
baseFee: 100,
|
||||
cltvExpiryDelta: 144,
|
||||
minHTLCMsat: 1000,
|
||||
maxHTLCMsat: lnwire.MaxMilliSatoshi,
|
||||
}, nil
|
||||
},
|
||||
pathID: []byte{1, 2, 3},
|
||||
valueMsat: 1000,
|
||||
minFinalCLTVExpiryDelta: 12,
|
||||
blocksUntilExpiry: 200,
|
||||
|
||||
// By setting the minimum number of hops to 4, we force 2 dummy
|
||||
// hops to be added to the real route.
|
||||
minNumHops: 4,
|
||||
|
||||
dummyHopPolicy: &blindedHopPolicy{
|
||||
cltvExpiryDelta: 50,
|
||||
feeRate: 100,
|
||||
baseFee: 100,
|
||||
minHTLCMsat: 1000,
|
||||
maxHTLCMsat: lnwire.MaxMilliSatoshi,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, paths, 1)
|
||||
|
||||
path := paths[0]
|
||||
|
||||
// Check that all the accumulated policy values are correct.
|
||||
require.EqualValues(t, 403, path.FeeBaseMsat)
|
||||
require.EqualValues(t, 1203, path.FeeRate)
|
||||
require.EqualValues(t, 400, path.CltvExpiryDelta)
|
||||
require.EqualValues(t, 1000, path.HTLCMinMsat)
|
||||
require.EqualValues(t, lnwire.MaxMilliSatoshi, path.HTLCMaxMsat)
|
||||
|
||||
// Now we check the hops.
|
||||
require.Len(t, path.Hops, 5)
|
||||
|
||||
// Assert that all the encrypted recipient blobs have been padded such
|
||||
// that they are all the same size.
|
||||
require.Len(t, path.Hops[0].CipherText, len(path.Hops[1].CipherText))
|
||||
require.Len(t, path.Hops[1].CipherText, len(path.Hops[2].CipherText))
|
||||
require.Len(t, path.Hops[2].CipherText, len(path.Hops[3].CipherText))
|
||||
require.Len(t, path.Hops[3].CipherText, len(path.Hops[4].CipherText))
|
||||
|
||||
// The first hop should have the real pub key of the introduction
|
||||
// node: Carol.
|
||||
hop := path.Hops[0]
|
||||
require.True(t, hop.BlindedNodePub.IsEqual(pkC))
|
||||
|
||||
// As Carol, let's decode the hop data and assert that all expected
|
||||
// values have been included.
|
||||
var (
|
||||
blindingPoint = path.FirstEphemeralBlindingPoint
|
||||
data *record.BlindedRouteData
|
||||
)
|
||||
|
||||
// Check that Carol's info is correct.
|
||||
data, blindingPoint = decryptAndDecodeHopData(
|
||||
t, privC, blindingPoint, hop.CipherText,
|
||||
)
|
||||
|
||||
require.Equal(
|
||||
t, lnwire.NewShortChanIDFromInt(chanCB),
|
||||
data.ShortChannelID.UnwrapOrFail(t).Val,
|
||||
)
|
||||
|
||||
require.Equal(t, record.PaymentRelayInfo{
|
||||
CltvExpiryDelta: 144,
|
||||
FeeRate: 500,
|
||||
BaseFee: 100,
|
||||
}, data.RelayInfo.UnwrapOrFail(t).Val)
|
||||
|
||||
require.Equal(t, record.PaymentConstraints{
|
||||
MaxCltvExpiry: 1600,
|
||||
HtlcMinimumMsat: 1000,
|
||||
}, data.Constraints.UnwrapOrFail(t).Val)
|
||||
|
||||
// Check that all Bob's info is correct.
|
||||
hop = path.Hops[1]
|
||||
data, blindingPoint = decryptAndDecodeHopData(
|
||||
t, privB, blindingPoint, hop.CipherText,
|
||||
)
|
||||
|
||||
require.Equal(
|
||||
t, lnwire.NewShortChanIDFromInt(chanBA),
|
||||
data.ShortChannelID.UnwrapOrFail(t).Val,
|
||||
)
|
||||
|
||||
require.Equal(t, record.PaymentRelayInfo{
|
||||
CltvExpiryDelta: 144,
|
||||
FeeRate: 500,
|
||||
BaseFee: 100,
|
||||
}, data.RelayInfo.UnwrapOrFail(t).Val)
|
||||
|
||||
require.Equal(t, record.PaymentConstraints{
|
||||
MaxCltvExpiry: 1456,
|
||||
HtlcMinimumMsat: 1000,
|
||||
}, data.Constraints.UnwrapOrFail(t).Val)
|
||||
|
||||
// Check that all Alice's info is correct. The payload should contain
|
||||
// a next_node_id field that is equal to Alice's public key. This
|
||||
// indicates to Alice that she should continue peeling the onion.
|
||||
hop = path.Hops[2]
|
||||
data, blindingPoint = decryptAndDecodeHopData(
|
||||
t, privA, blindingPoint, hop.CipherText,
|
||||
)
|
||||
require.True(t, data.ShortChannelID.IsNone())
|
||||
require.True(t, data.RelayInfo.IsSome())
|
||||
require.True(t, data.Constraints.IsSome())
|
||||
require.Equal(t, pkA, data.NextNodeID.UnwrapOrFail(t).Val)
|
||||
|
||||
// Alice should be able to decrypt the next payload with her private
|
||||
// key. This next payload is yet another dummy hop.
|
||||
hop = path.Hops[3]
|
||||
data, blindingPoint = decryptAndDecodeHopData(
|
||||
t, privA, blindingPoint, hop.CipherText,
|
||||
)
|
||||
require.True(t, data.ShortChannelID.IsNone())
|
||||
require.True(t, data.RelayInfo.IsSome())
|
||||
require.True(t, data.Constraints.IsSome())
|
||||
require.Equal(t, pkA, data.NextNodeID.UnwrapOrFail(t).Val)
|
||||
|
||||
// Unwrapping one more time should reveal the final hop info for Alice.
|
||||
hop = path.Hops[4]
|
||||
data, _ = decryptAndDecodeHopData(
|
||||
t, privA, blindingPoint, hop.CipherText,
|
||||
)
|
||||
require.True(t, data.ShortChannelID.IsNone())
|
||||
require.True(t, data.RelayInfo.IsNone())
|
||||
require.Equal(t, record.PaymentConstraints{
|
||||
MaxCltvExpiry: 1212,
|
||||
HtlcMinimumMsat: 1000,
|
||||
}, data.Constraints.UnwrapOrFail(t).Val)
|
||||
require.Equal(t, []byte{1, 2, 3}, data.PathID.UnwrapOrFail(t).Val)
|
||||
}
|
||||
|
||||
// TestSingleHopBlindedPath tests that blinded path construction is done
|
||||
// correctly for the case where the destination node is also the introduction
|
||||
// node.
|
||||
func TestSingleHopBlindedPath(t *testing.T) {
|
||||
var (
|
||||
_, pkC = btcec.PrivKeyFromBytes([]byte{1})
|
||||
carol = route.NewVertex(pkC)
|
||||
)
|
||||
|
||||
realRoute := &route.Route{
|
||||
SourcePubKey: carol,
|
||||
// No hops since Carol is both the introduction node and the
|
||||
// final destination node.
|
||||
Hops: []*route.Hop{},
|
||||
}
|
||||
|
||||
paths, err := buildBlindedPaymentPaths(&buildBlindedPathCfg{
|
||||
findRoutes: func(_ lnwire.MilliSatoshi) ([]*route.Route,
|
||||
error) {
|
||||
|
||||
return []*route.Route{realRoute}, nil
|
||||
},
|
||||
bestHeight: func() (uint32, error) {
|
||||
return 1000, nil
|
||||
},
|
||||
pathID: []byte{1, 2, 3},
|
||||
valueMsat: 1000,
|
||||
minFinalCLTVExpiryDelta: 12,
|
||||
blocksUntilExpiry: 200,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, paths, 1)
|
||||
|
||||
path := paths[0]
|
||||
|
||||
// Check that all the accumulated policy values are correct. Since this
|
||||
// is a unique case where the destination node is also the introduction
|
||||
// node, the accumulated fee and HTLC values should be zero and the
|
||||
// CLTV expiry delta should be equal to Carol's minFinalCLTVExpiryDelta.
|
||||
require.EqualValues(t, 0, path.FeeBaseMsat)
|
||||
require.EqualValues(t, 0, path.FeeRate)
|
||||
require.EqualValues(t, 0, path.HTLCMinMsat)
|
||||
require.EqualValues(t, 0, path.HTLCMaxMsat)
|
||||
require.EqualValues(t, 12, path.CltvExpiryDelta)
|
||||
}
|
||||
|
||||
func decryptAndDecodeHopData(t *testing.T, priv *btcec.PrivateKey,
|
||||
ephem *btcec.PublicKey, cipherText []byte) (*record.BlindedRouteData,
|
||||
*btcec.PublicKey) {
|
||||
|
||||
router := sphinx.NewRouter(
|
||||
&keychain.PrivKeyECDH{PrivKey: priv}, nil,
|
||||
)
|
||||
|
||||
decrypted, err := router.DecryptBlindedHopData(ephem, cipherText)
|
||||
require.NoError(t, err)
|
||||
|
||||
buf := bytes.NewBuffer(decrypted)
|
||||
routeData, err := record.DecodeBlindedRouteData(buf)
|
||||
require.NoError(t, err)
|
||||
|
||||
nextEphem, err := router.NextEphemeral(ephem)
|
||||
require.NoError(t, err)
|
||||
|
||||
return routeData, nextEphem
|
||||
}
|
||||
|
842
routing/blindedpath/blinded_path.go
Normal file
@ -0,0 +1,842 @@
|
||||
package blindedpath
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
sphinx "github.com/lightningnetwork/lightning-onion"
|
||||
"github.com/lightningnetwork/lnd/channeldb/models"
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
"github.com/lightningnetwork/lnd/record"
|
||||
"github.com/lightningnetwork/lnd/routing/route"
|
||||
"github.com/lightningnetwork/lnd/tlv"
|
||||
"github.com/lightningnetwork/lnd/zpay32"
|
||||
)
|
||||
|
||||
const (
|
||||
// oneMillion is a constant used frequently in fee rate calculations.
|
||||
oneMillion = uint32(1_000_000)
|
||||
)
|
||||
|
||||
// errInvalidBlindedPath indicates that the chosen real path is not usable as
|
||||
// a blinded path.
|
||||
var errInvalidBlindedPath = errors.New("the chosen path results in an " +
|
||||
"unusable blinded path")
|
||||
|
||||
// BuildBlindedPathCfg defines the various resources and configuration values
|
||||
// required to build a blinded payment path to this node.
|
||||
type BuildBlindedPathCfg struct {
|
||||
// FindRoutes returns a set of routes to us that can be used for the
|
||||
// construction of blinded paths. These routes will consist of real
|
||||
// nodes advertising the route blinding feature bit. They may be of
|
||||
// various lengths and may even contain only a single hop. Any route
|
||||
// shorter than MinNumHops will be padded with dummy hops during route
|
||||
// construction.
|
||||
FindRoutes func(value lnwire.MilliSatoshi) ([]*route.Route, error)
|
||||
|
||||
// FetchChannelEdgesByID attempts to look up the two directed edges for
|
||||
// the channel identified by the channel ID.
|
||||
FetchChannelEdgesByID func(chanID uint64) (*models.ChannelEdgeInfo,
|
||||
*models.ChannelEdgePolicy, *models.ChannelEdgePolicy, error)
|
||||
|
||||
// BestHeight can be used to fetch the best block height that this node
|
||||
// is aware of.
|
||||
BestHeight func() (uint32, error)
|
||||
|
||||
// AddPolicyBuffer is a function that can be used to alter the policy
|
||||
// values of the given channel edge. The main reason for doing this is
|
||||
// to add a safety buffer so that if the node makes small policy changes
|
||||
// during the lifetime of the blinded path, then the path remains valid
|
||||
// and so probing is more difficult. Note that this will only be called
|
||||
// for the policies of real nodes and won't be applied to
|
||||
// DummyHopPolicy.
|
||||
AddPolicyBuffer func(policy *BlindedHopPolicy) (*BlindedHopPolicy,
|
||||
error)
|
||||
|
||||
// PathID is the secret data to embed in the blinded path data that we
|
||||
// will receive back as the recipient. This is the equivalent of the
|
||||
// payment address used in normal payments. It lets the recipient check
|
||||
// that the path is being used in the correct context.
|
||||
PathID []byte
|
||||
|
||||
// ValueMsat is the payment amount in milli-satoshis that must be
|
||||
// routed. This will be used for selecting appropriate routes to use for
|
||||
// the blinded path.
|
||||
ValueMsat lnwire.MilliSatoshi
|
||||
|
||||
// MinFinalCLTVExpiryDelta is the minimum CLTV delta that the recipient
|
||||
// requires for the final hop of the payment.
|
||||
//
|
||||
// NOTE that the caller is responsible for adding additional block
|
||||
// padding to this value to account for blocks being mined while the
|
||||
// payment is in-flight.
|
||||
MinFinalCLTVExpiryDelta uint32
|
||||
|
||||
// BlocksUntilExpiry is the number of blocks that this blinded path
|
||||
// should remain valid for.
|
||||
BlocksUntilExpiry uint32
|
||||
|
||||
// MinNumHops is the minimum number of hops that each blinded path
|
||||
// should contain. If the number of hops in a path returned by FindRoutes is
// less than this number, then dummy hops will be appended to the
// route.
|
||||
MinNumHops uint8
|
||||
|
||||
// DummyHopPolicy holds the policy values that should be used for dummy
|
||||
// hops. Note that these will _not_ be buffered via AddPolicyBuffer.
|
||||
DummyHopPolicy *BlindedHopPolicy
|
||||
}
|
||||
|
||||
// BuildBlindedPaymentPaths uses the passed config to construct a set of blinded
|
||||
// payment paths that can be added to the invoice.
|
||||
func BuildBlindedPaymentPaths(cfg *BuildBlindedPathCfg) (
|
||||
[]*zpay32.BlindedPaymentPath, error) {
|
||||
|
||||
if cfg.MinFinalCLTVExpiryDelta >= cfg.BlocksUntilExpiry {
|
||||
return nil, fmt.Errorf("blinded path CLTV expiry delta (%d) "+
|
||||
"must be greater than the minimum final CLTV expiry "+
|
||||
"delta (%d)", cfg.BlocksUntilExpiry,
|
||||
cfg.MinFinalCLTVExpiryDelta)
|
||||
}
|
||||
|
||||
// Find some appropriate routes for the value to be routed. This will
|
||||
// return a set of routes made up of real nodes.
|
||||
routes, err := cfg.FindRoutes(cfg.ValueMsat)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(routes) == 0 {
|
||||
return nil, fmt.Errorf("could not find any routes to self to " +
|
||||
"use for blinded route construction")
|
||||
}
|
||||
|
||||
// Not every route returned will necessarily result in a usable blinded
|
||||
// path and so the number of paths returned might be less than the
|
||||
// number of real routes returned by FindRoutes above.
|
||||
paths := make([]*zpay32.BlindedPaymentPath, 0, len(routes))
|
||||
|
||||
// For each route returned, we will construct the associated blinded
|
||||
// payment path.
|
||||
for _, route := range routes {
|
||||
path, err := buildBlindedPaymentPath(
|
||||
cfg, extractCandidatePath(route),
|
||||
)
|
||||
if errors.Is(err, errInvalidBlindedPath) {
|
||||
log.Debugf("Not using route (%s) as a blinded path "+
|
||||
"since it resulted in an invalid blinded path",
|
||||
route)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
paths = append(paths, path)
|
||||
}
|
||||
|
||||
if len(paths) == 0 {
|
||||
return nil, fmt.Errorf("could not build any blinded paths")
|
||||
}
|
||||
|
||||
return paths, nil
|
||||
}
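
As a rough wiring sketch of the exported API above: the findRoutes, fetchEdges, bestHeight and pathID parameters below are hypothetical stand-ins for hooks the caller already has (router, graph and chain access plus an invoice secret), and the numeric values and buffer multipliers are illustrative only rather than defaults taken from this change.

func blindedPathsForInvoice(
	findRoutes func(lnwire.MilliSatoshi) ([]*route.Route, error),
	fetchEdges func(uint64) (*models.ChannelEdgeInfo,
		*models.ChannelEdgePolicy, *models.ChannelEdgePolicy, error),
	bestHeight func() (uint32, error), pathID []byte,
	amt lnwire.MilliSatoshi) ([]*zpay32.BlindedPaymentPath, error) {

	return BuildBlindedPaymentPaths(&BuildBlindedPathCfg{
		FindRoutes:            findRoutes,
		FetchChannelEdgesByID: fetchEdges,
		BestHeight:            bestHeight,
		AddPolicyBuffer: func(p *BlindedHopPolicy) (*BlindedHopPolicy,
			error) {

			// Illustrative buffer: fees and CLTV up by 10%, max
			// HTLC down by 10%.
			return AddPolicyBuffer(p, 1.1, 0.9)
		},
		PathID:                  pathID,
		ValueMsat:               amt,
		MinFinalCLTVExpiryDelta: 18,
		BlocksUntilExpiry:       1008,
		MinNumHops:              2,
		DummyHopPolicy: &BlindedHopPolicy{
			CLTVExpiryDelta: 50,
			FeeRate:         100,
			BaseFee:         100,
			MinHTLCMsat:     1000,
			MaxHTLCMsat:     lnwire.MaxMilliSatoshi,
		},
	})
}
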
|
||||
|
||||
// buildBlindedPaymentPath takes a route from an introduction node to this node
|
||||
// and uses the given config to convert it into a blinded payment path.
|
||||
func buildBlindedPaymentPath(cfg *BuildBlindedPathCfg, path *candidatePath) (
|
||||
*zpay32.BlindedPaymentPath, error) {
|
||||
|
||||
// Pad the given route with dummy hops until the minimum number of hops
|
||||
// is met.
|
||||
err := path.padWithDummyHops(cfg.MinNumHops)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hops, minHTLC, maxHTLC, err := collectRelayInfo(cfg, path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not collect blinded path relay "+
|
||||
"info: %w", err)
|
||||
}
|
||||
|
||||
relayInfo := make([]*record.PaymentRelayInfo, len(hops))
|
||||
for i, hop := range hops {
|
||||
relayInfo[i] = hop.relayInfo
|
||||
}
|
||||
|
||||
// Using the collected relay info, we can calculate the aggregated
|
||||
// policy values for the route.
|
||||
baseFee, feeRate, cltvDelta := calcBlindedPathPolicies(
|
||||
relayInfo, uint16(cfg.MinFinalCLTVExpiryDelta),
|
||||
)
|
||||
|
||||
currentHeight, err := cfg.BestHeight()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The next step is to calculate the payment constraints to communicate
|
||||
// to each hop and to package up the hop info for each hop. We will
|
||||
// handle the final hop first since its payload looks a bit different,
|
||||
// and then we will iterate backwards through the remaining hops.
|
||||
//
|
||||
// Note that the +1 here is required because the route won't have the
|
||||
// introduction node included in the "Hops". But since we want to create
|
||||
// payloads for all the hops as well as the introduction node, we add 1
|
||||
// here to get the full hop length along with the introduction node.
|
||||
hopDataSet := make([]*hopData, 0, len(path.hops)+1)
|
||||
|
||||
// Determine the maximum CLTV expiry for the destination node.
|
||||
cltvExpiry := currentHeight + cfg.BlocksUntilExpiry +
|
||||
cfg.MinFinalCLTVExpiryDelta
|
||||
|
||||
constraints := &record.PaymentConstraints{
|
||||
MaxCltvExpiry: cltvExpiry,
|
||||
HtlcMinimumMsat: minHTLC,
|
||||
}
|
||||
|
||||
// If the blinded route has only a source node (introduction node) and
|
||||
// no hops, then the destination node is also the source node.
|
||||
finalHopPubKey := path.introNode
|
||||
if len(path.hops) > 0 {
|
||||
finalHopPubKey = path.hops[len(path.hops)-1].pubKey
|
||||
}
|
||||
|
||||
// For the final hop, we only send it the path ID and payment
|
||||
// constraints.
|
||||
info, err := buildFinalHopRouteData(
|
||||
finalHopPubKey, cfg.PathID, constraints,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hopDataSet = append(hopDataSet, info)
|
||||
|
||||
// Iterate through the remaining (non-final) hops, back to front.
|
||||
for i := len(hops) - 1; i >= 0; i-- {
|
||||
hop := hops[i]
|
||||
|
||||
cltvExpiry += uint32(hop.relayInfo.CltvExpiryDelta)
|
||||
|
||||
constraints = &record.PaymentConstraints{
|
||||
MaxCltvExpiry: cltvExpiry,
|
||||
HtlcMinimumMsat: minHTLC,
|
||||
}
|
||||
|
||||
var info *hopData
|
||||
if hop.nextHopIsDummy {
|
||||
info, err = buildDummyRouteData(
|
||||
hop.hopPubKey, hop.relayInfo, constraints,
|
||||
)
|
||||
} else {
|
||||
info, err = buildHopRouteData(
|
||||
hop.hopPubKey, hop.nextSCID, hop.relayInfo,
|
||||
constraints,
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hopDataSet = append(hopDataSet, info)
|
||||
}
|
||||
|
||||
// Sort the hop info list in reverse order so that the data for the
|
||||
// introduction node is first.
|
||||
sort.Slice(hopDataSet, func(i, j int) bool {
|
||||
return j < i
|
||||
})
|
||||
|
||||
// Add padding to each route data instance until the encrypted data
|
||||
// blobs are all the same size.
|
||||
paymentPath, _, err := padHopInfo(hopDataSet, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Derive an ephemeral session key.
|
||||
sessionKey, err := btcec.NewPrivateKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Encrypt the hop info.
|
||||
blindedPath, err := sphinx.BuildBlindedPath(sessionKey, paymentPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(blindedPath.BlindedHops) < 1 {
|
||||
return nil, fmt.Errorf("blinded path must have at least one " +
|
||||
"hop")
|
||||
}
|
||||
|
||||
// Overwrite the introduction point's blinded pub key with the real
|
||||
// pub key so that we can use this more compact format in the
// invoice without needing to encode the unused blinded node pub key of
|
||||
// the intro node.
|
||||
blindedPath.BlindedHops[0].BlindedNodePub =
|
||||
blindedPath.IntroductionPoint
|
||||
|
||||
// Now construct a zpay32 blinded path.
|
||||
return &zpay32.BlindedPaymentPath{
|
||||
FeeBaseMsat: uint32(baseFee),
|
||||
FeeRate: feeRate,
|
||||
CltvExpiryDelta: cltvDelta,
|
||||
HTLCMinMsat: uint64(minHTLC),
|
||||
HTLCMaxMsat: uint64(maxHTLC),
|
||||
Features: lnwire.EmptyFeatureVector(),
|
||||
FirstEphemeralBlindingPoint: blindedPath.BlindingPoint,
|
||||
Hops: blindedPath.BlindedHops,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// hopRelayInfo packages together the relay info to send to a hop on a blinded
|
||||
// path along with the pub key of that hop and the SCID that the hop should
|
||||
// forward the payment on to.
|
||||
type hopRelayInfo struct {
|
||||
hopPubKey route.Vertex
|
||||
nextSCID lnwire.ShortChannelID
|
||||
relayInfo *record.PaymentRelayInfo
|
||||
nextHopIsDummy bool
|
||||
}
|
||||
|
||||
// collectRelayInfo collects the relay policy rules for each relay hop on the
|
||||
// route and applies any policy buffers.
|
||||
//
|
||||
// For the blinded route:
|
||||
//
|
||||
// C --chan(CB)--> B --chan(BA)--> A
|
||||
//
|
||||
// where C is the introduction node, the route.Route struct we are given will
|
||||
// have SourcePubKey set to C's pub key, and then it will have the following
|
||||
// route.Hops:
|
||||
//
|
||||
// - PubKeyBytes: B, ChannelID: chan(CB)
|
||||
// - PubKeyBytes: A, ChannelID: chan(BA)
|
||||
//
|
||||
// We, however, want to collect the channel policies for the following PubKey
|
||||
// and ChannelID pairs:
|
||||
//
|
||||
// - PubKey: C, ChannelID: chan(CB)
|
||||
// - PubKey: B, ChannelID: chan(BA)
|
||||
//
|
||||
// Therefore, when we go through the route and its hops to collect policies, our
|
||||
// index for collecting public keys will be trailing that of the channel IDs by
|
||||
// 1.
|
||||
func collectRelayInfo(cfg *BuildBlindedPathCfg, path *candidatePath) (
|
||||
[]*hopRelayInfo, lnwire.MilliSatoshi, lnwire.MilliSatoshi, error) {
|
||||
|
||||
var (
|
||||
hops = make([]*hopRelayInfo, 0, len(path.hops))
|
||||
minHTLC lnwire.MilliSatoshi
|
||||
maxHTLC lnwire.MilliSatoshi
|
||||
)
|
||||
|
||||
var (
|
||||
// The first pub key is that of the introduction node.
|
||||
hopSource = path.introNode
|
||||
)
|
||||
for _, hop := range path.hops {
|
||||
var (
|
||||
// For dummy hops, we use pre-configured policy values.
|
||||
policy = cfg.DummyHopPolicy
|
||||
err error
|
||||
)
|
||||
if !hop.isDummy {
|
||||
// For real hops, retrieve the channel policy for this
|
||||
// hop's channel ID in the direction pointing away from
|
||||
// the hopSource node.
|
||||
policy, err = getNodeChannelPolicy(
|
||||
cfg, hop.channelID, hopSource,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, 0, 0, err
|
||||
}
|
||||
|
||||
// Apply any policy changes now before caching the
|
||||
// policy.
|
||||
policy, err = cfg.AddPolicyBuffer(policy)
|
||||
if err != nil {
|
||||
return nil, 0, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// If this is the first policy we are collecting, then use this
|
||||
// policy to set the base values for min/max htlc.
|
||||
if len(hops) == 0 {
|
||||
minHTLC = policy.MinHTLCMsat
|
||||
maxHTLC = policy.MaxHTLCMsat
|
||||
} else {
|
||||
if policy.MinHTLCMsat > minHTLC {
|
||||
minHTLC = policy.MinHTLCMsat
|
||||
}
|
||||
|
||||
if policy.MaxHTLCMsat < maxHTLC {
|
||||
maxHTLC = policy.MaxHTLCMsat
|
||||
}
|
||||
}
|
||||
|
||||
// From the policy values for this hop, we can collect the
|
||||
// payment relay info that we will send to this hop.
|
||||
hops = append(hops, &hopRelayInfo{
|
||||
hopPubKey: hopSource,
|
||||
nextSCID: lnwire.NewShortChanIDFromInt(hop.channelID),
|
||||
relayInfo: &record.PaymentRelayInfo{
|
||||
FeeRate: policy.FeeRate,
|
||||
BaseFee: policy.BaseFee,
|
||||
CltvExpiryDelta: policy.CLTVExpiryDelta,
|
||||
},
|
||||
nextHopIsDummy: hop.isDummy,
|
||||
})
|
||||
|
||||
// This hop's pub key will be the policy creator for the next
|
||||
// hop.
|
||||
hopSource = hop.pubKey
|
||||
}
|
||||
|
||||
// It can happen that there is no HTLC-range overlap between the various
|
||||
// hops along the path. We return errInvalidBlindedPath to indicate that
|
||||
// this route is not usable.
|
||||
if minHTLC > maxHTLC {
|
||||
return nil, 0, 0, fmt.Errorf("%w: resulting blinded path min "+
|
||||
"HTLC value is larger than the resulting max HTLC "+
|
||||
"value", errInvalidBlindedPath)
|
||||
}
|
||||
|
||||
return hops, minHTLC, maxHTLC, nil
|
||||
}
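
To make the overlap check concrete: if one hop's (buffered) policy only allows HTLCs in the range 1,000 to 2,000 msat while another hop only allows 3,000 to 5,000 msat, the running minimum ends at 3,000 and the running maximum at 2,000, so minHTLC > maxHTLC and the candidate route is rejected with errInvalidBlindedPath rather than producing a path that no payment could ever satisfy.
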
|
||||
|
||||
// buildDummyRouteData constructs the record.BlindedRouteData struct for the
|
||||
// given hop in a blinded route where the following hop is a dummy hop.
|
||||
func buildDummyRouteData(node route.Vertex, relayInfo *record.PaymentRelayInfo,
|
||||
constraints *record.PaymentConstraints) (*hopData, error) {
|
||||
|
||||
nodeID, err := btcec.ParsePubKey(node[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &hopData{
|
||||
data: record.NewDummyHopRouteData(
|
||||
nodeID, *relayInfo, *constraints,
|
||||
),
|
||||
nodeID: nodeID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// buildHopRouteData constructs the record.BlindedRouteData struct for the given
|
||||
// non-final hop on a blinded path and packages it with the node's ID.
|
||||
func buildHopRouteData(node route.Vertex, scid lnwire.ShortChannelID,
|
||||
relayInfo *record.PaymentRelayInfo,
|
||||
constraints *record.PaymentConstraints) (*hopData, error) {
|
||||
|
||||
// Wrap up the data we want to send to this hop.
|
||||
blindedRouteHopData := record.NewNonFinalBlindedRouteData(
|
||||
scid, nil, *relayInfo, constraints, nil,
|
||||
)
|
||||
|
||||
nodeID, err := btcec.ParsePubKey(node[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &hopData{
|
||||
data: blindedRouteHopData,
|
||||
nodeID: nodeID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// buildFinalHopRouteData constructs the record.BlindedRouteData struct for the
|
||||
// final hop and packages it with the real node ID of the node it is intended
|
||||
// for.
|
||||
func buildFinalHopRouteData(node route.Vertex, pathID []byte,
|
||||
constraints *record.PaymentConstraints) (*hopData, error) {
|
||||
|
||||
blindedRouteHopData := record.NewFinalHopBlindedRouteData(
|
||||
constraints, pathID,
|
||||
)
|
||||
nodeID, err := btcec.ParsePubKey(node[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &hopData{
|
||||
data: blindedRouteHopData,
|
||||
nodeID: nodeID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getNodeChannelPolicy fetches the routing policy info for the given channel
// and node pair.
|
||||
func getNodeChannelPolicy(cfg *BuildBlindedPathCfg, chanID uint64,
|
||||
nodeID route.Vertex) (*BlindedHopPolicy, error) {
|
||||
|
||||
// Attempt to fetch channel updates for the given channel. We will have
|
||||
// at most two updates for a given channel.
|
||||
_, update1, update2, err := cfg.FetchChannelEdgesByID(chanID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Now we need to determine which of the updates was created by the
|
||||
// node in question. We know the update is the correct one if the
|
||||
// "ToNode" for the fetched policy is _not_ equal to the node ID in
|
||||
// question.
|
||||
var policy *models.ChannelEdgePolicy
|
||||
switch {
|
||||
case update1 != nil && !bytes.Equal(update1.ToNode[:], nodeID[:]):
|
||||
policy = update1
|
||||
|
||||
case update2 != nil && !bytes.Equal(update2.ToNode[:], nodeID[:]):
|
||||
policy = update2
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("no channel updates found from node "+
|
||||
"%s for channel %d", nodeID, chanID)
|
||||
}
|
||||
|
||||
return &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: policy.TimeLockDelta,
|
||||
FeeRate: uint32(policy.FeeProportionalMillionths),
|
||||
BaseFee: policy.FeeBaseMSat,
|
||||
MinHTLCMsat: policy.MinHTLC,
|
||||
MaxHTLCMsat: policy.MaxHTLC,
|
||||
}, nil
|
||||
}
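
As a concrete reading of the direction check above: for the Carol to Bob channel used in the tests, the edge policy whose ToNode is Bob is the one Carol advertises for forwarding towards Bob. So when this function is asked for Carol's policy on that channel, it returns the update whose ToNode is not Carol.
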
|
||||
|
||||
// candidatePath holds all the information about a route to this node that we
|
||||
// need in order to build a blinded route.
|
||||
type candidatePath struct {
|
||||
introNode route.Vertex
|
||||
finalNodeID route.Vertex
|
||||
hops []*blindedPathHop
|
||||
}
|
||||
|
||||
// padWithDummyHops appends dummy hops to the candidatePath hop set until the
// set contains n hops. The
|
||||
// pub key for the dummy hop will be the same as the pub key for the final hop
|
||||
// of the path. That way, the final hop will be able to decrypt the data
|
||||
// encrypted for each dummy hop.
|
||||
func (c *candidatePath) padWithDummyHops(n uint8) error {
|
||||
for len(c.hops) < int(n) {
|
||||
c.hops = append(c.hops, &blindedPathHop{
|
||||
pubKey: c.finalNodeID,
|
||||
isDummy: true,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
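
A small sketch of the padding behaviour, where carol, bob, alice and the two channel IDs are route.Vertex and uint64 values of the same shape as those used in the tests of this change, and the minimum of four hops mirrors the dummy-hop test:

path := &candidatePath{
	introNode:   carol,
	finalNodeID: alice,
	hops: []*blindedPathHop{
		{pubKey: bob, channelID: chanCB},
		{pubKey: alice, channelID: chanBA},
	},
}

// Pad the two real hops up to a total of four hops.
_ = path.padWithDummyHops(4)

// path.hops now contains the two real hops followed by two dummy hops, both
// carrying alice's pub key with isDummy set, so Alice can decrypt the data
// encrypted for them.
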
|
||||
|
||||
// blindedPathHop holds the information we need to know about a hop in a route
|
||||
// in order to use it in the construction of a blinded path.
|
||||
type blindedPathHop struct {
|
||||
// pubKey is the real pub key of a node on a blinded path.
|
||||
pubKey route.Vertex
|
||||
|
||||
// channelID is the channel along which the previous hop should forward
|
||||
// their HTLC in order to reach this hop.
|
||||
channelID uint64
|
||||
|
||||
// isDummy is true if this hop is an appended dummy hop.
|
||||
isDummy bool
|
||||
}
|
||||
|
||||
// extractCandidatePath extracts the data it needs from the given route.Route in
|
||||
// order to construct a candidatePath.
|
||||
func extractCandidatePath(path *route.Route) *candidatePath {
|
||||
var (
|
||||
hops = make([]*blindedPathHop, len(path.Hops))
|
||||
finalNode = path.SourcePubKey
|
||||
)
|
||||
for i, hop := range path.Hops {
|
||||
hops[i] = &blindedPathHop{
|
||||
pubKey: hop.PubKeyBytes,
|
||||
channelID: hop.ChannelID,
|
||||
}
|
||||
|
||||
if i == len(path.Hops)-1 {
|
||||
finalNode = hop.PubKeyBytes
|
||||
}
|
||||
}
|
||||
|
||||
return &candidatePath{
|
||||
introNode: path.SourcePubKey,
|
||||
finalNodeID: finalNode,
|
||||
hops: hops,
|
||||
}
|
||||
}
|
||||
|
||||
// BlindedHopPolicy holds the set of relay policy values to use for a channel
|
||||
// in a blinded path.
|
||||
type BlindedHopPolicy struct {
|
||||
CLTVExpiryDelta uint16
|
||||
FeeRate uint32
|
||||
BaseFee lnwire.MilliSatoshi
|
||||
MinHTLCMsat lnwire.MilliSatoshi
|
||||
MaxHTLCMsat lnwire.MilliSatoshi
|
||||
}
|
||||
|
||||
// AddPolicyBuffer returns a buffered version of the given BlindedHopPolicy,
// produced by scaling its policy values with the given multipliers.
|
||||
// The base fee, fee rate and minimum HTLC msat values are adjusted via the
|
||||
// incMultiplier while the maximum HTLC msat value is adjusted via the
|
||||
// decMultiplier. If adjustments of the HTLC values no longer make sense
|
||||
// then the original HTLC value is used.
|
||||
func AddPolicyBuffer(policy *BlindedHopPolicy, incMultiplier,
|
||||
decMultiplier float64) (*BlindedHopPolicy, error) {
|
||||
|
||||
if incMultiplier < 1 {
|
||||
return nil, fmt.Errorf("blinded path policy increase " +
|
||||
"multiplier must be greater than or equal to 1")
|
||||
}
|
||||
|
||||
if decMultiplier < 0 || decMultiplier > 1 {
|
||||
return nil, fmt.Errorf("blinded path policy decrease " +
|
||||
"multiplier must be in the range [0;1]")
|
||||
}
|
||||
|
||||
var (
|
||||
minHTLCMsat = lnwire.MilliSatoshi(
|
||||
float64(policy.MinHTLCMsat) * incMultiplier,
|
||||
)
|
||||
maxHTLCMsat = lnwire.MilliSatoshi(
|
||||
float64(policy.MaxHTLCMsat) * decMultiplier,
|
||||
)
|
||||
)
|
||||
|
||||
// Make sure the new minimum is not more than the original maximum.
|
||||
// If it is, then just stick to the original minimum.
|
||||
if minHTLCMsat > policy.MaxHTLCMsat {
|
||||
minHTLCMsat = policy.MinHTLCMsat
|
||||
}
|
||||
|
||||
// Make sure the new maximum is not less than the original minimum.
|
||||
// If it is, then just stick to the original maximum.
|
||||
if maxHTLCMsat < policy.MinHTLCMsat {
|
||||
maxHTLCMsat = policy.MaxHTLCMsat
|
||||
}
|
||||
|
||||
// Also ensure that the new htlc bounds make sense. If the new minimum
|
||||
// is greater than the new maximum, then just reset both to their original
|
||||
// values.
|
||||
if minHTLCMsat > maxHTLCMsat {
|
||||
minHTLCMsat = policy.MinHTLCMsat
|
||||
maxHTLCMsat = policy.MaxHTLCMsat
|
||||
}
|
||||
|
||||
return &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: uint16(
|
||||
float64(policy.CLTVExpiryDelta) * incMultiplier,
|
||||
),
|
||||
FeeRate: uint32(
|
||||
float64(policy.FeeRate) * incMultiplier,
|
||||
),
|
||||
BaseFee: lnwire.MilliSatoshi(
|
||||
float64(policy.BaseFee) * incMultiplier,
|
||||
),
|
||||
MinHTLCMsat: minHTLCMsat,
|
||||
MaxHTLCMsat: maxHTLCMsat,
|
||||
}, nil
|
||||
}
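
A minimal usage sketch, reusing the values of the "buffer up by 100% and down by 50%" case from the tests added in this change:

buffered, _ := AddPolicyBuffer(&BlindedHopPolicy{
	CLTVExpiryDelta: 10,
	MinHTLCMsat:     20,
	MaxHTLCMsat:     300,
	BaseFee:         40,
	FeeRate:         50,
}, 2, 0.5)

// buffered now holds CLTVExpiryDelta: 20, MinHTLCMsat: 40, MaxHTLCMsat: 150,
// BaseFee: 80 and FeeRate: 100.
_ = buffered
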
|
||||
|
||||
// calcBlindedPathPolicies computes the accumulated policy values for the path.
|
||||
// These values include the total base fee, the total proportional fee and the
|
||||
// total CLTV delta. This function assumes that all the passed relay infos have
|
||||
// already been adjusted with a buffer to account for easy probing attacks.
|
||||
func calcBlindedPathPolicies(relayInfo []*record.PaymentRelayInfo,
|
||||
ourMinFinalCLTVDelta uint16) (lnwire.MilliSatoshi, uint32, uint16) {
|
||||
|
||||
var (
|
||||
totalFeeBase lnwire.MilliSatoshi
|
||||
totalFeeProp uint32
|
||||
totalCLTV = ourMinFinalCLTVDelta
|
||||
)
|
||||
// Use the algorithms defined in BOLT 4 to calculate the accumulated
|
||||
// relay fees for the route:
|
||||
//nolint:lll
|
||||
// https://github.com/lightning/bolts/blob/db278ab9b2baa0b30cfe79fb3de39280595938d3/04-onion-routing.md?plain=1#L255
|
||||
for i := len(relayInfo) - 1; i >= 0; i-- {
|
||||
info := relayInfo[i]
|
||||
|
||||
totalFeeBase = calcNextTotalBaseFee(
|
||||
totalFeeBase, info.BaseFee, info.FeeRate,
|
||||
)
|
||||
|
||||
totalFeeProp = calcNextTotalFeeRate(totalFeeProp, info.FeeRate)
|
||||
|
||||
totalCLTV += info.CltvExpiryDelta
|
||||
}
|
||||
|
||||
return totalFeeBase, totalFeeProp, totalCLTV
|
||||
}
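
For the spec example exercised in the tests of this change (two relay hops, each with base fee 100 msat, fee rate 500 ppm and CLTV delta 144, plus a recipient final delta of 12), the back-to-front accumulation works out as follows:

relayInfo := []*record.PaymentRelayInfo{
	{BaseFee: 100, FeeRate: 500, CltvExpiryDelta: 144}, // C -> B policy.
	{BaseFee: 100, FeeRate: 500, CltvExpiryDelta: 144}, // B -> A policy.
}
totalBase, totalRate, totalCLTV := calcBlindedPathPolicies(relayInfo, 12)

// First pass (B -> A):  base = ceil(100*1e6 / 1e6) = 100
//                       rate = ceil(500*1e6 / 1e6) = 500
// Second pass (C -> B): base = ceil((100*1e6 + 100*(1e6+500)) / 1e6) = 201
//                       rate = ceil(((500+500)*1e6 + 500*500) / 1e6) = 1001
// CLTV delta:           12 + 144 + 144 = 300
//
// So totalBase == 201, totalRate == 1001 and totalCLTV == 300, matching the
// accumulated-policy test later in this change.
_, _, _ = totalBase, totalRate, totalCLTV
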
|
||||
|
||||
// calcNextTotalBaseFee takes the current total accumulated base fee of a
|
||||
// blinded path at hop `n` along with the fee rate and base fee of the hop at
|
||||
// `n+1` and uses these to calculate the accumulated base fee at hop `n+1`.
|
||||
func calcNextTotalBaseFee(currentTotal, hopBaseFee lnwire.MilliSatoshi,
|
||||
hopFeeRate uint32) lnwire.MilliSatoshi {
|
||||
|
||||
numerator := (uint32(hopBaseFee) * oneMillion) +
|
||||
(uint32(currentTotal) * (oneMillion + hopFeeRate)) +
|
||||
oneMillion - 1
|
||||
|
||||
return lnwire.MilliSatoshi(numerator / oneMillion)
|
||||
}
|
||||
|
||||
// calcNextTotalFeeRate takes the current total accumulated fee rate of a
|
||||
// blinded path at hop `n` along with the fee rate of the hop at `n+1` and uses
|
||||
// these to calculate the accumulated fee rate at hop `n+1`.
|
||||
func calcNextTotalFeeRate(currentTotal, hopFeeRate uint32) uint32 {
|
||||
numerator := (currentTotal+hopFeeRate)*oneMillion +
|
||||
currentTotal*hopFeeRate + oneMillion - 1
|
||||
|
||||
return numerator / oneMillion
|
||||
}
|
||||
|
||||
// hopData packages the record.BlindedRouteData for a hop on a blinded path with
|
||||
// the real node ID of that hop.
|
||||
type hopData struct {
|
||||
data *record.BlindedRouteData
|
||||
nodeID *btcec.PublicKey
|
||||
}
|
||||
|
||||
// padStats can be used to keep track of various pieces of data that we collect
|
||||
// during a call to padHopInfo. This is useful for logging and for test
|
||||
// assertions.
|
||||
type padStats struct {
|
||||
minPayloadSize int
|
||||
maxPayloadSize int
|
||||
finalPaddedSize int
|
||||
numIterations int
|
||||
}
|
||||
|
||||
// padHopInfo iterates over a set of record.BlindedRouteData and adds padding
|
||||
// where needed until the resulting encrypted data blobs are all the same size.
|
||||
// This may take a few iterations due to the fact that a TLV field is used to
|
||||
// add this padding. For example, if we want to add a 1 byte padding to a
|
||||
// record.BlindedRouteData when it does not yet have any padding, then adding
|
||||
// a 1 byte padding will actually add 3 bytes due to the bytes required when
|
||||
// adding the initial type and length bytes. However, on the next iteration if
|
||||
// we again add just 1 byte, then only a single byte will be added. A similar
// extra iteration may be needed when a padding adjustment pushes a payload
// length across a BigSize encoding bucket edge. The number of iterations that
// this function takes is also returned for
|
||||
// testing purposes. If prePad is true, then zero byte padding is added to each
|
||||
// payload that does not yet have padding. This will save some iterations for
|
||||
// the majority of cases.
|
||||
func padHopInfo(hopInfo []*hopData, prePad bool) ([]*sphinx.HopInfo, *padStats,
|
||||
error) {
|
||||
|
||||
var (
|
||||
paymentPath = make([]*sphinx.HopInfo, len(hopInfo))
|
||||
stats padStats
|
||||
)
|
||||
|
||||
// Pre-pad each payload with zero byte padding (if it does not yet have
|
||||
// padding) to save a couple of iterations in the majority of cases.
|
||||
if prePad {
|
||||
for _, info := range hopInfo {
|
||||
if info.data.Padding.IsSome() {
|
||||
continue
|
||||
}
|
||||
|
||||
info.data.PadBy(0)
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
stats.numIterations++
|
||||
|
||||
// On each iteration of the loop, we first determine the
|
||||
// current largest encoded data blob size. This will be the
|
||||
// size we aim to get the others to match.
|
||||
var (
|
||||
maxLen int
|
||||
minLen = math.MaxInt
|
||||
)
|
||||
for i, hop := range hopInfo {
|
||||
plainText, err := record.EncodeBlindedRouteData(
|
||||
hop.data,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if len(plainText) > maxLen {
|
||||
maxLen = len(plainText)
|
||||
|
||||
// Update the stats to take note of this new
|
||||
// max since this may be the final max that all
|
||||
// payloads will be padded to.
|
||||
stats.finalPaddedSize = maxLen
|
||||
}
|
||||
if len(plainText) < minLen {
|
||||
minLen = len(plainText)
|
||||
}
|
||||
|
||||
paymentPath[i] = &sphinx.HopInfo{
|
||||
NodePub: hop.nodeID,
|
||||
PlainText: plainText,
|
||||
}
|
||||
}
|
||||
|
||||
// If this is our first iteration, then we take note of the min
|
||||
// and max lengths of the payloads pre-padding for logging
|
||||
// later.
|
||||
if stats.numIterations == 1 {
|
||||
stats.minPayloadSize = minLen
|
||||
stats.maxPayloadSize = maxLen
|
||||
}
|
||||
|
||||
// Now we iterate over them again and determine which ones we
|
||||
// need to add padding to.
|
||||
var numEqual int
|
||||
for i, hop := range hopInfo {
|
||||
plainText := paymentPath[i].PlainText
|
||||
|
||||
// If the plaintext length is equal to the desired
|
||||
// length, then we can continue. We use numEqual to
|
||||
// keep track of how many have the same length.
|
||||
if len(plainText) == maxLen {
|
||||
numEqual++
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// If we previously added padding to this hop, we keep
|
||||
// the length of that initial padding too.
|
||||
var existingPadding int
|
||||
hop.data.Padding.WhenSome(
|
||||
func(p tlv.RecordT[tlv.TlvType1, []byte]) {
|
||||
existingPadding = len(p.Val)
|
||||
},
|
||||
)
|
||||
|
||||
// Add some padding bytes to the hop.
|
||||
hop.data.PadBy(
|
||||
existingPadding + maxLen - len(plainText),
|
||||
)
|
||||
}
|
||||
|
||||
// If all the payloads have the same length, we can exit the
|
||||
// loop.
|
||||
if numEqual == len(hopInfo) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("Finished padding %d blinded path payloads to %d bytes "+
|
||||
"each where the pre-padded min and max sizes were %d and %d "+
|
||||
"bytes respectively", len(hopInfo), stats.finalPaddedSize,
|
||||
stats.minPayloadSize, stats.maxPayloadSize)
|
||||
|
||||
return paymentPath, &stats, nil
|
||||
}
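
A quick sketch of the TLV overhead that drives the iteration count, matching the behaviour described in the doc comment above (this assumes an otherwise empty record encodes to zero bytes, which is what the single-entry padding test below relies on):

data := &record.BlindedRouteData{}
noPad, _ := record.EncodeBlindedRouteData(data)

// Ask for a single byte of padding: since there is no Padding record yet,
// the encoding grows by 3 bytes (type + length + 1 value byte).
data.PadBy(1)
withPad, _ := record.EncodeBlindedRouteData(data)

fmt.Println(len(withPad) - len(noPad)) // 3
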
|
979
routing/blindedpath/blinded_path_test.go
Normal file
@ -0,0 +1,979 @@
|
||||
package blindedpath
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
sphinx "github.com/lightningnetwork/lightning-onion"
|
||||
"github.com/lightningnetwork/lnd/channeldb/models"
|
||||
"github.com/lightningnetwork/lnd/keychain"
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
"github.com/lightningnetwork/lnd/record"
|
||||
"github.com/lightningnetwork/lnd/routing/route"
|
||||
"github.com/lightningnetwork/lnd/tlv"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
pubkeyBytes, _ = hex.DecodeString(
|
||||
"598ec453728e0ffe0ae2f5e174243cf58f2" +
|
||||
"a3f2c83d2457b43036db568b11093",
|
||||
)
|
||||
pubKeyY = new(btcec.FieldVal)
|
||||
_ = pubKeyY.SetByteSlice(pubkeyBytes)
|
||||
pubkey = btcec.NewPublicKey(new(btcec.FieldVal).SetInt(4), pubKeyY)
|
||||
)
|
||||
|
||||
// TestApplyBlindedPathPolicyBuffer tests blinded policy adjustments.
|
||||
func TestApplyBlindedPathPolicyBuffer(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
policyIn *BlindedHopPolicy
|
||||
expectedOut *BlindedHopPolicy
|
||||
incMultiplier float64
|
||||
decMultiplier float64
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "invalid increase multiplier",
|
||||
incMultiplier: 0,
|
||||
expectedError: "blinded path policy increase " +
|
||||
"multiplier must be greater than or equal to 1",
|
||||
},
|
||||
{
|
||||
name: "decrease multiplier too small",
|
||||
incMultiplier: 1,
|
||||
decMultiplier: -1,
|
||||
expectedError: "blinded path policy decrease " +
|
||||
"multiplier must be in the range [0;1]",
|
||||
},
|
||||
{
|
||||
name: "decrease multiplier too big",
|
||||
incMultiplier: 1,
|
||||
decMultiplier: 2,
|
||||
expectedError: "blinded path policy decrease " +
|
||||
"multiplier must be in the range [0;1]",
|
||||
},
|
||||
{
|
||||
name: "no change",
|
||||
incMultiplier: 1,
|
||||
decMultiplier: 1,
|
||||
policyIn: &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: 1,
|
||||
MinHTLCMsat: 2,
|
||||
MaxHTLCMsat: 3,
|
||||
BaseFee: 4,
|
||||
FeeRate: 5,
|
||||
},
|
||||
expectedOut: &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: 1,
|
||||
MinHTLCMsat: 2,
|
||||
MaxHTLCMsat: 3,
|
||||
BaseFee: 4,
|
||||
FeeRate: 5,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "buffer up by 100% and down by 50%",
|
||||
incMultiplier: 2,
|
||||
decMultiplier: 0.5,
|
||||
policyIn: &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: 10,
|
||||
MinHTLCMsat: 20,
|
||||
MaxHTLCMsat: 300,
|
||||
BaseFee: 40,
|
||||
FeeRate: 50,
|
||||
},
|
||||
expectedOut: &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: 20,
|
||||
MinHTLCMsat: 40,
|
||||
MaxHTLCMsat: 150,
|
||||
BaseFee: 80,
|
||||
FeeRate: 100,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new HTLC minimum larger than OG " +
|
||||
"maximum",
|
||||
incMultiplier: 2,
|
||||
decMultiplier: 1,
|
||||
policyIn: &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: 10,
|
||||
MinHTLCMsat: 20,
|
||||
MaxHTLCMsat: 30,
|
||||
BaseFee: 40,
|
||||
FeeRate: 50,
|
||||
},
|
||||
expectedOut: &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: 20,
|
||||
MinHTLCMsat: 20,
|
||||
MaxHTLCMsat: 30,
|
||||
BaseFee: 80,
|
||||
FeeRate: 100,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new HTLC maximum smaller than OG " +
|
||||
"minimum",
|
||||
incMultiplier: 1,
|
||||
decMultiplier: 0.5,
|
||||
policyIn: &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: 10,
|
||||
MinHTLCMsat: 20,
|
||||
MaxHTLCMsat: 30,
|
||||
BaseFee: 40,
|
||||
FeeRate: 50,
|
||||
},
|
||||
expectedOut: &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: 10,
|
||||
MinHTLCMsat: 20,
|
||||
MaxHTLCMsat: 30,
|
||||
BaseFee: 40,
|
||||
FeeRate: 50,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new HTLC minimum and maximums are not " +
|
||||
"compatible",
|
||||
incMultiplier: 2,
|
||||
decMultiplier: 0.5,
|
||||
policyIn: &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: 10,
|
||||
MinHTLCMsat: 30,
|
||||
MaxHTLCMsat: 100,
|
||||
BaseFee: 40,
|
||||
FeeRate: 50,
|
||||
},
|
||||
expectedOut: &BlindedHopPolicy{
|
||||
CLTVExpiryDelta: 20,
|
||||
MinHTLCMsat: 30,
|
||||
MaxHTLCMsat: 100,
|
||||
BaseFee: 80,
|
||||
FeeRate: 100,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
bufferedPolicy, err := AddPolicyBuffer(
|
||||
test.policyIn, test.incMultiplier,
|
||||
test.decMultiplier,
|
||||
)
|
||||
if test.expectedError != "" {
|
||||
require.ErrorContains(
|
||||
t, err, test.expectedError,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
require.Equal(t, test.expectedOut, bufferedPolicy)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestBlindedPathAccumulatedPolicyCalc tests the logic for calculating the
|
||||
// accumulated routing policies of a blinded route against an example mentioned
|
||||
// in the spec document:
|
||||
// https://github.com/lightning/bolts/blob/master/proposals/route-blinding.md
|
||||
func TestBlindedPathAccumulatedPolicyCalc(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// In the spec example, the blinded route is:
|
||||
// Carol -> Bob -> Alice
|
||||
// And Alice chooses the following buffered policy for both the C->B
|
||||
// and B->A edges.
|
||||
nodePolicy := &record.PaymentRelayInfo{
|
||||
FeeRate: 500,
|
||||
BaseFee: 100,
|
||||
CltvExpiryDelta: 144,
|
||||
}
|
||||
|
||||
hopPolicies := []*record.PaymentRelayInfo{
|
||||
nodePolicy,
|
||||
nodePolicy,
|
||||
}
|
||||
|
||||
// Alice's minimum final expiry delta is chosen to be 12.
|
||||
aliceMinFinalExpDelta := uint16(12)
|
||||
|
||||
totalBase, totalRate, totalCLTVDelta := calcBlindedPathPolicies(
|
||||
hopPolicies, aliceMinFinalExpDelta,
|
||||
)
|
||||
|
||||
require.Equal(t, lnwire.MilliSatoshi(201), totalBase)
|
||||
require.EqualValues(t, 1001, totalRate)
|
||||
require.EqualValues(t, 300, totalCLTVDelta)
|
||||
}
|
||||
|
||||
// TestPadBlindedHopInfo asserts that the padding of blinded hop data is done
|
||||
// correctly and that it takes the expected number of iterations.
|
||||
func TestPadBlindedHopInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
expectedIterations int
|
||||
expectedFinalSize int
|
||||
|
||||
// We will use the PathID field of BlindedRouteData to set an
|
||||
// initial payload size. The ints in this list represent the
|
||||
// size of each PathID.
|
||||
pathIDs []int
|
||||
|
||||
// existingPadding is a map from entry index (based on the
|
||||
// pathIDs set) to the number of pre-existing padding bytes to
|
||||
// add.
|
||||
existingPadding map[int]int
|
||||
|
||||
// prePad is true if all the hop payloads should be pre-padded
|
||||
// with a zero length TLV Padding field.
|
||||
prePad bool
|
||||
}{
|
||||
{
|
||||
// If there is only one entry, then no padding is
|
||||
// expected.
|
||||
name: "single entry",
|
||||
expectedIterations: 1,
|
||||
pathIDs: []int{10},
|
||||
|
||||
// The final size will be 12 since the path ID is 10
|
||||
// bytes, and it will be prefixed by type and length
|
||||
// bytes.
|
||||
expectedFinalSize: 12,
|
||||
},
|
||||
{
|
||||
// All the payloads are the same size from the get go
|
||||
// meaning that no padding is expected.
|
||||
name: "all start equal",
|
||||
expectedIterations: 1,
|
||||
pathIDs: []int{10, 10, 10},
|
||||
|
||||
// The final size will be 12 since the path ID is 10
|
||||
// bytes, and it will be prefixed by type and length
|
||||
// bytes.
|
||||
expectedFinalSize: 12,
|
||||
},
|
||||
{
|
||||
// If the blobs differ by 1 byte it will take 4
|
||||
// iterations:
|
||||
// 1) padding of 1 is added to entry 2 which will
|
||||
// increase its size by 3 bytes since padding does
|
||||
// not yet exist for it.
|
||||
// 2) Now entry 1 will be short 2 bytes. It will be
|
||||
// padded by 2 bytes but again since it is a new
|
||||
// padding field, 4 bytes are added.
|
||||
// 3) Finally, entry 2 is padded by 1 extra. Since it
|
||||
// already does have a padding field, this does end
|
||||
// up adding only 1 extra byte.
|
||||
// 4) The fourth iteration determines that all are now
|
||||
// the same size.
|
||||
name: "differ by 1 - no pre-padding",
|
||||
expectedIterations: 4,
|
||||
pathIDs: []int{4, 3},
|
||||
expectedFinalSize: 10,
|
||||
},
|
||||
{
|
||||
// By pre-padding the payloads with a zero byte padding,
|
||||
// we can reduce the number of iterations quite a bit.
|
||||
name: "differ by 1 - with pre-padding",
|
||||
expectedIterations: 2,
|
||||
pathIDs: []int{4, 3},
|
||||
expectedFinalSize: 8,
|
||||
prePad: true,
|
||||
},
|
||||
{
|
||||
name: "existing padding and diff of 1",
|
||||
expectedIterations: 2,
|
||||
pathIDs: []int{10, 11},
|
||||
|
||||
// By adding some existing padding, the type and length
|
||||
// field for the padding are already accounted for in
|
||||
// the first iteration, and so we only expect two
|
||||
// iterations to get the payloads to match size here:
|
||||
// one for adding a single extra byte to the smaller
|
||||
// payload and another for confirming the sizes match.
|
||||
existingPadding: map[int]int{0: 1, 1: 1},
|
||||
expectedFinalSize: 16,
|
||||
},
|
||||
{
|
||||
// In this test, we test a BigSize bucket shift. We do
|
||||
// this by setting the initial path ID's of both entries
|
||||
// to a 0 size which means the total encoding of those
|
||||
// will be 2 bytes (to encode the type and length). Then
|
||||
// for the initial padding, we let the first entry be
|
||||
// 253 bytes long which is just long enough to be in
|
||||
// the second BigSize bucket which uses 3 bytes to
|
||||
// encode the value length. We make the second entry
|
||||
// 252 bytes which still puts it in the first bucket
|
||||
// which uses 1 byte for the length. The difference in
|
||||
// overall packet size will be 3 bytes (the first entry
|
||||
// has 2 more length bytes and 1 more value byte). So
|
||||
// the function will try to pad the second entry by 3
|
||||
// bytes (iteration 1). This will however result in the
|
||||
// second entry shifting to the second BigSize bucket
|
||||
// meaning it will gain an additional 2 bytes for the
|
||||
// new length encoding meaning that overall it gains 5
|
||||
// bytes in size. This will result in another iteration
|
||||
// which will result in padding the first entry with an
|
||||
// extra 2 bytes to meet the second entry's new size
|
||||
// (iteration 2). One more iteration (3) is then done
|
||||
// to confirm that all entries are now the same size.
|
||||
name: "big size bucket shift",
|
||||
expectedIterations: 3,
|
||||
|
||||
			// The payload sizes here are driven by the existing
			// padding entries below rather than by the path IDs.
			pathIDs: []int{0, 0},
			existingPadding: map[int]int{0: 253, 1: 252},
			expectedFinalSize: 261,
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			// If the test includes existing padding, then make sure
			// that the number of existing padding entries is equal
			// to the number of PathID entries.
			if test.existingPadding != nil {
				require.Len(t, test.existingPadding,
					len(test.pathIDs))
			}

			hopDataSet := make([]*hopData, len(test.pathIDs))
			for i, l := range test.pathIDs {
				pathID := tlv.SomeRecordT(
					tlv.NewPrimitiveRecord[tlv.TlvType6](
						make([]byte, l),
					),
				)
				data := &record.BlindedRouteData{
					PathID: pathID,
				}

				if test.existingPadding != nil {
					//nolint:lll
					padding := tlv.SomeRecordT(
						tlv.NewPrimitiveRecord[tlv.TlvType1](
							make([]byte, test.existingPadding[i]),
						),
					)

					data.Padding = padding
				}

				hopDataSet[i] = &hopData{data: data}
			}

			hopInfo, stats, err := padHopInfo(
				hopDataSet, test.prePad,
			)
			require.NoError(t, err)
			require.Equal(t, test.expectedIterations,
				stats.numIterations)
			require.Equal(t, test.expectedFinalSize,
				stats.finalPaddedSize)

			// We expect all resulting blobs to be the same size.
			for _, info := range hopInfo {
				require.Len(
					t, info.PlainText,
					test.expectedFinalSize,
				)
			}
		})
	}
}
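
The "big size bucket shift" case above leans on how BigSize length prefixes grow at the 252/253 boundary defined in BOLT #1. The following is a minimal standalone sketch of that size rule, separate from the tlv package the test uses:

package main

import "fmt"

// bigSizeLen returns how many bytes a BigSize integer occupies on the wire
// per BOLT #1: values up to 0xfc take 1 byte, values up to 0xffff take 3
// bytes, values up to 0xffffffff take 5 bytes, and anything larger takes 9.
func bigSizeLen(v uint64) int {
	switch {
	case v <= 0xfc:
		return 1
	case v <= 0xffff:
		return 3
	case v <= 0xffffffff:
		return 5
	default:
		return 9
	}
}

func main() {
	// A 252 byte value keeps a 1 byte length prefix, while 253 needs 3
	// bytes. That jump is the bucket shift the test case above accounts
	// for when padding pushes a payload across the boundary.
	fmt.Println(bigSizeLen(252), bigSizeLen(253)) // prints: 1 3
}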

// TestPadBlindedHopInfoBlackBox tests the padHopInfo function via the
// quick.Check testing function. It generates a random set of hopData and
// asserts that the resulting padded set always has the same encoded length.
func TestPadBlindedHopInfoBlackBox(t *testing.T) {
	fn := func(data hopDataList) bool {
		resultList, _, err := padHopInfo(data, true)
		require.NoError(t, err)

		// There should be a resulting sphinx.HopInfo struct for each
		// hopData passed to the padHopInfo function.
		if len(resultList) != len(data) {
			return false
		}

		// There is nothing left to check if the input set was empty
		// to start with.
		if len(data) == 0 {
			return true
		}

		// Now, assert that the encoded size of each item is the same.
		// Get the size of the first item as a base point.
		payloadSize := len(resultList[0].PlainText)

		// All the other entries should have the same encoded size.
		for i := 1; i < len(resultList); i++ {
			if len(resultList[i].PlainText) != payloadSize {
				return false
			}
		}

		return true
	}

	require.NoError(t, quick.Check(fn, nil))
}

type hopDataList []*hopData

// Generate returns a random instance of the hopDataList type.
//
// NOTE: this is part of the quick.Generator interface.
func (h hopDataList) Generate(rand *rand.Rand, size int) reflect.Value {
	data := make(hopDataList, rand.Intn(size))
	for i := 0; i < len(data); i++ {
		data[i] = &hopData{
			data: genBlindedRouteData(rand),
			nodeID: pubkey,
		}
	}

	return reflect.ValueOf(data)
}

// A compile-time check to ensure that hopDataList implements the
// quick.Generator interface.
var _ quick.Generator = (*hopDataList)(nil)

// sometimesDo calls the given function with a 50% probability.
func sometimesDo(fn func(), rand *rand.Rand) {
	if rand.Intn(2) == 0 {
		return
	}

	fn()
}

// genBlindedRouteData generates a random record.BlindedRouteData object.
func genBlindedRouteData(rand *rand.Rand) *record.BlindedRouteData {
	var data record.BlindedRouteData

	sometimesDo(func() {
		data.Padding = tlv.SomeRecordT(
			tlv.NewPrimitiveRecord[tlv.TlvType1](
				make([]byte, rand.Intn(1000000)),
			),
		)
	}, rand)

	sometimesDo(func() {
		data.ShortChannelID = tlv.SomeRecordT(
			tlv.NewRecordT[tlv.TlvType2](lnwire.ShortChannelID{
				BlockHeight: rand.Uint32(),
				TxIndex: rand.Uint32(),
				TxPosition: uint16(rand.Uint32()),
			}),
		)
	}, rand)

	sometimesDo(func() {
		data.NextNodeID = tlv.SomeRecordT(
			tlv.NewPrimitiveRecord[tlv.TlvType4](pubkey),
		)
	}, rand)

	sometimesDo(func() {
		data.PathID = tlv.SomeRecordT(
			tlv.NewPrimitiveRecord[tlv.TlvType6](
				make([]byte, rand.Intn(100)),
			),
		)
	}, rand)

	sometimesDo(func() {
		data.NextBlindingOverride = tlv.SomeRecordT(
			tlv.NewPrimitiveRecord[tlv.TlvType8](pubkey),
		)
	}, rand)

	sometimesDo(func() {
		data.RelayInfo = tlv.SomeRecordT(
			tlv.NewRecordT[tlv.TlvType10](record.PaymentRelayInfo{
				CltvExpiryDelta: uint16(rand.Uint32()),
				FeeRate: rand.Uint32(),
				BaseFee: lnwire.MilliSatoshi(
					rand.Uint32(),
				),
			}),
		)
	}, rand)

	sometimesDo(func() {
		data.Constraints = tlv.SomeRecordT(
			tlv.NewRecordT[tlv.TlvType12](record.PaymentConstraints{
				MaxCltvExpiry: rand.Uint32(),
				HtlcMinimumMsat: lnwire.MilliSatoshi(
					rand.Uint32(),
				),
			}),
		)
	}, rand)

	return &data
}

// TestBuildBlindedPath tests the logic for constructing a blinded path against
// an example mentioned in this spec document:
// https://github.com/lightning/bolts/blob/master/proposals/route-blinding.md
// This example does not use any dummy hops.
func TestBuildBlindedPath(t *testing.T) {
	// Alice chooses the following path to herself for blinded path
	// construction:
	// Carol -> Bob -> Alice.
	// Let's construct the corresponding route.Route for this which will be
	// returned from the `FindRoutes` config callback.
	var (
		privC, pkC = btcec.PrivKeyFromBytes([]byte{1})
		privB, pkB = btcec.PrivKeyFromBytes([]byte{2})
		privA, pkA = btcec.PrivKeyFromBytes([]byte{3})

		carol = route.NewVertex(pkC)
		bob = route.NewVertex(pkB)
		alice = route.NewVertex(pkA)

		chanCB = uint64(1)
		chanBA = uint64(2)
	)

	realRoute := &route.Route{
		SourcePubKey: carol,
		Hops: []*route.Hop{
			{
				PubKeyBytes: bob,
				ChannelID: chanCB,
			},
			{
				PubKeyBytes: alice,
				ChannelID: chanBA,
			},
		},
	}

	realPolicies := map[uint64]*models.ChannelEdgePolicy{
		chanCB: {
			ChannelID: chanCB,
			ToNode: bob,
		},
		chanBA: {
			ChannelID: chanBA,
			ToNode: alice,
		},
	}

	paths, err := BuildBlindedPaymentPaths(&BuildBlindedPathCfg{
		FindRoutes: func(_ lnwire.MilliSatoshi) ([]*route.Route,
			error) {

			return []*route.Route{realRoute}, nil
		},
		FetchChannelEdgesByID: func(chanID uint64) (
			*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
			*models.ChannelEdgePolicy, error) {

			return nil, realPolicies[chanID], nil, nil
		},
		BestHeight: func() (uint32, error) {
			return 1000, nil
		},
		// In the spec example, all the policies get replaced with
		// the same static values.
		AddPolicyBuffer: func(_ *BlindedHopPolicy) (
			*BlindedHopPolicy, error) {

			return &BlindedHopPolicy{
				FeeRate: 500,
				BaseFee: 100,
				CLTVExpiryDelta: 144,
				MinHTLCMsat: 1000,
				MaxHTLCMsat: lnwire.MaxMilliSatoshi,
			}, nil
		},
		PathID: []byte{1, 2, 3},
		ValueMsat: 1000,
		MinFinalCLTVExpiryDelta: 12,
		BlocksUntilExpiry: 200,
	})
	require.NoError(t, err)
	require.Len(t, paths, 1)

	path := paths[0]

	// Check that all the accumulated policy values are correct.
	require.EqualValues(t, 201, path.FeeBaseMsat)
	require.EqualValues(t, 1001, path.FeeRate)
	require.EqualValues(t, 300, path.CltvExpiryDelta)
	require.EqualValues(t, 1000, path.HTLCMinMsat)
	require.EqualValues(t, lnwire.MaxMilliSatoshi, path.HTLCMaxMsat)

	// Now we check the hops.
	require.Len(t, path.Hops, 3)

	// Assert that all the encrypted recipient blobs have been padded such
	// that they are all the same size.
	require.Len(t, path.Hops[0].CipherText, len(path.Hops[1].CipherText))
	require.Len(t, path.Hops[1].CipherText, len(path.Hops[2].CipherText))

	// The first hop should have the real pub key of the introduction
	// node: Carol.
	hop := path.Hops[0]
	require.True(t, hop.BlindedNodePub.IsEqual(pkC))

	// As Carol, let's decode the hop data and assert that all expected
	// values have been included.
	var (
		blindingPoint = path.FirstEphemeralBlindingPoint
		data *record.BlindedRouteData
	)

	// Check that Carol's info is correct.
	data, blindingPoint = decryptAndDecodeHopData(
		t, privC, blindingPoint, hop.CipherText,
	)

	require.Equal(
		t, lnwire.NewShortChanIDFromInt(chanCB),
		data.ShortChannelID.UnwrapOrFail(t).Val,
	)

	require.Equal(t, record.PaymentRelayInfo{
		CltvExpiryDelta: 144,
		FeeRate: 500,
		BaseFee: 100,
	}, data.RelayInfo.UnwrapOrFail(t).Val)

	require.Equal(t, record.PaymentConstraints{
		MaxCltvExpiry: 1500,
		HtlcMinimumMsat: 1000,
	}, data.Constraints.UnwrapOrFail(t).Val)

	// Check that all Bob's info is correct.
	hop = path.Hops[1]
	data, blindingPoint = decryptAndDecodeHopData(
		t, privB, blindingPoint, hop.CipherText,
	)

	require.Equal(
		t, lnwire.NewShortChanIDFromInt(chanBA),
		data.ShortChannelID.UnwrapOrFail(t).Val,
	)

	require.Equal(t, record.PaymentRelayInfo{
		CltvExpiryDelta: 144,
		FeeRate: 500,
		BaseFee: 100,
	}, data.RelayInfo.UnwrapOrFail(t).Val)

	require.Equal(t, record.PaymentConstraints{
		MaxCltvExpiry: 1356,
		HtlcMinimumMsat: 1000,
	}, data.Constraints.UnwrapOrFail(t).Val)

	// Check that all Alice's info is correct.
	hop = path.Hops[2]
	data, _ = decryptAndDecodeHopData(
		t, privA, blindingPoint, hop.CipherText,
	)
	require.True(t, data.ShortChannelID.IsNone())
	require.True(t, data.RelayInfo.IsNone())
	require.Equal(t, record.PaymentConstraints{
		MaxCltvExpiry: 1212,
		HtlcMinimumMsat: 1000,
	}, data.Constraints.UnwrapOrFail(t).Val)
	require.Equal(t, []byte{1, 2, 3}, data.PathID.UnwrapOrFail(t).Val)
}
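
A quick cross-check of the MaxCltvExpiry values asserted above, using only the values configured in this test: the recipient's constraint is BestHeight + BlocksUntilExpiry + MinFinalCLTVExpiryDelta = 1000 + 200 + 12 = 1212, and each hop closer to the introduction node adds its buffered CLTV delta of 144, giving 1212 + 144 = 1356 for Bob's payload and 1356 + 144 = 1500 for Carol's.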

// TestBuildBlindedPathWithDummyHops tests the construction of a blinded path
// which includes dummy hops.
func TestBuildBlindedPathWithDummyHops(t *testing.T) {
	// Alice chooses the following path to herself for blinded path
	// construction:
	// Carol -> Bob -> Alice.
	// Let's construct the corresponding route.Route for this which will be
	// returned from the `FindRoutes` config callback.
	var (
		privC, pkC = btcec.PrivKeyFromBytes([]byte{1})
		privB, pkB = btcec.PrivKeyFromBytes([]byte{2})
		privA, pkA = btcec.PrivKeyFromBytes([]byte{3})

		carol = route.NewVertex(pkC)
		bob = route.NewVertex(pkB)
		alice = route.NewVertex(pkA)

		chanCB = uint64(1)
		chanBA = uint64(2)
	)

	realRoute := &route.Route{
		SourcePubKey: carol,
		Hops: []*route.Hop{
			{
				PubKeyBytes: bob,
				ChannelID: chanCB,
			},
			{
				PubKeyBytes: alice,
				ChannelID: chanBA,
			},
		},
	}

	realPolicies := map[uint64]*models.ChannelEdgePolicy{
		chanCB: {
			ChannelID: chanCB,
			ToNode: bob,
		},
		chanBA: {
			ChannelID: chanBA,
			ToNode: alice,
		},
	}

	paths, err := BuildBlindedPaymentPaths(&BuildBlindedPathCfg{
		FindRoutes: func(_ lnwire.MilliSatoshi) ([]*route.Route,
			error) {

			return []*route.Route{realRoute}, nil
		},
		FetchChannelEdgesByID: func(chanID uint64) (
			*models.ChannelEdgeInfo, *models.ChannelEdgePolicy,
			*models.ChannelEdgePolicy, error) {

			policy, ok := realPolicies[chanID]
			if !ok {
				return nil, nil, nil,
					fmt.Errorf("edge not found")
			}

			return nil, policy, nil, nil
		},
		BestHeight: func() (uint32, error) {
			return 1000, nil
		},
		// In the spec example, all the policies get replaced with
		// the same static values.
		AddPolicyBuffer: func(_ *BlindedHopPolicy) (
			*BlindedHopPolicy, error) {

			return &BlindedHopPolicy{
				FeeRate: 500,
				BaseFee: 100,
				CLTVExpiryDelta: 144,
				MinHTLCMsat: 1000,
				MaxHTLCMsat: lnwire.MaxMilliSatoshi,
			}, nil
		},
		PathID: []byte{1, 2, 3},
		ValueMsat: 1000,
		MinFinalCLTVExpiryDelta: 12,
		BlocksUntilExpiry: 200,

		// By setting the minimum number of hops to 4, we force 2 dummy
		// hops to be added to the real route.
		MinNumHops: 4,

		DummyHopPolicy: &BlindedHopPolicy{
			CLTVExpiryDelta: 50,
			FeeRate: 100,
			BaseFee: 100,
			MinHTLCMsat: 1000,
			MaxHTLCMsat: lnwire.MaxMilliSatoshi,
		},
	})
	require.NoError(t, err)
	require.Len(t, paths, 1)

	path := paths[0]

	// Check that all the accumulated policy values are correct.
	require.EqualValues(t, 403, path.FeeBaseMsat)
	require.EqualValues(t, 1203, path.FeeRate)
	require.EqualValues(t, 400, path.CltvExpiryDelta)
	require.EqualValues(t, 1000, path.HTLCMinMsat)
	require.EqualValues(t, lnwire.MaxMilliSatoshi, path.HTLCMaxMsat)

	// Now we check the hops.
	require.Len(t, path.Hops, 5)

	// Assert that all the encrypted recipient blobs have been padded such
	// that they are all the same size.
	require.Len(t, path.Hops[0].CipherText, len(path.Hops[1].CipherText))
	require.Len(t, path.Hops[1].CipherText, len(path.Hops[2].CipherText))
	require.Len(t, path.Hops[2].CipherText, len(path.Hops[3].CipherText))
	require.Len(t, path.Hops[3].CipherText, len(path.Hops[4].CipherText))

	// The first hop should have the real pub key of the introduction
	// node: Carol.
	hop := path.Hops[0]
	require.True(t, hop.BlindedNodePub.IsEqual(pkC))

	// As Carol, let's decode the hop data and assert that all expected
	// values have been included.
	var (
		blindingPoint = path.FirstEphemeralBlindingPoint
		data *record.BlindedRouteData
	)

	// Check that Carol's info is correct.
	data, blindingPoint = decryptAndDecodeHopData(
		t, privC, blindingPoint, hop.CipherText,
	)

	require.Equal(
		t, lnwire.NewShortChanIDFromInt(chanCB),
		data.ShortChannelID.UnwrapOrFail(t).Val,
	)

	require.Equal(t, record.PaymentRelayInfo{
		CltvExpiryDelta: 144,
		FeeRate: 500,
		BaseFee: 100,
	}, data.RelayInfo.UnwrapOrFail(t).Val)

	require.Equal(t, record.PaymentConstraints{
		MaxCltvExpiry: 1600,
		HtlcMinimumMsat: 1000,
	}, data.Constraints.UnwrapOrFail(t).Val)

	// Check that all Bob's info is correct.
	hop = path.Hops[1]
	data, blindingPoint = decryptAndDecodeHopData(
		t, privB, blindingPoint, hop.CipherText,
	)

	require.Equal(
		t, lnwire.NewShortChanIDFromInt(chanBA),
		data.ShortChannelID.UnwrapOrFail(t).Val,
	)

	require.Equal(t, record.PaymentRelayInfo{
		CltvExpiryDelta: 144,
		FeeRate: 500,
		BaseFee: 100,
	}, data.RelayInfo.UnwrapOrFail(t).Val)

	require.Equal(t, record.PaymentConstraints{
		MaxCltvExpiry: 1456,
		HtlcMinimumMsat: 1000,
	}, data.Constraints.UnwrapOrFail(t).Val)

	// Check that all Alice's info is correct. The payload should contain
	// a next_node_id field that is equal to Alice's public key. This
	// indicates to Alice that she should continue peeling the onion.
	hop = path.Hops[2]
	data, blindingPoint = decryptAndDecodeHopData(
		t, privA, blindingPoint, hop.CipherText,
	)
	require.True(t, data.ShortChannelID.IsNone())
	require.True(t, data.RelayInfo.IsSome())
	require.True(t, data.Constraints.IsSome())
	require.Equal(t, pkA, data.NextNodeID.UnwrapOrFail(t).Val)

	// Alice should be able to decrypt the next payload with her private
	// key. This next payload is yet another dummy hop.
	hop = path.Hops[3]
	data, blindingPoint = decryptAndDecodeHopData(
		t, privA, blindingPoint, hop.CipherText,
	)
	require.True(t, data.ShortChannelID.IsNone())
	require.True(t, data.RelayInfo.IsSome())
	require.True(t, data.Constraints.IsSome())
	require.Equal(t, pkA, data.NextNodeID.UnwrapOrFail(t).Val)

	// Unwrapping one more time should reveal the final hop info for Alice.
	hop = path.Hops[4]
	data, _ = decryptAndDecodeHopData(
		t, privA, blindingPoint, hop.CipherText,
	)
	require.True(t, data.ShortChannelID.IsNone())
	require.True(t, data.RelayInfo.IsNone())
	require.Equal(t, record.PaymentConstraints{
		MaxCltvExpiry: 1212,
		HtlcMinimumMsat: 1000,
	}, data.Constraints.UnwrapOrFail(t).Val)
	require.Equal(t, []byte{1, 2, 3}, data.PathID.UnwrapOrFail(t).Val)
}
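
The dummy-hop totals asserted above follow the same bookkeeping: the CLTV delta is 144 + 144 + 50 + 50 + 12 = 400, and folding the two dummy policies (base 100 msat, rate 100 ppm) together with the two buffered real policies (base 100 msat, rate 500 ppm) through the pairwise aggregation sketched after TestBlindedPathAccumulatedPolicyCalc yields the 403 msat base fee and 1203 ppm fee rate.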

// TestSingleHopBlindedPath tests that blinded path construction is done
// correctly for the case where the destination node is also the introduction
// node.
func TestSingleHopBlindedPath(t *testing.T) {
	var (
		_, pkC = btcec.PrivKeyFromBytes([]byte{1})
		carol = route.NewVertex(pkC)
	)

	realRoute := &route.Route{
		SourcePubKey: carol,
		// No hops since Carol is both the introduction node and the
		// final destination node.
		Hops: []*route.Hop{},
	}

	paths, err := BuildBlindedPaymentPaths(&BuildBlindedPathCfg{
		FindRoutes: func(_ lnwire.MilliSatoshi) ([]*route.Route,
			error) {

			return []*route.Route{realRoute}, nil
		},
		BestHeight: func() (uint32, error) {
			return 1000, nil
		},
		PathID: []byte{1, 2, 3},
		ValueMsat: 1000,
		MinFinalCLTVExpiryDelta: 12,
		BlocksUntilExpiry: 200,
	})
	require.NoError(t, err)
	require.Len(t, paths, 1)

	path := paths[0]

	// Check that all the accumulated policy values are correct. Since this
	// is a unique case where the destination node is also the introduction
	// node, the accumulated fee and HTLC values should be zero and the
	// CLTV expiry delta should be equal to Carol's MinFinalCLTVExpiryDelta.
	require.EqualValues(t, 0, path.FeeBaseMsat)
	require.EqualValues(t, 0, path.FeeRate)
	require.EqualValues(t, 0, path.HTLCMinMsat)
	require.EqualValues(t, 0, path.HTLCMaxMsat)
	require.EqualValues(t, 12, path.CltvExpiryDelta)
}

func decryptAndDecodeHopData(t *testing.T, priv *btcec.PrivateKey,
	ephem *btcec.PublicKey, cipherText []byte) (*record.BlindedRouteData,
	*btcec.PublicKey) {

	router := sphinx.NewRouter(
		&keychain.PrivKeyECDH{PrivKey: priv}, nil,
	)

	decrypted, err := router.DecryptBlindedHopData(ephem, cipherText)
	require.NoError(t, err)

	buf := bytes.NewBuffer(decrypted)
	routeData, err := record.DecodeBlindedRouteData(buf)
	require.NoError(t, err)

	nextEphem, err := router.NextEphemeral(ephem)
	require.NoError(t, err)

	return routeData, nextEphem
}
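
For context on what this helper relies on: under the route-blinding scheme, a hop derives a shared secret ss(i) = SHA256(k(i) * E(i)) from its node key and the current ephemeral blinding point, uses a key derived from that secret to decrypt its encrypted recipient data, and the blinding point then advances as E(i+1) = SHA256(E(i) || ss(i)) * E(i). Those are the two steps the DecryptBlindedHopData and NextEphemeral calls are used for here, which is why the tests can keep unwrapping payloads hop by hop.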

routing/blindedpath/log.go (new file)
@ -0,0 +1,31 @@
package blindedpath

import (
	"github.com/btcsuite/btclog"
	"github.com/lightningnetwork/lnd/build"
)

// log is a logger that is initialized with no output filters. This means the
// package will not perform any logging by default until the caller requests
// it.
var log btclog.Logger

const Subsystem = "BLPT"

// The default amount of logging is none.
func init() {
	UseLogger(build.NewSubLogger(Subsystem, nil))
}

// DisableLog disables all library log output. Logging output is disabled by
// default until UseLogger is called.
func DisableLog() {
	UseLogger(btclog.Disabled)
}

// UseLogger uses a specified Logger to output package logging info. This
// should be used in preference to SetLogWriter if the caller is also using
// btclog.
func UseLogger(logger btclog.Logger) {
	log = logger
}
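
Inside lnd this sub-logger is wired up through the daemon's usual logging configuration, but as a rough standalone sketch (the backend and writer below are illustrative and not part of this diff), a caller using btclog directly could hook the package up like this:

package main

import (
	"os"

	"github.com/btcsuite/btclog"
	"github.com/lightningnetwork/lnd/routing/blindedpath"
)

func main() {
	// Create a btclog backend writing to stdout and hand a "BLPT" tagged
	// logger to the package. Without a call like this the package stays
	// silent, since its default logger has no output.
	backend := btclog.NewBackend(os.Stdout)
	logger := backend.Logger(blindedpath.Subsystem)
	logger.SetLevel(btclog.LevelDebug)

	blindedpath.UseLogger(logger)
}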

rpcserver.go
@ -5772,10 +5772,10 @@ func (r *rpcServer) AddInvoice(ctx context.Context,
	defaultDelta := r.cfg.Bitcoin.TimeLockDelta

	blindingRestrictions := &routing.BlindedPathRestrictions{
		MinDistanceFromIntroNode: r.server.cfg.Invoices.BlindedPaths.
		MinDistanceFromIntroNode: r.server.cfg.Routing.BlindedPaths.
			MinNumRealHops,
		NumHops: r.server.cfg.Invoices.BlindedPaths.NumHops,
		MaxNumPaths: r.server.cfg.Invoices.BlindedPaths.MaxNumPaths,
		NumHops: r.server.cfg.Routing.BlindedPaths.NumHops,
		MaxNumPaths: r.server.cfg.Routing.BlindedPaths.MaxNumPaths,
	}

	addInvoiceCfg := &invoicesrpc.AddInvoiceConfig{
@ -5812,9 +5812,9 @@ func (r *rpcServer) AddInvoice(ctx context.Context,
		},
		GetAlias: r.server.aliasMgr.GetPeerAlias,
		BestHeight: r.server.cc.BestBlockTracker.BestHeight,
		BlindedRoutePolicyIncrMultiplier: r.server.cfg.Invoices.
		BlindedRoutePolicyIncrMultiplier: r.server.cfg.Routing.
			BlindedPaths.PolicyIncreaseMultiplier,
		BlindedRoutePolicyDecrMultiplier: r.server.cfg.Invoices.
		BlindedRoutePolicyDecrMultiplier: r.server.cfg.Routing.
			BlindedPaths.PolicyDecreaseMultiplier,
		QueryBlindedRoutes: func(amt lnwire.MilliSatoshi) (
			[]*route.Route, error) {
@ -5825,7 +5825,7 @@ func (r *rpcServer) AddInvoice(ctx context.Context,
				blindingRestrictions,
			)
		},
		MinNumHops: r.server.cfg.Invoices.BlindedPaths.NumHops,
		MinNumHops: r.server.cfg.Routing.BlindedPaths.NumHops,
	}

	value, err := lnrpc.UnmarshallAmt(invoice.Value, invoice.ValueMsat)

@ -1649,41 +1649,6 @@
; enough to prevent force closes.
; invoices.holdexpirydelta=12

; The minimum number of real (non-dummy) blinded hops to select for a blinded
; path. This doesn't include our node, so if the maximum is 1, then the
; shortest paths will contain our node along with an introduction node hop.
; invoices.blinding.min-num-real-hops=1

; The number of hops to include in a blinded path. This does not include
; our node, so if is is 1, then the path will at least contain our node along
; with an introduction node hop. If it is 0, then it will use this node as
; the introduction node. This number must be greater than or equal to the
; the number of real hops (invoices.blinding.min-num-real-hops). Any paths
; shorter than this number will be padded with dummy hops.
; invoices.blinding.num-hops=2

; The maximum number of blinded paths to select and add to an invoice.
; invoices.blinding.max-num-paths=3

; The amount by which to increase certain policy values of hops on a blinded
; path in order to add a probing buffer. The higher this multiplier, the more
; buffer is added to the policy values of hops along a blinded path meaning
; that if they were to increase their policy values before the blinded path
; expires, the better the chances that the path would still be valid meaning
; that the path is less prone to probing attacks. However, if the multiplier
; is too high, the resulting buffered fees might be too much for the payer.
; invoices.blinding.policy-increase-multiplier=1.1

; The amount by which to decrease certain policy values of hops on a blinded
; path in order to add a probing buffer. The lower this multiplier, the more
; buffer is added to the policy values of hops along a blinded path meaning
; that if they were to increase their policy values before the blinded path
; expires, the better the chances that the path would still be valid meaning
; that the path is less prone to probing attacks. However, since this value
; is being applied to the MaxHTLC value of the route, the lower it is, the
; lower payment amount will need to be.
; invoices.blinding.policy-decrease-multiplier=0.9

[routing]

; DEPRECATED: This is now turned on by default for Neutrino (use
@ -1697,6 +1662,40 @@
; seen as being live from it's PoV.
; routing.strictgraphpruning=false

; The minimum number of real (non-dummy) blinded hops to select for a blinded
; path. This doesn't include our node, so if the minimum is 1, then the
; shortest paths will contain our node along with an introduction node hop.
; routing.blinding.min-num-real-hops=1

; The number of hops to include in a blinded path. This does not include
; our node, so if it is 1, then the path will at least contain our node along
; with an introduction node hop. If it is 0, then this node will be used as
; the introduction node. This number must be greater than or equal to the
; number of real hops (routing.blinding.min-num-real-hops). Any paths shorter
; than this number will be padded with dummy hops.
; routing.blinding.num-hops=2

; The maximum number of blinded paths to select and add to an invoice.
; routing.blinding.max-num-paths=3

; The amount by which to increase certain policy values of hops on a blinded
; path in order to add a probing buffer. The higher this multiplier, the more
; buffer is added to the policy values of hops along a blinded path. If those
; hops increase their policy values before the blinded path expires, a larger
; buffer improves the chances that the path remains valid, which also makes
; the path less prone to probing attacks. However, if the multiplier is too
; high, the resulting buffered fees might be too much for the payer.
; routing.blinding.policy-increase-multiplier=1.1

; The amount by which to decrease certain policy values of hops on a blinded
; path in order to add a probing buffer. The lower this multiplier, the more
; buffer is added: if hops decrease values such as their maximum HTLC before
; the blinded path expires, the path is more likely to remain valid, and so
; less prone to probing attacks. However, since this value is applied to the
; MaxHTLC value of the route, the lower it is, the lower the maximum payment
; amount the path can carry.
; routing.blinding.policy-decrease-multiplier=0.9
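
; As a rough illustration of the two multipliers above (not part of the
; shipped defaults): with policy-increase-multiplier=1.1, a hop advertising a
; base fee of 1000 msat would be buffered up to roughly 1000 * 1.1 = 1100 msat
; inside the blinded path, while with policy-decrease-multiplier=0.9 an
; advertised max HTLC of 100,000,000 msat would be advertised as roughly
; 100,000,000 * 0.9 = 90,000,000 msat.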

[sweeper]