routing+lnrpc: make capacity factor configurable

We make the capacity factor configurable via a routerrpc.apriori
parameter in lnd.conf. The capacity factor trades increased success
probability against a reduced set of channel candidates, which may lead
to increased fees. To let users choose whether the factor is active, we
add a config setting where a capacity fraction of 1.0 disables the
factor. We limit the capacity fraction to values between 0.75 and 1.0,
since lower values may discard too many channels.
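
For illustration, this is how the new option is meant to be set in lnd.conf. It
is a usage sketch based on the sample-lnd.conf hunk further down in this diff;
the [routerrpc] section placement is assumed:

    [routerrpc]
    ; Consider only 90% of a channel's capacity liquid when ranking channels.
    routerrpc.apriori.capacityfraction=0.9
    ; Setting the fraction to 1.0 disables the capacity factor entirely.
    ; routerrpc.apriori.capacityfraction=1.0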
Author: bitromortac
Date:   2023-02-24 11:59:44 +01:00
Parent: 73596ceac2
Commit: a73581610e
10 changed files with 113 additions and 47 deletions


@@ -433,6 +433,8 @@ in the lnwire package](https://github.com/lightningnetwork/lnd/pull/7303)
 * [A new probability model ("bimodal") is added which models channel based
   liquidities within a probability theory framework.](
   https://github.com/lightningnetwork/lnd/pull/6815)
+* [The a priori capacity factor is made configurable and its effect is
+  limited.](https://github.com/lightningnetwork/lnd/pull/7444)
 
 ## Configuration
 
 * Note that [this pathfinding change](https://github.com/lightningnetwork/lnd/pull/6815)


@@ -50,9 +50,10 @@ func DefaultConfig() *Config {
         MaxMcHistory:    routing.DefaultMaxMcHistory,
         McFlushInterval: routing.DefaultMcFlushInterval,
         AprioriConfig: &AprioriConfig{
             HopProbability:   routing.DefaultAprioriHopProbability,
             Weight:           routing.DefaultAprioriWeight,
             PenaltyHalfLife:  routing.DefaultPenaltyHalfLife,
+            CapacityFraction: routing.DefaultCapacityFraction,
         },
         BimodalConfig: &BimodalConfig{
             Scale: int64(routing.DefaultBimodalScaleMsat),
@@ -76,9 +77,10 @@ func GetRoutingConfig(cfg *Config) *RoutingConfig {
         MaxMcHistory:    cfg.MaxMcHistory,
         McFlushInterval: cfg.McFlushInterval,
         AprioriConfig: &AprioriConfig{
             HopProbability:   cfg.AprioriConfig.HopProbability,
             Weight:           cfg.AprioriConfig.Weight,
             PenaltyHalfLife:  cfg.AprioriConfig.PenaltyHalfLife,
+            CapacityFraction: cfg.AprioriConfig.CapacityFraction,
         },
         BimodalConfig: &BimodalConfig{
             Scale: cfg.BimodalConfig.Scale,


@@ -63,6 +63,9 @@ type AprioriConfig struct {
     // PenaltyHalfLife defines after how much time a penalized node or
     // channel is back at 50% probability.
     PenaltyHalfLife time.Duration `long:"penaltyhalflife" description:"Defines the duration after which a penalized node or channel is back at 50% probability"`
+
+    // CapacityFraction defines the fraction of channels' capacities that is considered liquid.
+    CapacityFraction float64 `long:"capacityfraction" description:"Defines the fraction of channels' capacities that is considered liquid. Valid values are in [0.75, 1]."`
 }
 
 // BimodalConfig defines parameters for the bimodal probability.


@@ -73,6 +73,7 @@ func newIntegratedRoutingContext(t *testing.T) *integratedRoutingContext {
         PenaltyHalfLife:       30 * time.Minute,
         AprioriHopProbability: 0.6,
         AprioriWeight:         0.5,
+        CapacityFraction:      testCapacityFraction,
     }
 
     estimator, err := NewAprioriEstimator(aCfg)
     require.NoError(t, err)


@@ -94,6 +94,7 @@ func (ctx *mcTestContext) restartMc() {
         PenaltyHalfLife:       testPenaltyHalfLife,
         AprioriHopProbability: testAprioriHopProbability,
         AprioriWeight:         testAprioriWeight,
+        CapacityFraction:      testCapacityFraction,
     }
 
     estimator, err := NewAprioriEstimator(aCfg)
     require.NoError(ctx.t, err)


@@ -12,11 +12,11 @@ import (
 )
 
 const (
-    // capacityCutoffFraction and capacitySmearingFraction define how
-    // capacity-related probability reweighting works.
-    // capacityCutoffFraction defines the fraction of the channel capacity
-    // at which the effect roughly sets in and capacitySmearingFraction
-    // defines over which range the factor changes from 1 to 0.
+    // CapacityFraction and capacitySmearingFraction define how
+    // capacity-related probability reweighting works. CapacityFraction
+    // defines the fraction of the channel capacity at which the effect
+    // roughly sets in and capacitySmearingFraction defines over which range
+    // the factor changes from 1 to 0.
     //
     // We may fall below the minimum required probability
     // (DefaultMinRouteProbability) when the amount comes close to the
@@ -34,19 +34,23 @@ const (
     // fulfill the requirement with capacityFactor(cap, cap) ~ 0.076 (see
     // tests).
 
-    // The capacityCutoffFraction is a trade-off between usage of the
-    // provided capacity and expected probability reduction when we send the
-    // full amount. The success probability in the random balance model can
-    // be approximated with P(a) = 1 - a/c, for amount a and capacity c. If
-    // we require a probability P(a) > 0.25, this translates into a value of
-    // 0.75 for a/c.
-    capacityCutoffFraction = 0.75
+    // DefaultCapacityFraction is the default value for CapacityFraction.
+    DefaultCapacityFraction = 0.75
 
     // We don't want to have a sharp drop of the capacity factor to zero at
     // capacityCutoffFraction, but a smooth smearing such that some residual
     // probability is left when spending the whole amount, see above.
     capacitySmearingFraction = 0.1
 
+    // minCapacityFraction is the minimum allowed value for
+    // CapacityFraction. The success probability in the random balance model
+    // (which may not be an accurate description of the liquidity
+    // distribution in the network) can be approximated with P(a) = 1 - a/c,
+    // for amount a and capacity c. If we require a probability P(a) = 0.25,
+    // this translates into a value of 0.75 for a/c. We limit this value in
+    // order to not discard too many channels.
+    minCapacityFraction = 0.75
+
     // AprioriEstimatorName is used to identify the apriori probability
     // estimator.
     AprioriEstimatorName = "apriori"
@@ -64,6 +68,11 @@ var (
     // ErrInvalidAprioriWeight is returned when we get an apriori weight
     // that is out of range.
     ErrInvalidAprioriWeight = errors.New("apriori weight must be in [0, 1]")
+
+    // ErrInvalidCapacityFraction is returned when we get a capacity
+    // fraction that is out of range.
+    ErrInvalidCapacityFraction = fmt.Errorf("capacity fraction must be in "+
+        "[%v, 1]", minCapacityFraction)
 )
 
 // AprioriConfig contains configuration for our probability estimator.
@@ -84,6 +93,12 @@ type AprioriConfig struct {
     // probability completely and only base the probability on historical
     // results, unless there are none available.
     AprioriWeight float64
+
+    // CapacityFraction is the fraction of a channel's capacity that we
+    // consider to have liquidity. For amounts that come close to or exceed
+    // the fraction, an additional penalty is applied. A value of 1.0
+    // disables the capacityFactor.
+    CapacityFraction float64
 }
 
 // validate checks the configuration of the estimator for allowed values.
@@ -100,6 +115,10 @@ func (p AprioriConfig) validate() error {
         return ErrInvalidAprioriWeight
     }
 
+    if p.CapacityFraction < minCapacityFraction || p.CapacityFraction > 1 {
+        return ErrInvalidCapacityFraction
+    }
+
     return nil
 }
@@ -109,6 +128,7 @@
         PenaltyHalfLife:       DefaultPenaltyHalfLife,
         AprioriHopProbability: DefaultAprioriHopProbability,
         AprioriWeight:         DefaultAprioriWeight,
+        CapacityFraction:      DefaultCapacityFraction,
     }
 }
@@ -155,9 +175,10 @@
 func (p *AprioriEstimator) String() string {
     return fmt.Sprintf("estimator type: %v, penalty halflife time: %v, "+
         "apriori hop probability: %v, apriori weight: %v, previous "+
-        "success probability: %v", AprioriEstimatorName,
-        p.PenaltyHalfLife, p.AprioriHopProbability, p.AprioriWeight,
-        p.prevSuccessProbability)
+        "success probability: %v, capacity fraction: %v",
+        AprioriEstimatorName, p.PenaltyHalfLife,
+        p.AprioriHopProbability, p.AprioriWeight,
+        p.prevSuccessProbability, p.CapacityFraction)
 }
 
 // getNodeProbability calculates the probability for connections from a node
@@ -169,7 +190,9 @@
     // We reduce the apriori hop probability if the amount comes close to
     // the capacity.
-    apriori := p.AprioriHopProbability * capacityFactor(amt, capacity)
+    apriori := p.AprioriHopProbability * capacityFactor(
+        amt, capacity, p.CapacityFraction,
+    )
 
     // If the channel history is not to be taken into account, we can return
     // early here with the configured a priori probability.
@@ -246,7 +269,15 @@
 // and 0 for amt >> cutoffMsat. The function drops significantly when amt
 // reaches cutoffMsat. smearingMsat determines over which scale the reduction
 // takes place.
-func capacityFactor(amt lnwire.MilliSatoshi, capacity btcutil.Amount) float64 {
+func capacityFactor(amt lnwire.MilliSatoshi, capacity btcutil.Amount,
+    capacityCutoffFraction float64) float64 {
+
+    // The special value of 1.0 for capacityFactor disables any effect from
+    // this factor.
+    if capacityCutoffFraction == 1 {
+        return 1.0
+    }
+
     // If we don't have information about the capacity, which can be the
     // case for hop hints or local channels, we return unity to not alter
     // anything.
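
The comments above describe the shape of the capacity penalty only in words.
The following standalone sketch (illustrative only, not lnd's internal
implementation; the function name and the main wrapper are made up for this
example) shows a logistic factor that is consistent with the constants used
here: it is 0.5 at the cutoff fraction, it leaves a residual of roughly 0.076
when the full capacity is spent with a fraction of 0.75 and a smearing of 0.1,
and it returns 1.0 when the fraction is set to 1.0.

package main

import (
	"fmt"
	"math"
)

// capacityFactorSketch models the reweighting described above: a logistic
// drop-off around cutoff = fraction * capacity, smeared over a range of
// 0.1 * capacity.
func capacityFactorSketch(amtMsat, capacityMsat, fraction float64) float64 {
	// A fraction of 1.0 disables the factor entirely.
	if fraction == 1 {
		return 1.0
	}

	// Unknown capacity (hop hints, local channels): leave the a priori
	// probability unchanged.
	if capacityMsat == 0 {
		return 1.0
	}

	// Amounts above the capacity cannot be sent at all.
	if amtMsat > capacityMsat {
		return 0
	}

	cutoffMsat := fraction * capacityMsat
	smearingMsat := 0.1 * capacityMsat

	// Mirrored logistic: close to 1 for small amounts, exactly 0.5 at the
	// cutoff, and a small residual value at the full capacity.
	return 1 - 1/(1+math.Exp(-(amtMsat-cutoffMsat)/smearingMsat))
}

func main() {
	capacityMsat := 100_000_000.0 // a 100k sat channel, expressed in msat

	for _, amtMsat := range []float64{0, 50_000_000, 75_000_000, 100_000_000} {
		factor := capacityFactorSketch(amtMsat, capacityMsat, 0.75)
		fmt.Printf("amt=%.0f msat factor=%.3f\n", amtMsat, factor)
	}
}

Running this for a 100k sat channel prints factors of roughly 0.999, 0.924,
0.500 and 0.076 for amounts of 0%, 50%, 75% and 100% of the capacity, which
line up with the expected values in the test table in the next file.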


@@ -27,8 +27,9 @@
     aprioriPrevSucProb = 0.95
 
     // testCapacity is used to define a capacity for some channels.
     testCapacity = btcutil.Amount(100_000)
     testAmount   = lnwire.MilliSatoshi(50_000_000)
+    testCapacityFraction = 0.75
 
     // Defines the capacityFactor for testAmount and testCapacity.
     capFactor = 0.9241
@@ -53,6 +54,7 @@
             AprioriHopProbability: aprioriHopProb,
             AprioriWeight:         aprioriWeight,
             PenaltyHalfLife:       time.Hour,
+            CapacityFraction:      testCapacityFraction,
         },
         prevSuccessProbability: aprioriPrevSucProb,
     },
@@ -227,49 +229,65 @@
     capacityMSat := capacitySat * 1000
 
     tests := []struct {
         name             string
+        capacityFraction float64
         amountMsat       int
         expectedFactor   float64
     }{
+        // Minimal CapacityFraction of 0.75.
         {
             name:             "zero amount",
+            capacityFraction: 0.75,
             expectedFactor:   1,
         },
         {
             name:             "low amount",
+            capacityFraction: 0.75,
             amountMsat:       capacityMSat / 10,
             expectedFactor:   0.998,
         },
         {
             name:             "half amount",
+            capacityFraction: 0.75,
             amountMsat:       capacityMSat / 2,
             expectedFactor:   0.924,
         },
         {
             name:             "cutoff amount",
+            capacityFraction: 0.75,
             amountMsat: int(
-                capacityCutoffFraction * float64(capacityMSat),
+                0.75 * float64(capacityMSat),
             ),
             expectedFactor: 0.5,
         },
         {
             name:             "high amount",
+            capacityFraction: 0.75,
             amountMsat:       capacityMSat * 80 / 100,
             expectedFactor:   0.377,
         },
         {
             // Even when we spend the full capacity, we still want
             // to have some residual probability to not throw away
             // routes due to a min probability requirement of the
             // whole path.
             name:             "full amount",
+            capacityFraction: 0.75,
             amountMsat:       capacityMSat,
             expectedFactor:   0.076,
         },
         {
             name:             "more than capacity",
+            capacityFraction: 0.75,
             amountMsat:       capacityMSat + 1,
             expectedFactor:   0.0,
         },
+        // Inactive capacity factor.
+        {
+            name:             "inactive capacity factor",
+            capacityFraction: 1.0,
+            amountMsat:       capacityMSat,
+            expectedFactor:   1.00,
+        },
     }
@@ -282,6 +300,7 @@
             got := capacityFactor(
                 lnwire.MilliSatoshi(test.amountMsat),
                 btcutil.Amount(capacitySat),
+                test.capacityFraction,
             )
             require.InDelta(t, test.expectedFactor, got, 0.001)
         })
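
As a quick cross-check of the numbers used above, assuming the logistic form
sketched after the probability hunks earlier: with a cutoff fraction of 0.75
and a smearing of 0.1, spending half of the capacity yields
1 - 1/(1 + e^((0.75-0.5)/0.1)) = 1 - 1/(1 + e^2.5) ≈ 0.9241, the capFactor
constant defined for testAmount and testCapacity, while spending the full
capacity yields 1 - 1/(1 + e^(-2.5)) ≈ 0.076, the residual factor expected in
the "full amount" case.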


@@ -123,6 +123,7 @@ func createTestCtxFromGraphInstanceAssumeValid(t *testing.T,
         PenaltyHalfLife:       time.Hour,
         AprioriHopProbability: 0.9,
         AprioriWeight:         0.5,
+        CapacityFraction:      testCapacityFraction,
     }
 
     estimator, err := NewAprioriEstimator(aCfg)
     require.NoError(t, err)


@@ -1142,6 +1142,11 @@ litecoin.node=ltcd
 ; probability (default: 1h0m0s)
 ; routerrpc.apriori.penaltyhalflife=2h
 
+; Defines the fraction of channels' capacities that is considered liquid in
+; pathfinding, a value between [0.75-1.0]. A value of 1.0 disables this
+; feature. (default: 0.75)
+; routerrpc.apriori.capacityfraction=0.9
+
 ; Describes the scale over which channels still have some liquidity left on
 ; both channel ends. A very low value (compared to typical channel capacities)
 ; means that we assume unbalanced channels, a very high value means randomly


@@ -880,6 +880,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
             AprioriHopProbability: aCfg.HopProbability,
             PenaltyHalfLife:       aCfg.PenaltyHalfLife,
             AprioriWeight:         aCfg.Weight,
+            CapacityFraction:      aCfg.CapacityFraction,
         }
         estimator, err = routing.NewAprioriEstimator(