2019-09-04 17:40:14 +02:00
|
|
|
package routing
|
|
|
|
|
|
|
|
import (
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2022-05-26 10:18:12 +02:00
|
|
|
"github.com/btcsuite/btcd/btcutil"
|
2019-09-04 17:40:14 +02:00
|
|
|
"github.com/lightningnetwork/lnd/lnwire"
|
|
|
|
"github.com/lightningnetwork/lnd/routing/route"
|
2022-08-17 15:38:01 +02:00
|
|
|
"github.com/stretchr/testify/require"
|
2019-09-04 17:40:14 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// Define node identifiers.
	node1 = 1
	node2 = 2
	node3 = 3

	// untriedNode is a node id for which we don't record any results in
	// this test. This can be used to assert the probability for untried
	// nodes.
	untriedNode = 255

	// Define test estimator parameters.
	aprioriHopProb     = 0.6
	aprioriWeight      = 0.75
	aprioriPrevSucProb = 0.95

	// testCapacity is used to define a capacity for some channels.
	testCapacity         = btcutil.Amount(100_000)
	testAmount           = lnwire.MilliSatoshi(90_000_000)
	testCapacityFraction = 0.9999

	// capFactor is the capacityFactor for testAmount, testCapacity and
	// testCapacityFraction.
	capFactor = 0.9909715
)
|
|
|
|
|
|
|
|
// estimatorTestContext is a test context for the apriori probability
// estimator.
type estimatorTestContext struct {
	t *testing.T

	// estimator is the probability estimator instance under test.
	estimator *AprioriEstimator

	// results contains a list of last results. Every element in the list
	// corresponds to the last result towards a node. The list index equals
	// the node id. So the first element in the list is the result towards
	// node 0.
	results map[int]TimedPairResult
}
|
|
|
|
|
|
|
|
func newEstimatorTestContext(t *testing.T) *estimatorTestContext {
|
|
|
|
return &estimatorTestContext{
|
|
|
|
t: t,
|
2022-11-18 10:13:45 +01:00
|
|
|
estimator: &AprioriEstimator{
|
|
|
|
AprioriConfig: AprioriConfig{
|
2021-01-19 09:57:13 +01:00
|
|
|
AprioriHopProbability: aprioriHopProb,
|
|
|
|
AprioriWeight: aprioriWeight,
|
|
|
|
PenaltyHalfLife: time.Hour,
|
2023-02-24 11:59:44 +01:00
|
|
|
CapacityFraction: testCapacityFraction,
|
2021-01-19 09:57:13 +01:00
|
|
|
},
|
2019-09-04 17:40:14 +02:00
|
|
|
prevSuccessProbability: aprioriPrevSucProb,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// assertPairProbability asserts that the calculated success probability is
|
|
|
|
// correct.
|
|
|
|
func (c *estimatorTestContext) assertPairProbability(now time.Time,
|
2022-05-26 10:18:12 +02:00
|
|
|
toNode byte, amt lnwire.MilliSatoshi, capacity btcutil.Amount,
|
|
|
|
expectedProb float64) {
|
2019-09-04 17:40:14 +02:00
|
|
|
|
|
|
|
c.t.Helper()
|
|
|
|
|
|
|
|
results := make(NodeResults)
|
|
|
|
for i, r := range c.results {
|
|
|
|
results[route.Vertex{byte(i)}] = r
|
|
|
|
}
|
|
|
|
|
|
|
|
const tolerance = 0.01
|
|
|
|
|
2022-11-18 10:13:45 +01:00
|
|
|
p := c.estimator.PairProbability(
|
2022-05-26 10:18:12 +02:00
|
|
|
now, results, route.Vertex{toNode}, amt, capacity,
|
|
|
|
)
|
2019-09-04 17:40:14 +02:00
|
|
|
diff := p - expectedProb
|
|
|
|
if diff > tolerance || diff < -tolerance {
|
|
|
|
c.t.Fatalf("expected probability %v for node %v, but got %v",
|
|
|
|
expectedProb, toNode, p)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestProbabilityEstimatorNoResults tests the probability estimation when no
|
|
|
|
// results are available.
|
|
|
|
func TestProbabilityEstimatorNoResults(t *testing.T) {
|
2022-05-27 11:34:07 +02:00
|
|
|
t.Parallel()
|
|
|
|
|
2019-09-04 17:40:14 +02:00
|
|
|
ctx := newEstimatorTestContext(t)
|
|
|
|
|
2022-08-17 15:38:01 +02:00
|
|
|
// A zero amount does not trigger capacity rescaling.
|
|
|
|
ctx.assertPairProbability(
|
|
|
|
testTime, 0, 0, testCapacity, aprioriHopProb,
|
|
|
|
)
|
|
|
|
|
|
|
|
// We expect a reduced probability when a higher amount is used.
|
|
|
|
expected := aprioriHopProb * capFactor
|
|
|
|
ctx.assertPairProbability(
|
|
|
|
testTime, 0, testAmount, testCapacity, expected,
|
|
|
|
)
|
2019-09-04 17:40:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestProbabilityEstimatorOneSuccess tests the probability estimation for nodes
// that have a single success result.
func TestProbabilityEstimatorOneSuccess(t *testing.T) {
	t.Parallel()

	ctx := newEstimatorTestContext(t)

	ctx.results = map[int]TimedPairResult{
		node1: {
			SuccessAmt: testAmount,
		},
	}

	// Because of the previous success, this channel keeps reporting a high
	// probability.
	ctx.assertPairProbability(
		testTime, node1, 100, testCapacity, aprioriPrevSucProb,
	)

	// The apriori success probability indicates that in the past we were
	// able to send the full amount. We don't want to reduce this
	// probability with the capacity factor, which is tested here.
	ctx.assertPairProbability(
		testTime, node1, testAmount, testCapacity, aprioriPrevSucProb,
	)

	// Untried channels are also influenced by the success. With an
	// aprioriWeight of 0.75, the a priori probability is assigned weight 3.
	expectedP := (3*aprioriHopProb + 1*aprioriPrevSucProb) / 4
	ctx.assertPairProbability(
		testTime, untriedNode, 100, testCapacity, expectedP,
	)

	// Check that the correct probability is computed for larger amounts,
	// where the a priori probability is scaled by the capacity factor.
	apriori := aprioriHopProb * capFactor

	expectedP = (3*apriori + 1*aprioriPrevSucProb) / 4
	ctx.assertPairProbability(
		testTime, untriedNode, testAmount, testCapacity, expectedP,
	)
}
|
|
|
|
|
|
|
|
// TestProbabilityEstimatorOneFailure tests the probability estimation for nodes
// that have a single failure.
func TestProbabilityEstimatorOneFailure(t *testing.T) {
	t.Parallel()

	ctx := newEstimatorTestContext(t)

	ctx.results = map[int]TimedPairResult{
		node1: {
			FailTime: testTime.Add(-time.Hour),
			FailAmt:  lnwire.MilliSatoshi(50),
		},
	}

	// For an untried node, we expect the node probability. The weight for
	// the failure after one hour is 0.5. This makes the node probability
	// 0.51:
	expectedNodeProb := (3*aprioriHopProb + 0.5*0) / 3.5
	ctx.assertPairProbability(
		testTime, untriedNode, 100, testCapacity, expectedNodeProb,
	)

	// The pair probability decays back to the node probability. With the
	// weight at 0.5, we expect a pair probability of 0.5 * 0.51 = 0.26.
	ctx.assertPairProbability(
		testTime, node1, 100, testCapacity, expectedNodeProb/2,
	)
}
|
|
|
|
|
|
|
|
// TestProbabilityEstimatorMix tests the probability estimation for nodes for
|
|
|
|
// which a mix of successes and failures is recorded.
|
|
|
|
func TestProbabilityEstimatorMix(t *testing.T) {
|
2022-05-27 11:34:07 +02:00
|
|
|
t.Parallel()
|
|
|
|
|
2019-09-04 17:40:14 +02:00
|
|
|
ctx := newEstimatorTestContext(t)
|
|
|
|
|
2019-09-26 15:31:24 +02:00
|
|
|
ctx.results = map[int]TimedPairResult{
|
2019-09-26 17:04:02 +02:00
|
|
|
node1: {
|
|
|
|
SuccessAmt: lnwire.MilliSatoshi(1000),
|
|
|
|
},
|
|
|
|
node2: {
|
|
|
|
FailTime: testTime.Add(-2 * time.Hour),
|
|
|
|
FailAmt: lnwire.MilliSatoshi(50),
|
|
|
|
},
|
|
|
|
node3: {
|
|
|
|
FailTime: testTime.Add(-3 * time.Hour),
|
|
|
|
FailAmt: lnwire.MilliSatoshi(50),
|
|
|
|
},
|
2019-09-04 17:40:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// We expect the probability for a previously successful channel to
|
|
|
|
// remain high.
|
2022-05-26 10:18:12 +02:00
|
|
|
ctx.assertPairProbability(
|
|
|
|
testTime, node1, 100, testCapacity, prevSuccessProbability,
|
|
|
|
)
|
2019-09-04 17:40:14 +02:00
|
|
|
|
|
|
|
// For an untried node, we expected the node probability to be returned.
|
|
|
|
// This is a weighted average of the results above and the a priori
|
|
|
|
// probability: 0.62.
|
|
|
|
expectedNodeProb := (3*aprioriHopProb + 1*prevSuccessProbability) /
|
|
|
|
(3 + 1 + 0.25 + 0.125)
|
|
|
|
|
2022-05-26 10:18:12 +02:00
|
|
|
ctx.assertPairProbability(
|
|
|
|
testTime, untriedNode, 100, testCapacity, expectedNodeProb,
|
|
|
|
)
|
2019-09-04 17:40:14 +02:00
|
|
|
|
|
|
|
// For the previously failed connection with node 1, we expect 0.75 *
|
|
|
|
// the node probability = 0.47.
|
2022-05-26 10:18:12 +02:00
|
|
|
ctx.assertPairProbability(
|
|
|
|
testTime, node2, 100, testCapacity, expectedNodeProb*0.75,
|
|
|
|
)
|
2019-09-04 17:40:14 +02:00
|
|
|
}
|
2022-08-17 15:38:01 +02:00
|
|
|
|
|
|
|
// TestCapacityCutoff tests the mathematical expression and limits for the
|
|
|
|
// capacity factor.
|
|
|
|
func TestCapacityCutoff(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
capacitySat := 1_000_000
|
|
|
|
capacityMSat := capacitySat * 1000
|
|
|
|
|
|
|
|
tests := []struct {
|
2023-02-24 11:59:44 +01:00
|
|
|
name string
|
|
|
|
capacityFraction float64
|
|
|
|
amountMsat int
|
|
|
|
expectedFactor float64
|
2022-08-17 15:38:01 +02:00
|
|
|
}{
|
2023-02-24 11:59:44 +01:00
|
|
|
// Minimal CapacityFraction of 0.75.
|
2022-08-17 15:38:01 +02:00
|
|
|
{
|
2023-02-24 11:59:44 +01:00
|
|
|
name: "zero amount",
|
|
|
|
capacityFraction: 0.75,
|
|
|
|
expectedFactor: 1,
|
2022-08-17 15:38:01 +02:00
|
|
|
},
|
|
|
|
{
|
2023-02-24 11:59:44 +01:00
|
|
|
name: "low amount",
|
|
|
|
capacityFraction: 0.75,
|
|
|
|
amountMsat: capacityMSat / 10,
|
2023-02-18 09:42:09 +01:00
|
|
|
expectedFactor: 1,
|
2022-08-17 15:38:01 +02:00
|
|
|
},
|
|
|
|
{
|
2023-02-24 11:59:44 +01:00
|
|
|
name: "half amount",
|
|
|
|
capacityFraction: 0.75,
|
|
|
|
amountMsat: capacityMSat / 2,
|
2023-02-18 09:42:09 +01:00
|
|
|
expectedFactor: 1,
|
2022-08-17 15:38:01 +02:00
|
|
|
},
|
|
|
|
{
|
2023-02-24 11:59:44 +01:00
|
|
|
name: "cutoff amount",
|
|
|
|
capacityFraction: 0.75,
|
2022-08-17 15:38:01 +02:00
|
|
|
amountMsat: int(
|
2023-02-24 11:59:44 +01:00
|
|
|
0.75 * float64(capacityMSat),
|
2022-08-17 15:38:01 +02:00
|
|
|
),
|
2023-02-18 09:42:09 +01:00
|
|
|
expectedFactor: 0.75,
|
2022-08-17 15:38:01 +02:00
|
|
|
},
|
|
|
|
{
|
2023-02-24 11:59:44 +01:00
|
|
|
name: "high amount",
|
|
|
|
capacityFraction: 0.75,
|
|
|
|
amountMsat: capacityMSat * 80 / 100,
|
2023-02-18 09:42:09 +01:00
|
|
|
expectedFactor: 0.560,
|
2022-08-17 15:38:01 +02:00
|
|
|
},
|
|
|
|
{
|
|
|
|
// Even when we spend the full capacity, we still want
|
|
|
|
// to have some residual probability to not throw away
|
|
|
|
// routes due to a min probability requirement of the
|
|
|
|
// whole path.
|
2023-02-24 11:59:44 +01:00
|
|
|
name: "full amount",
|
|
|
|
capacityFraction: 0.75,
|
|
|
|
amountMsat: capacityMSat,
|
2023-02-18 09:42:09 +01:00
|
|
|
expectedFactor: 0.5,
|
2022-08-17 15:38:01 +02:00
|
|
|
},
|
|
|
|
{
|
2023-02-24 11:59:44 +01:00
|
|
|
name: "more than capacity",
|
|
|
|
capacityFraction: 0.75,
|
|
|
|
amountMsat: capacityMSat + 1,
|
|
|
|
expectedFactor: 0.0,
|
|
|
|
},
|
2023-02-18 09:42:09 +01:00
|
|
|
// Default CapacityFactor of 0.9999.
|
|
|
|
{
|
|
|
|
name: "zero amount",
|
|
|
|
capacityFraction: 0.9999,
|
|
|
|
amountMsat: 0,
|
|
|
|
expectedFactor: 1.00,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "90% of the channel capacity",
|
|
|
|
capacityFraction: 0.9999,
|
|
|
|
amountMsat: capacityMSat * 90 / 100,
|
|
|
|
expectedFactor: 0.990,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
// We won't saturate at 0.5 as in the other case but at
|
|
|
|
// a higher value of 0.75 due to the smearing, this
|
|
|
|
// translates to a penalty increase of a factor of 1.33.
|
|
|
|
name: "full amount",
|
|
|
|
capacityFraction: 0.9999,
|
|
|
|
amountMsat: capacityMSat,
|
|
|
|
expectedFactor: 0.75,
|
|
|
|
},
|
2023-02-24 11:59:44 +01:00
|
|
|
// Inactive capacity factor.
|
|
|
|
{
|
|
|
|
name: "inactive capacity factor",
|
|
|
|
capacityFraction: 1.0,
|
|
|
|
amountMsat: capacityMSat,
|
|
|
|
expectedFactor: 1.00,
|
2022-08-17 15:38:01 +02:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, test := range tests {
|
|
|
|
test := test
|
|
|
|
|
|
|
|
t.Run(test.name, func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
got := capacityFactor(
|
|
|
|
lnwire.MilliSatoshi(test.amountMsat),
|
|
|
|
btcutil.Amount(capacitySat),
|
2023-02-24 11:59:44 +01:00
|
|
|
test.capacityFraction,
|
2022-08-17 15:38:01 +02:00
|
|
|
)
|
|
|
|
require.InDelta(t, test.expectedFactor, got, 0.001)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|