diff --git a/docs/release-notes/release-notes-0.15.0.md b/docs/release-notes/release-notes-0.15.0.md
index 09f5bf982..4d794f7fa 100644
--- a/docs/release-notes/release-notes-0.15.0.md
+++ b/docs/release-notes/release-notes-0.15.0.md
@@ -29,6 +29,13 @@
   [These premature messages are now saved into a cache and processed once the
   height has reached.](https://github.com/lightningnetwork/lnd/pull/6054)
 
+* [Fixed failure to limit our number of hop hints in private invoices](https://github.com/lightningnetwork/lnd/pull/6236).
+  When a private invoice was created and the node had > 20 (our hop hint
+  limit) private channels with inbound > invoice amount, hop hint selection
+  would add too many hop hints. When a node had many channels meeting this
+  criterion, it could result in an "invoice too large" error when creating
+  invoices. Hints are now properly limited to our maximum of 20.
+
 ## Misc
 
 * [An example systemd service file](https://github.com/lightningnetwork/lnd/pull/6033)
diff --git a/lnrpc/invoicesrpc/addinvoice.go b/lnrpc/invoicesrpc/addinvoice.go
index 17f58b9cc..3bd6eaa36 100644
--- a/lnrpc/invoicesrpc/addinvoice.go
+++ b/lnrpc/invoicesrpc/addinvoice.go
@@ -628,6 +628,36 @@ func newSelectHopHintsCfg(invoicesCfg *AddInvoiceConfig) *SelectHopHintsCfg {
 	}
 }
 
+// sufficientHints checks whether we have sufficient hop hints, based on the
+// following criteria:
+// - Hop hint count: limit to a set number of hop hints, regardless of whether
+//   we've reached our invoice amount or not.
+// - Total incoming capacity: limit to our invoice amount * scaling factor to
+//   allow for some of our links going offline.
+//
+// We limit our number of hop hints like this to keep our invoice size down,
+// and to avoid leaking all our private channels when we don't need to.
+func sufficientHints(numHints, maxHints, scalingFactor int, amount,
+	totalHintAmount lnwire.MilliSatoshi) bool {
+
+	if numHints >= maxHints {
+		log.Debug("Reached maximum number of hop hints")
+		return true
+	}
+
+	requiredAmount := amount * lnwire.MilliSatoshi(scalingFactor)
+	if totalHintAmount >= requiredAmount {
+		log.Debugf("Total hint amount: %v has reached target hint "+
+			"bandwidth: %v (invoice amount: %v * factor: %v)",
+			totalHintAmount, requiredAmount, amount,
+			scalingFactor)
+
+		return true
+	}
+
+	return false
+}
+
 // SelectHopHints will select up to numMaxHophints from the set of passed open
 // channels. The set of hop hints will be returned as a slice of functional
 // options that'll append the route hint to the set of all route hints.
@@ -644,6 +674,17 @@ func SelectHopHints(amtMSat lnwire.MilliSatoshi, cfg *SelectHopHintsCfg,
 	hopHintChans := make(map[wire.OutPoint]struct{})
 	hopHints := make([][]zpay32.HopHint, 0, numMaxHophints)
 	for _, channel := range openChannels {
+		enoughHopHints := sufficientHints(
+			len(hopHints), numMaxHophints, hopHintFactor, amtMSat,
+			totalHintBandwidth,
+		)
+		if enoughHopHints {
+			log.Debugf("First pass of hop selection has " +
+				"sufficient hints")
+
+			return hopHints
+		}
+
 		// If this channel can't be a hop hint, then skip it.
 		edgePolicy, canBeHopHint := chanCanBeHopHint(channel, cfg)
 		if edgePolicy == nil || !canBeHopHint {
@@ -664,24 +705,21 @@ func SelectHopHints(amtMSat lnwire.MilliSatoshi, cfg *SelectHopHintsCfg,
 		totalHintBandwidth += channel.RemoteBalance
 	}
 
-	// If we have enough hop hints at this point, then we'll exit early.
-	// Otherwise, we'll continue to add more that may help out mpp users.
-	if len(hopHints) >= numMaxHophints {
-		return hopHints
-	}
-
 	// In this second pass we'll add channels, and we'll either stop when
 	// we have 20 hop hints, we've run through all the available channels,
 	// or if the sum of available bandwidth in the routing hints exceeds 2x
 	// the payment amount. We do 2x here to account for a margin of error
 	// if some of the selected channels no longer become operable.
 	for i := 0; i < len(openChannels); i++ {
-		// If we hit either of our early termination conditions, then
-		// we'll break the loop here.
-		if totalHintBandwidth > amtMSat*hopHintFactor ||
-			len(hopHints) >= numMaxHophints {
+		enoughHopHints := sufficientHints(
+			len(hopHints), numMaxHophints, hopHintFactor, amtMSat,
+			totalHintBandwidth,
+		)
+		if enoughHopHints {
+			log.Debugf("Second pass of hop selection has " +
+				"sufficient hints")
 
-			break
+			return hopHints
 		}
 
 		channel := openChannels[i]
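To make the new cutoff concrete, the sketch below simulates hint selection with the predicate above. It is illustrative, not the patched code: plain `uint64` millisatoshi amounts stand in for `lnwire.MilliSatoshi`, the debug logging is dropped, and `selectHints` is a hypothetical helper that collapses both selection passes into one accumulation loop using the limits named in the patch (20 hints, 2x bandwidth factor).

```go
package main

import "fmt"

// Limits described in the patch: at most 20 hints, and a bandwidth target of
// 2x the invoice amount.
const (
	maxHopHints   = 20
	hopHintFactor = 2
)

// sufficientHints mirrors the predicate added in the patch, with plain uint64
// millisatoshi amounts standing in for lnwire.MilliSatoshi.
func sufficientHints(numHints, maxHints, scalingFactor int, amount,
	totalHintAmount uint64) bool {

	if numHints >= maxHints {
		return true
	}

	return totalHintAmount >= amount*uint64(scalingFactor)
}

// selectHints abstracts both selection passes into a single loop: keep adding
// channels, each contributing perChannel msat of inbound, until the predicate
// reports that we have enough hints.
func selectHints(numChannels int, perChannel, amount uint64) (int, uint64) {
	var (
		hints     int
		bandwidth uint64
	)
	for i := 0; i < numChannels; i++ {
		if sufficientHints(
			hints, maxHopHints, hopHintFactor, amount, bandwidth,
		) {
			break
		}

		hints++
		bandwidth += perChannel
	}

	return hints, bandwidth
}

func main() {
	// 30 small channels against a large invoice: the 2x bandwidth target
	// (10M msat) is out of reach, so the hint cap terminates selection at
	// 20 hints. Prints "small channels: 20 hints, 2000000 msat".
	hints, bw := selectHints(30, 100_000, 5_000_000)
	fmt.Printf("small channels: %d hints, %d msat\n", hints, bw)

	// 30 large channels against the same invoice: two channels clear the
	// 10M msat target, so selection stops well short of the cap. Prints
	// "large channels: 2 hints, 12000000 msat".
	hints, bw = selectHints(30, 6_000_000, 5_000_000)
	fmt.Printf("large channels: %d hints, %d msat\n", hints, bw)
}
```

Because the predicate now runs at the top of each iteration of both passes, a node with dozens of qualifying private channels stops at 20 hints instead of emitting one hint per channel, which is the failure mode the release note describes.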
diff --git a/lnrpc/invoicesrpc/addinvoice_test.go b/lnrpc/invoicesrpc/addinvoice_test.go
index fd2aa60b7..556a1c9ed 100644
--- a/lnrpc/invoicesrpc/addinvoice_test.go
+++ b/lnrpc/invoicesrpc/addinvoice_test.go
@@ -307,6 +307,28 @@ func TestSelectHopHints(t *testing.T) {
 			numHints:      1,
 			expectedHints: nil,
 		},
+		{
+			// This test case asserts that we limit our hop hints
+			// when we've reached our maximum number of hints.
+			name: "too many hints",
+			setupMock: func(h *hopHintsConfigMock) {
+				setMockChannelUsed(
+					h, private1ShortID, privateChan1Policy,
+				)
+			},
+			// Set our amount to less than our channel balance of
+			// 100.
+			amount: 30,
+			channels: []*HopHintInfo{
+				privateChannel1, privateChannel2,
+			},
+			numHints: 1,
+			expectedHints: [][]zpay32.HopHint{
+				{
+					privateChannel1Hint,
+				},
+			},
+		},
 		{
 			// If a channel has more balance than the amount we're
 			// looking for, it'll be added in our first pass. We
@@ -547,3 +569,57 @@ func TestSelectHopHints(t *testing.T) {
 		})
 	}
 }
+
+// TestSufficientHopHints tests limiting our hops to a set number of hints or
+// scaled amount of capacity.
+func TestSufficientHopHints(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name            string
+		numHints        int
+		maxHints        int
+		scalingFactor   int
+		amount          lnwire.MilliSatoshi
+		totalHintAmount lnwire.MilliSatoshi
+		sufficient      bool
+	}{
+		{
+			name:     "not enough hints or amount",
+			numHints: 3,
+			maxHints: 10,
+			// We want to have at least 200, and we currently have
+			// 10.
+			scalingFactor:   2,
+			amount:          100,
+			totalHintAmount: 10,
+			sufficient:      false,
+		},
+		{
+			name:       "enough hints",
+			numHints:   3,
+			maxHints:   3,
+			sufficient: true,
+		},
+		{
+			name:     "not enough hints, sufficient bandwidth",
+			numHints: 1,
+			maxHints: 3,
+			// We want at least 200, and we have enough.
+			scalingFactor:   2,
+			amount:          100,
+			totalHintAmount: 700,
+			sufficient:      true,
+		},
+	}
+
+	for _, testCase := range tests {
+		sufficient := sufficientHints(
+			testCase.numHints, testCase.maxHints,
+			testCase.scalingFactor, testCase.amount,
+			testCase.totalHintAmount,
+		)
+
+		require.Equal(t, testCase.sufficient, sufficient)
+	}
+}
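One optional refinement to the table runner above, shown as a hypothetical drop-in for the test's final loop rather than as part of the patch: running each case under `t.Run` names the failing case in `go test` output.

```go
	// Drop-in replacement for the final loop of TestSufficientHopHints:
	// run each table case as a named subtest so a failure points at the
	// offending case.
	for _, testCase := range tests {
		testCase := testCase // capture range variable for the closure
		t.Run(testCase.name, func(t *testing.T) {
			sufficient := sufficientHints(
				testCase.numHints, testCase.maxHints,
				testCase.scalingFactor, testCase.amount,
				testCase.totalHintAmount,
			)

			require.Equal(t, testCase.sufficient, sufficient)
		})
	}
```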
diff --git a/lntest/itest/lnd_hold_persistence_test.go b/lntest/itest/lnd_hold_persistence_test.go
index f60121742..0782c87ab 100644
--- a/lntest/itest/lnd_hold_persistence_test.go
+++ b/lntest/itest/lnd_hold_persistence_test.go
@@ -60,6 +60,26 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
 			"timeout: %v", err)
 	}
 
+	// For Carol to include her private channel with Alice as a hop hint,
+	// we need Alice to be perceived as a "public" node, meaning that she
+	// has at least one public channel in the graph. We open a public
+	// channel from Alice -> Bob and wait for Carol to see it.
+	chanPointBob := openChannelAndAssert(
+		t, net, net.Alice, net.Bob,
+		lntest.OpenChannelParams{
+			Amt: chanAmt,
+		},
+	)
+
+	// Wait for Alice and Carol to see the open channel.
+	err = net.Alice.WaitForNetworkChannelOpen(chanPointBob)
+	require.NoError(t.t, err, "alice didn't see the alice->bob "+
+		"channel before timeout")
+
+	err = carol.WaitForNetworkChannelOpen(chanPointBob)
+	require.NoError(t.t, err, "carol didn't see the alice->bob "+
+		"channel before timeout")
+
 	// Create preimages for all payments we are going to initiate.
 	var preimages []lntypes.Preimage
 	for i := 0; i < numPayments; i++ {
@@ -109,6 +129,18 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
 			t.Fatalf("unable to subscribe to invoice: %v", err)
 		}
 
+		// We expect all of our invoices to have hop hints attached,
+		// since Carol and Alice are connected with a private channel.
+		// We assert that we have one hop hint present to ensure that
+		// we've got coverage for hop hints.
+		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+		decodeReq := &lnrpc.PayReqString{
+			PayReq: resp.PaymentRequest,
+		}
+		invoice, err := net.Alice.DecodePayReq(ctxt, decodeReq)
+		require.NoError(t.t, err, "could not decode invoice")
+		require.Len(t.t, invoice.RouteHints, 1)
+
 		invoiceStreams = append(invoiceStreams, stream)
 		payReqs = append(payReqs, resp.PaymentRequest)
 	}
@@ -229,7 +261,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
 	}
 
 	// Now after a restart, we must re-track the payments. We set up a
-	// goroutine for each to track thir status updates.
+	// goroutine for each to track their status updates.
 	var (
 		statusUpdates []chan *lnrpc.Payment
 		wg            sync.WaitGroup
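The integration test asserts hop hints through the node's DecodePayReq RPC. The same check can be made offline with lnd's zpay32 package; below is a minimal sketch of that, where the payment request string is a placeholder (in the test it would be resp.PaymentRequest from Carol's AddHoldInvoice response), so decoding fails until a real regtest invoice is substituted.

```go
package main

import (
	"fmt"
	"log"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/lightningnetwork/lnd/zpay32"
)

func main() {
	// Placeholder: substitute a real payment request to run the check.
	payReq := "lnbcrt..."

	// Decode against the network the invoice was created for; the itest
	// harness runs on regtest.
	inv, err := zpay32.Decode(payReq, &chaincfg.RegressionNetParams)
	if err != nil {
		log.Fatalf("could not decode invoice: %v", err)
	}

	// Mirrors require.Len(t.t, invoice.RouteHints, 1) from the test: a
	// single private Carol<-Alice channel should yield exactly one hint.
	fmt.Printf("invoice carries %d route hint(s)\n", len(inv.RouteHints))
}
```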