multi: update internals to support updating max htlc
In this commit, we update the router and link to support users updating the max HTLC policy for their channels. By updating these internal systems before updating the RPC server and lncli, we protect users from being shown an option that doesn't actually work.
Parent: c80feeb4b3 · Commit: 9a52cb6dab
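
Ahead of the diff hunks below, here is a minimal usage sketch (not part of the commit) of how a caller, such as the RPC server once it is wired up, might pass the new MaxHTLC field through the local channel manager. It is written as if it lived in the same package as Manager, mirroring the test below; the package name, the variadic channel-point parameter, and the error return of UpdatePolicy are assumptions inferred from the test's call pattern rather than shown directly in the diff.

// Hypothetical sketch only: the package name and the full UpdatePolicy
// signature are assumed; FeeRate, TimeLockDelta and MaxHTLC are the fields
// the diff actually exercises.
package localchans

import (
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing"
)

// applyMaxHtlc caps forwarded HTLCs on a single channel at maxHtlc.
func applyMaxHtlc(r *Manager, maxHtlc lnwire.MilliSatoshi) error {
	// Example channel point; a real caller would take this from the user.
	chanPoint := wire.OutPoint{Hash: chainhash.Hash{1}, Index: 2}

	newPolicy := routing.ChannelPolicy{
		FeeSchema: routing.FeeSchema{
			FeeRate: 200,
		},
		TimeLockDelta: 80,

		// A non-zero MaxHTLC updates the edge; a zero value keeps the
		// channel's current max HTLC unchanged (see updateEdge below).
		MaxHTLC: maxHtlc,
	}

	// Restrict the update to this channel only.
	return r.UpdatePolicy(newPolicy, chanPoint)
}

As the updateEdge hunk below shows, a zero MaxHTLC leaves the edge untouched, while a value above the channel's negotiated max in-flight amount (or a min HTLC above the max) is rejected with an error rather than advertised.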
@@ -971,6 +971,47 @@ func TestUpdateForwardingPolicy(t *testing.T) {
 	default:
 		t.Fatalf("expected FailFeeInsufficient instead got: %v", err)
 	}
+
+	// Reset the policy so we can then test updating the max HTLC policy.
+	n.secondBobChannelLink.UpdateForwardingPolicy(n.globalPolicy)
+
+	// As a sanity check, ensure the original payment now succeeds again.
+	_, err = makePayment(
+		n.aliceServer, n.carolServer, firstHop, hops, amountNoFee,
+		htlcAmt, htlcExpiry,
+	).Wait(30 * time.Second)
+	if err != nil {
+		t.Fatalf("unable to send payment: %v", err)
+	}
+
+	// Now we'll update Bob's policy to lower his max HTLC to an extent
+	// that'll cause him to reject the same HTLC that we just sent.
+	newPolicy = n.globalPolicy
+	newPolicy.MaxHTLC = amountNoFee - 1
+	n.secondBobChannelLink.UpdateForwardingPolicy(newPolicy)
+
+	// Next, we'll send the payment again, using the exact same per-hop
+	// payload for each node. This payment should fail as it won't factor
+	// in Bob's new max HTLC policy.
+	_, err = makePayment(
+		n.aliceServer, n.carolServer, firstHop, hops, amountNoFee,
+		htlcAmt, htlcExpiry,
+	).Wait(30 * time.Second)
+	if err == nil {
+		t.Fatalf("payment should've been rejected")
+	}
+
+	ferr, ok = err.(*ForwardingError)
+	if !ok {
+		t.Fatalf("expected a ForwardingError, instead got (%T): %v",
+			err, err)
+	}
+	switch ferr.FailureMessage.(type) {
+	case *lnwire.FailTemporaryChannelFailure:
+	default:
+		t.Fatalf("expected TemporaryChannelFailure, instead got: %v",
+			err)
+	}
 }
 
 // TestChannelLinkMultiHopInsufficientPayment checks that we receive error if
@@ -5,7 +5,6 @@ import (
 	"sync"
 
 	"github.com/btcsuite/btcd/wire"
-	"github.com/btcsuite/btcutil"
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/discovery"
 	"github.com/lightningnetwork/lnd/htlcswitch"
@@ -31,6 +30,10 @@ type Manager struct {
 	ForAllOutgoingChannels func(cb func(*channeldb.ChannelEdgeInfo,
 		*channeldb.ChannelEdgePolicy) error) error
 
+	// FetchChannel is used to query local channel parameters.
+	FetchChannel func(chanPoint wire.OutPoint) (*channeldb.OpenChannel,
+		error)
+
 	// policyUpdateLock ensures that the database and the link do not fall
 	// out of sync if there are concurrent fee update calls. Without it,
 	// there is a chance that policy A updates the database, then policy B
@@ -74,7 +77,7 @@ func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy,
 		}
 
 		// Apply the new policy to the edge.
-		err := r.updateEdge(info.Capacity, edge, newSchema)
+		err := r.updateEdge(info.ChannelPoint, edge, newSchema)
 		if err != nil {
 			return nil
 		}
@@ -117,7 +120,7 @@ func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy,
 }
 
 // updateEdge updates the given edge with the new schema.
-func (r *Manager) updateEdge(capacity btcutil.Amount,
+func (r *Manager) updateEdge(chanPoint wire.OutPoint,
 	edge *channeldb.ChannelEdgePolicy,
 	newSchema routing.ChannelPolicy) error {
 
@@ -128,12 +131,49 @@ func (r *Manager) updateEdge(capacity btcutil.Amount,
 	)
 	edge.TimeLockDelta = uint16(newSchema.TimeLockDelta)
 
-	// Max htlc is currently always set to the channel capacity.
+	// Retrieve negotiated channel htlc amt limits.
+	amtMin, amtMax, err := r.getHtlcAmtLimits(chanPoint)
+	if err != nil {
+		return nil
+	}
+
+	// We now update the edge max htlc value.
+	switch {
+
+	// If a non-zero max htlc was specified, use it to update the edge.
+	// Otherwise keep the value unchanged.
+	case newSchema.MaxHTLC != 0:
+		edge.MaxHTLC = newSchema.MaxHTLC
+
+	// If this edge still doesn't have a max htlc set, set it to the max.
+	// This is an on-the-fly migration.
+	case !edge.MessageFlags.HasMaxHtlc():
+		edge.MaxHTLC = amtMax
+
+	// If this edge has a max htlc that exceeds what the channel can
+	// actually carry, correct it now. This can happen, because we
+	// previously set the max htlc to the channel capacity.
+	case edge.MaxHTLC > amtMax:
+		edge.MaxHTLC = amtMax
+	}
+
+	// If the MaxHtlc flag wasn't already set, we can set it now.
 	edge.MessageFlags |= lnwire.ChanUpdateOptionMaxHtlc
-	edge.MaxHTLC = lnwire.NewMSatFromSatoshis(capacity)
 
 	// Validate htlc amount constraints.
-	if edge.MinHTLC > edge.MaxHTLC {
+	switch {
+	case edge.MinHTLC < amtMin:
+		return fmt.Errorf("min htlc amount of %v msat is below "+
+			"min htlc parameter of %v msat for channel %v",
+			edge.MinHTLC, amtMin,
+			chanPoint)
+
+	case edge.MaxHTLC > amtMax:
+		return fmt.Errorf("max htlc size of %v msat is above "+
+			"max pending amount of %v msat for channel %v",
+			edge.MaxHTLC, amtMax, chanPoint)
+
+	case edge.MinHTLC > edge.MaxHTLC:
 		return fmt.Errorf("min_htlc %v greater than max_htlc %v",
 			edge.MinHTLC, edge.MaxHTLC)
 	}
@@ -143,3 +183,22 @@ func (r *Manager) updateEdge(capacity btcutil.Amount,
 
 	return nil
 }
+
+// getHtlcAmtLimits retrieves the negotiated channel min and max htlc amount
+// constraints.
+func (r *Manager) getHtlcAmtLimits(chanPoint wire.OutPoint) (
+	lnwire.MilliSatoshi, lnwire.MilliSatoshi, error) {
+
+	ch, err := r.FetchChannel(chanPoint)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	// The max htlc policy field must be less than or equal to the channel
+	// capacity AND less than or equal to the max in-flight HTLC value.
+	// Since the latter is always less than or equal to the former, just
+	// return the max in-flight value.
+	maxAmt := ch.LocalChanCfg.ChannelConstraints.MaxPendingAmount
+
+	return ch.LocalChanCfg.MinHTLC, maxAmt, nil
+}
@@ -18,8 +18,12 @@ import (
 // TestManager tests that the local channel manager properly propagates fee
 // updates to gossiper and links.
 func TestManager(t *testing.T) {
-	chanPoint := wire.OutPoint{Hash: chainhash.Hash{1}, Index: 2}
-	chanCap := btcutil.Amount(1000)
+	var (
+		chanPoint        = wire.OutPoint{Hash: chainhash.Hash{1}, Index: 2}
+		chanCap          = btcutil.Amount(1000)
+		maxPendingAmount = lnwire.MilliSatoshi(999000)
+		minHTLC          = lnwire.MilliSatoshi(2000)
+	)
 
 	newPolicy := routing.ChannelPolicy{
 		FeeSchema: routing.FeeSchema{
@@ -27,6 +31,12 @@ func TestManager(t *testing.T) {
 			FeeRate: 200,
 		},
 		TimeLockDelta: 80,
+		MaxHTLC:       5000,
 	}
 
+	currentPolicy := channeldb.ChannelEdgePolicy{
+		MinHTLC:      minHTLC,
+		MessageFlags: lnwire.ChanUpdateOptionMaxHtlc,
+	}
+
 	updateForwardingPolicies := func(
@@ -46,7 +56,7 @@ func TestManager(t *testing.T) {
 		if uint32(policy.FeeRate) != newPolicy.FeeRate {
 			t.Fatal("unexpected base fee")
 		}
-		if policy.MaxHTLC != lnwire.NewMSatFromSatoshis(chanCap) {
+		if policy.MaxHTLC != newPolicy.MaxHTLC {
 			t.Fatal("unexpected max htlc")
 		}
 	}
@@ -71,7 +81,7 @@ func TestManager(t *testing.T) {
 		if uint32(policy.FeeProportionalMillionths) != newPolicy.FeeRate {
 			t.Fatal("unexpected base fee")
 		}
-		if policy.MaxHTLC != lnwire.NewMSatFromSatoshis(chanCap) {
+		if policy.MaxHTLC != newPolicy.MaxHTLC {
 			t.Fatal("unexpected max htlc")
 		}
 
@@ -86,14 +96,30 @@ func TestManager(t *testing.T) {
 				Capacity:     chanCap,
 				ChannelPoint: chanPoint,
 			},
-			&channeldb.ChannelEdgePolicy{},
+			&currentPolicy,
 		)
 	}
 
+	fetchChannel := func(chanPoint wire.OutPoint) (*channeldb.OpenChannel,
+		error) {
+
+		constraints := channeldb.ChannelConstraints{
+			MaxPendingAmount: maxPendingAmount,
+			MinHTLC:          minHTLC,
+		}
+
+		return &channeldb.OpenChannel{
+			LocalChanCfg: channeldb.ChannelConfig{
+				ChannelConstraints: constraints,
+			},
+		}, nil
+	}
+
 	manager := Manager{
 		UpdateForwardingPolicies:  updateForwardingPolicies,
 		PropagateChanPolicyUpdate: propagateChanPolicyUpdate,
 		ForAllOutgoingChannels:    forAllOutgoingChannels,
+		FetchChannel:              fetchChannel,
 	}
 
 	// Test updating a specific channels.
@@ -108,4 +134,15 @@ func TestManager(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+
+	// If no max htlc is specified, the max htlc value should be kept
+	// unchanged.
+	currentPolicy.MaxHTLC = newPolicy.MaxHTLC
+	noMaxHtlcPolicy := newPolicy
+	noMaxHtlcPolicy.MaxHTLC = 0
+
+	err = manager.UpdatePolicy(noMaxHtlcPolicy)
+	if err != nil {
+		t.Fatal(err)
+	}
 }
@@ -219,6 +219,10 @@ type ChannelPolicy struct {
 	// TimeLockDelta is the required HTLC timelock delta to be used
 	// when forwarding payments.
 	TimeLockDelta uint32
+
+	// MaxHTLC is the maximum HTLC size including fees we are allowed to
+	// forward over this channel.
+	MaxHTLC lnwire.MilliSatoshi
 }
 
 // Config defines the configuration for the ChannelRouter. ALL elements within
@@ -742,6 +742,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB,
 		ForAllOutgoingChannels:    s.chanRouter.ForAllOutgoingChannels,
 		PropagateChanPolicyUpdate: s.authGossiper.PropagateChanPolicyUpdate,
 		UpdateForwardingPolicies:  s.htlcSwitch.UpdateForwardingPolicies,
+		FetchChannel:              s.chanDB.FetchChannel,
 	}
 
 	utxnStore, err := newNurseryStore(activeNetParams.GenesisHash, chanDB)