Merge pull request #2275 from wpaulino/funding-max-confs

fundingmanager: reject funding requests whose required number of confirmations is too large
Olaoluwa Osuntokun 2019-01-14 17:04:28 -08:00 committed by GitHub
commit 815c4a9e0d
20 changed files with 285 additions and 122 deletions

View File

@ -1391,8 +1391,8 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa
ChanReserve: 0,
MinHTLC: 0,
MaxAcceptedHtlcs: uint16(rand.Int31()),
CsvDelay: uint16(csvTimeoutAlice),
},
CsvDelay: uint16(csvTimeoutAlice),
MultiSigKey: keychain.KeyDescriptor{
PubKey: aliceKeyPub,
},
@ -1416,8 +1416,8 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa
ChanReserve: 0,
MinHTLC: 0,
MaxAcceptedHtlcs: uint16(rand.Int31()),
CsvDelay: uint16(csvTimeoutBob),
},
CsvDelay: uint16(csvTimeoutBob),
MultiSigKey: keychain.KeyDescriptor{
PubKey: bobKeyPub,
},

View File

@ -20,11 +20,6 @@ const (
// notifierType uniquely identifies this concrete implementation of the
// ChainNotifier interface.
notifierType = "bitcoind"
// reorgSafetyLimit is assumed maximum depth of a chain reorganization.
// After this many confirmation, transaction confirmation info will be
// pruned.
reorgSafetyLimit = 100
)
var (
@ -138,8 +133,8 @@ func (b *BitcoindNotifier) Start() error {
}
b.txNotifier = chainntnfs.NewTxNotifier(
uint32(currentHeight), reorgSafetyLimit, b.confirmHintCache,
b.spendHintCache,
uint32(currentHeight), chainntnfs.ReorgSafetyLimit,
b.confirmHintCache, b.spendHintCache,
)
b.bestBlock = chainntnfs.BlockEpoch{

View File

@ -30,8 +30,8 @@ func (b *BitcoindNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Has
}
b.txNotifier = chainntnfs.NewTxNotifier(
uint32(bestHeight), reorgSafetyLimit, b.confirmHintCache,
b.spendHintCache,
uint32(bestHeight), chainntnfs.ReorgSafetyLimit,
b.confirmHintCache, b.spendHintCache,
)
if generateBlocks != nil {

View File

@ -21,11 +21,6 @@ const (
// notifierType uniquely identifies this concrete implementation of the
// ChainNotifier interface.
notifierType = "btcd"
// reorgSafetyLimit is assumed maximum depth of a chain reorganization.
// After this many confirmation, transaction confirmation info will be
// pruned.
reorgSafetyLimit = 100
)
var (
@ -163,8 +158,8 @@ func (b *BtcdNotifier) Start() error {
}
b.txNotifier = chainntnfs.NewTxNotifier(
uint32(currentHeight), reorgSafetyLimit, b.confirmHintCache,
b.spendHintCache,
uint32(currentHeight), chainntnfs.ReorgSafetyLimit,
b.confirmHintCache, b.spendHintCache,
)
b.bestBlock = chainntnfs.BlockEpoch{

View File

@ -29,8 +29,8 @@ func (b *BtcdNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash,
}
b.txNotifier = chainntnfs.NewTxNotifier(
uint32(bestHeight), reorgSafetyLimit, b.confirmHintCache,
b.spendHintCache,
uint32(bestHeight), chainntnfs.ReorgSafetyLimit,
b.confirmHintCache, b.spendHintCache,
)
b.chainUpdates.Start()

View File

@ -25,12 +25,6 @@ const (
// notifierType uniquely identifies this concrete implementation of the
// ChainNotifier interface.
notifierType = "neutrino"
// reorgSafetyLimit is the chain depth beyond which it is assumed a block
// will not be reorganized out of the chain. This is used to determine when
// to prune old confirmation requests so that reorgs are handled correctly.
// The coinbase maturity period is a reasonable value to use.
reorgSafetyLimit = 100
)
var (
@ -159,7 +153,7 @@ func (n *NeutrinoNotifier) Start() error {
}
n.txNotifier = chainntnfs.NewTxNotifier(
n.bestHeight, reorgSafetyLimit, n.confirmHintCache,
n.bestHeight, chainntnfs.ReorgSafetyLimit, n.confirmHintCache,
n.spendHintCache,
)

View File

@ -49,8 +49,8 @@ func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32,
}
n.txNotifier = chainntnfs.NewTxNotifier(
uint32(bestHeight), reorgSafetyLimit, n.confirmHintCache,
n.spendHintCache,
uint32(bestHeight), chainntnfs.ReorgSafetyLimit,
n.confirmHintCache, n.spendHintCache,
)
n.chainConn = &NeutrinoChainConn{n.p2pNode}

View File

@ -10,6 +10,19 @@ import (
"github.com/btcsuite/btcutil"
)
const (
// ReorgSafetyLimit is the chain depth beyond which it is assumed a
// block will not be reorganized out of the chain. This is used to
// determine when to prune old confirmation requests so that reorgs are
// handled correctly. The average number of blocks in a day is a
// reasonable value to use.
ReorgSafetyLimit = 144
// MaxNumConfs is the maximum number of confirmations that can be
// requested on a transaction.
MaxNumConfs = ReorgSafetyLimit
)
var (
// ErrTxNotifierExiting is an error returned when attempting to interact
// with the TxNotifier but it has been shut down.
@ -17,7 +30,8 @@ var (
// ErrTxMaxConfs signals that the user requested a number of
// confirmations beyond the reorg safety limit.
ErrTxMaxConfs = errors.New("too many confirmations requested")
ErrTxMaxConfs = fmt.Errorf("too many confirmations requested, max is %d",
MaxNumConfs)
)
// rescanState indicates the progression of a registration before the notifier
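
The new test below exercises this bound through RegisterConf. As a hedged sketch (only the constants and the error appear in this diff; the enforcement site inside TxNotifier is not shown), the check it implies amounts to the following, with a hypothetical helper name:

package sketch

import "github.com/lightningnetwork/lnd/chainntnfs"

// checkNumConfs mirrors the bound RegisterConf is expected to enforce after
// this change: confirmation requests deeper than MaxNumConfs cannot be
// tracked reliably, because state older than ReorgSafetyLimit is pruned.
func checkNumConfs(ntfn *chainntnfs.ConfNtfn) error {
	if ntfn.NumConfirmations > chainntnfs.MaxNumConfs {
		return chainntnfs.ErrTxMaxConfs
	}
	return nil
}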

View File

@ -99,6 +99,36 @@ func newMockHintCache() *mockHintCache {
}
}
// TestTxNotifierMaxConfs ensures that we are not able to register for more
// confirmations on a transaction than the maximum supported.
func TestTxNotifierMaxConfs(t *testing.T) {
t.Parallel()
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(
10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
)
// Registering one confirmation above the maximum should fail with
// ErrTxMaxConfs.
ntfn := &chainntnfs.ConfNtfn{
ConfID: 1,
TxID: &zeroHash,
NumConfirmations: chainntnfs.MaxNumConfs + 1,
Event: chainntnfs.NewConfirmationEvent(
chainntnfs.MaxNumConfs,
),
}
if _, err := n.RegisterConf(ntfn); err != chainntnfs.ErrTxMaxConfs {
t.Fatalf("expected chainntnfs.ErrTxMaxConfs, got %v", err)
}
ntfn.NumConfirmations--
if _, err := n.RegisterConf(ntfn); err != nil {
t.Fatalf("unable to register conf ntfn: %v", err)
}
}
// TestTxNotifierFutureConfDispatch tests that the TxNotifier dispatches
// registered notifications when a transaction confirms after registration.
func TestTxNotifierFutureConfDispatch(t *testing.T) {
@ -116,7 +146,9 @@ func TestTxNotifierFutureConfDispatch(t *testing.T) {
)
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(10, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
)
// Create the test transactions and register them with the TxNotifier
// before including them in a block to receive future
@ -294,7 +326,9 @@ func TestTxNotifierHistoricalConfDispatch(t *testing.T) {
)
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(10, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
)
// Create the test transactions at a height before the TxNotifier's
// starting height so that they are already confirmed when registered.
@ -436,7 +470,9 @@ func TestTxNotifierFutureSpendDispatch(t *testing.T) {
t.Parallel()
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(10, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
)
// We'll start off by registering for a spend notification of an
// outpoint.
@ -520,7 +556,10 @@ func TestTxNotifierHistoricalSpendDispatch(t *testing.T) {
const startingHeight = 10
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
hintCache,
)
// We'll start by constructing the spending details of the outpoint
// below.
@ -601,7 +640,10 @@ func TestTxNotifierMultipleHistoricalConfRescans(t *testing.T) {
const startingHeight = 10
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
hintCache,
)
// The first registration for a transaction in the notifier should
// request a historical confirmation rescan as it does not have a
@ -667,7 +709,10 @@ func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) {
const startingHeight = 10
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
hintCache,
)
// The first registration for an outpoint in the notifier should request
// a historical spend rescan as it does not have a historical view of
@ -745,7 +790,10 @@ func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) {
)
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
hintCache,
)
// We'll start off by registering 5 clients for a confirmation
// notification on the same transaction.
@ -900,7 +948,10 @@ func TestTxNotifierCancelSpend(t *testing.T) {
const startingHeight = 10
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
hintCache,
)
// We'll register two notification requests. Only the second one will be
// canceled.
@ -992,7 +1043,9 @@ func TestTxNotifierConfReorg(t *testing.T) {
)
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(7, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
7, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
)
// Tx 1 will be confirmed in block 9 and requires 2 confs.
tx1Hash := tx1.TxHash()
@ -1262,7 +1315,10 @@ func TestTxNotifierSpendReorg(t *testing.T) {
const startingHeight = 10
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
hintCache,
)
// We'll have two outpoints that will be spent throughout the test. The
// first will be spent and will not experience a reorg, while the second
@ -1482,7 +1538,10 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
// Initialize our TxNotifier instance backed by a height hint cache.
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
hintCache,
)
// Create two test transactions and register them for notifications.
tx1 := wire.MsgTx{Version: 1}
@ -1682,7 +1741,10 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
// Initialize our TxNotifier instance backed by a height hint cache.
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
hintCache,
)
// Create two test outpoints and register them for spend notifications.
op1 := wire.OutPoint{Hash: zeroHash, Index: 1}
@ -1854,7 +1916,9 @@ func TestTxNotifierTearDown(t *testing.T) {
t.Parallel()
hintCache := newMockHintCache()
n := chainntnfs.NewTxNotifier(10, 100, hintCache, hintCache)
n := chainntnfs.NewTxNotifier(
10, chainntnfs.ReorgSafetyLimit, hintCache, hintCache,
)
// To begin the test, we'll register for a confirmation and spend
// notification.

View File

@ -161,6 +161,13 @@ type ChannelConstraints struct {
// acted upon in the case of a unilateral channel closure or a contract
// breach.
MaxAcceptedHtlcs uint16
// CsvDelay is the relative time lock delay expressed in blocks. Any
// settled outputs that pay to the owner of this channel configuration
// MUST ensure that the delay branch uses this value as the relative
// time lock. Similarly, any HTLC's offered by this node should use
// this value as well.
CsvDelay uint16
}
// ChannelConfig is a struct that houses the various configuration options for
@ -176,13 +183,6 @@ type ChannelConfig struct {
// by a participant.
ChannelConstraints
// CsvDelay is the relative time lock delay expressed in blocks. Any
// settled outputs that pay to the owner of this channel configuration
// MUST ensure that the delay branch uses this value as the relative
// time lock. Similarly, any HTLC's offered by this node should use
// this value as well.
CsvDelay uint16
// MultiSigKey is the key to be used within the 2-of-2 output script
// for the owner of this channel config.
MultiSigKey keychain.KeyDescriptor
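
Because ChannelConstraints is embedded in ChannelConfig, existing reads such as cfg.CsvDelay keep compiling through Go field promotion after this move; only composite literals must change, which is why the remaining hunks in this commit relocate the CsvDelay key into the inner ChannelConstraints literal. A minimal sketch, with an illustrative value:

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/channeldb"
)

func main() {
	// CsvDelay now has to be set inside the embedded ChannelConstraints
	// literal; a top-level CsvDelay key in ChannelConfig no longer compiles.
	cfg := channeldb.ChannelConfig{
		ChannelConstraints: channeldb.ChannelConstraints{
			CsvDelay: 144, // illustrative value
		},
	}

	// Reads are unaffected: the embedded field is promoted.
	fmt.Println(cfg.CsvDelay)
}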

View File

@ -136,8 +136,8 @@ func createTestChannelState(cdb *DB) (*OpenChannel, error) {
ChanReserve: btcutil.Amount(rand.Int63()),
MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
MaxAcceptedHtlcs: uint16(rand.Int31()),
CsvDelay: uint16(rand.Int31()),
},
CsvDelay: uint16(rand.Int31()),
MultiSigKey: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
},
@ -161,8 +161,8 @@ func createTestChannelState(cdb *DB) (*OpenChannel, error) {
ChanReserve: btcutil.Amount(rand.Int63()),
MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
MaxAcceptedHtlcs: uint16(rand.Int31()),
CsvDelay: uint16(rand.Int31()),
},
CsvDelay: uint16(rand.Int31()),
MultiSigKey: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
KeyLocator: keychain.KeyLocator{

View File

@ -1072,20 +1072,25 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) {
return
}
// As we're the responder, we get to specify the number of
// confirmations that we require before both of us consider the channel
// open. We'll use out mapping to derive the proper number of
// confirmations based on the amount of the channel, and also if any
// funds are being pushed to us.
// As we're the responder, we get to specify the number of confirmations
// that we require before both of us consider the channel open. We'll
// use our mapping to derive the proper number of confirmations based on
// the amount of the channel, and also if any funds are being pushed to
// us.
numConfsReq := f.cfg.NumRequiredConfs(msg.FundingAmount, msg.PushAmount)
reservation.SetNumConfsRequired(numConfsReq)
// We'll also validate and apply all the constraints the initiating
// party is attempting to dictate for our commitment transaction.
err = reservation.CommitConstraints(
msg.CsvDelay, msg.MaxAcceptedHTLCs, msg.MaxValueInFlight,
msg.HtlcMinimum, msg.ChannelReserve, msg.DustLimit,
)
channelConstraints := &channeldb.ChannelConstraints{
DustLimit: msg.DustLimit,
ChanReserve: msg.ChannelReserve,
MaxPendingAmount: msg.MaxValueInFlight,
MinHTLC: msg.HtlcMinimum,
MaxAcceptedHtlcs: msg.MaxAcceptedHTLCs,
CsvDelay: msg.CsvDelay,
}
err = reservation.CommitConstraints(channelConstraints)
if err != nil {
fndgLog.Errorf("Unacceptable channel constraints: %v", err)
f.failFundingFlow(fmsg.peer, fmsg.msg.PendingChannelID, err)
@ -1136,8 +1141,8 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) {
ChanReserve: chanReserve,
MinHTLC: minHtlc,
MaxAcceptedHtlcs: maxHtlcs,
CsvDelay: remoteCsvDelay,
},
CsvDelay: remoteCsvDelay,
MultiSigKey: keychain.KeyDescriptor{
PubKey: copyPubKey(msg.FundingKey),
},
@ -1225,14 +1230,31 @@ func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) {
fndgLog.Infof("Recv'd fundingResponse for pendingID(%x)", pendingChanID[:])
// The required number of confirmations should not be greater than the
// maximum number of confirmations required by the ChainNotifier to
// properly dispatch confirmations.
if msg.MinAcceptDepth > chainntnfs.MaxNumConfs {
err := lnwallet.ErrNumConfsTooLarge(
msg.MinAcceptDepth, chainntnfs.MaxNumConfs,
)
fndgLog.Warnf("Unacceptable channel constraints: %v", err)
f.failFundingFlow(fmsg.peer, fmsg.msg.PendingChannelID, err)
return
}
// We'll also specify the responder's preference for the number of
// required confirmations, and also the set of channel constraints
// they've specified for commitment states we can create.
resCtx.reservation.SetNumConfsRequired(uint16(msg.MinAcceptDepth))
err = resCtx.reservation.CommitConstraints(
msg.CsvDelay, msg.MaxAcceptedHTLCs, msg.MaxValueInFlight,
msg.HtlcMinimum, msg.ChannelReserve, msg.DustLimit,
)
channelConstraints := &channeldb.ChannelConstraints{
DustLimit: msg.DustLimit,
ChanReserve: msg.ChannelReserve,
MaxPendingAmount: msg.MaxValueInFlight,
MinHTLC: msg.HtlcMinimum,
MaxAcceptedHtlcs: msg.MaxAcceptedHTLCs,
CsvDelay: msg.CsvDelay,
}
err = resCtx.reservation.CommitConstraints(channelConstraints)
if err != nil {
fndgLog.Warnf("Unacceptable channel constraints: %v", err)
f.failFundingFlow(fmsg.peer, fmsg.msg.PendingChannelID, err)
@ -1259,8 +1281,8 @@ func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) {
ChanReserve: chanReserve,
MinHTLC: resCtx.remoteMinHtlc,
MaxAcceptedHtlcs: maxHtlcs,
CsvDelay: resCtx.remoteCsvDelay,
},
CsvDelay: resCtx.remoteCsvDelay,
MultiSigKey: keychain.KeyDescriptor{
PubKey: copyPubKey(msg.FundingKey),
},
@ -1860,7 +1882,7 @@ func (f *fundingManager) waitForFundingConfirmation(completeChan *channeldb.Open
)
if err != nil {
fndgLog.Errorf("Unable to register for confirmation of "+
"ChannelPoint(%v)", completeChan.FundingOutpoint)
"ChannelPoint(%v): %v", completeChan.FundingOutpoint, err)
return
}

View File

@ -11,6 +11,7 @@ import (
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
@ -2539,3 +2540,72 @@ func TestFundingManagerRejectPush(t *testing.T) {
string(err.Data))
}
}
// TestFundingManagerMaxConfs ensures that we don't accept a funding proposal
// that proposes a MinAcceptDepth greater than the maximum number of
// confirmations we're willing to accept.
func TestFundingManagerMaxConfs(t *testing.T) {
t.Parallel()
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// Create a funding request and start the workflow.
updateChan := make(chan *lnrpc.OpenStatusUpdate)
errChan := make(chan error, 1)
initReq := &openChanReq{
targetPubkey: bob.privKey.PubKey(),
chainHash: *activeNetParams.GenesisHash,
localFundingAmt: 500000,
pushAmt: lnwire.NewMSatFromSatoshis(10),
private: false,
updates: updateChan,
err: errChan,
}
alice.fundingMgr.initFundingWorkflow(bob, initReq)
// Alice should have sent the OpenChannel message to Bob.
var aliceMsg lnwire.Message
select {
case aliceMsg = <-alice.msgChan:
case err := <-initReq.err:
t.Fatalf("error init funding workflow: %v", err)
case <-time.After(time.Second * 5):
t.Fatalf("alice did not send OpenChannel message")
}
openChannelReq, ok := aliceMsg.(*lnwire.OpenChannel)
if !ok {
errorMsg, gotError := aliceMsg.(*lnwire.Error)
if gotError {
t.Fatalf("expected OpenChannel to be sent "+
"from bob, instead got error: %v",
lnwire.ErrorCode(errorMsg.Data[0]))
}
t.Fatalf("expected OpenChannel to be sent from "+
"alice, instead got %T", aliceMsg)
}
// Let Bob handle the init message.
bob.fundingMgr.processFundingOpen(openChannelReq, alice)
// Bob should answer with an AcceptChannel message.
acceptChannelResponse := assertFundingMsgSent(
t, bob.msgChan, "AcceptChannel",
).(*lnwire.AcceptChannel)
// Modify the AcceptChannel message Bob is proposing to include a
// MinAcceptDepth Alice won't be willing to accept.
acceptChannelResponse.MinAcceptDepth = chainntnfs.MaxNumConfs + 1
alice.fundingMgr.processFundingAccept(acceptChannelResponse, bob)
// Alice should respond back with an error indicating MinAcceptDepth is
// too large.
err := assertFundingMsgSent(t, alice.msgChan, "Error").(*lnwire.Error)
if !strings.Contains(string(err.Data), "minimum depth") {
t.Fatalf("expected ErrNumConfsTooLarge, got \"%v\"",
string(err.Data))
}
}

View File

@ -169,6 +169,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
ChanReserve: aliceReserve,
MinHTLC: 0,
MaxAcceptedHtlcs: lnwallet.MaxHTLCNumber / 2,
CsvDelay: uint16(csvTimeoutAlice),
}
bobConstraints := &channeldb.ChannelConstraints{
@ -178,6 +179,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
ChanReserve: bobReserve,
MinHTLC: 0,
MaxAcceptedHtlcs: lnwallet.MaxHTLCNumber / 2,
CsvDelay: uint16(csvTimeoutBob),
}
var hash [sha256.Size]byte
@ -195,7 +197,6 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
aliceCfg := channeldb.ChannelConfig{
ChannelConstraints: *aliceConstraints,
CsvDelay: uint16(csvTimeoutAlice),
MultiSigKey: keychain.KeyDescriptor{
PubKey: aliceKeyPub,
},
@ -214,7 +215,6 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
}
bobCfg := channeldb.ChannelConfig{
ChannelConstraints: *bobConstraints,
CsvDelay: uint16(csvTimeoutBob),
MultiSigKey: keychain.KeyDescriptor{
PubKey: bobKeyPub,
},

View File

@ -123,6 +123,15 @@ func ErrMaxValueInFlightTooSmall(maxValInFlight,
}
}
// ErrNumConfsTooLarge returns an error indicating that the number of
// confirmations required for a channel is too large.
func ErrNumConfsTooLarge(numConfs, maxNumConfs uint32) error {
return ReservationError{
fmt.Errorf("minimum depth of %d is too large, max is %d",
numConfs, maxNumConfs),
}
}
// ErrChanTooSmall returns an error indicating that an incoming channel request
// was too small. We'll reject any incoming channels if they're below our
// configured value for the min channel size we'll accept.
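
Call sites in the funding manager treat this as a negotiation failure rather than an internal one. A hedged usage sketch, assuming ReservationError is the exported wrapper constructed above; the confirmation counts are illustrative:

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lnwallet"
)

func main() {
	// The peer asked for 200 confirmations while this commit caps
	// requests at 144 (chainntnfs.MaxNumConfs); values are illustrative.
	err := lnwallet.ErrNumConfsTooLarge(200, 144)

	// Constraint violations can be told apart from internal failures by
	// checking for the ReservationError wrapper used above.
	if _, ok := err.(lnwallet.ReservationError); ok {
		fmt.Println("rejecting channel:", err)
	}
}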

View File

@ -429,11 +429,15 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
t.Fatalf("unable to initialize funding reservation: %v", err)
}
aliceChanReservation.SetNumConfsRequired(numReqConfs)
err = aliceChanReservation.CommitConstraints(
csvDelay, lnwallet.MaxHTLCNumber/2,
lnwire.NewMSatFromSatoshis(fundingAmount), 1, fundingAmount/100,
lnwallet.DefaultDustLimit(),
)
channelConstraints := &channeldb.ChannelConstraints{
DustLimit: lnwallet.DefaultDustLimit(),
ChanReserve: fundingAmount / 100,
MaxPendingAmount: lnwire.NewMSatFromSatoshis(fundingAmount),
MinHTLC: 1,
MaxAcceptedHtlcs: lnwallet.MaxHTLCNumber / 2,
CsvDelay: csvDelay,
}
err = aliceChanReservation.CommitConstraints(channelConstraints)
if err != nil {
t.Fatalf("unable to verify constraints: %v", err)
}
@ -467,11 +471,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
if err != nil {
t.Fatalf("bob unable to init channel reservation: %v", err)
}
err = bobChanReservation.CommitConstraints(
csvDelay, lnwallet.MaxHTLCNumber/2,
lnwire.NewMSatFromSatoshis(fundingAmount), 1, fundingAmount/100,
lnwallet.DefaultDustLimit(),
)
err = bobChanReservation.CommitConstraints(channelConstraints)
if err != nil {
t.Fatalf("unable to verify constraints: %v", err)
}
@ -861,11 +861,15 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
t.Fatalf("unable to init channel reservation: %v", err)
}
aliceChanReservation.SetNumConfsRequired(numReqConfs)
err = aliceChanReservation.CommitConstraints(
csvDelay, lnwallet.MaxHTLCNumber/2,
lnwire.NewMSatFromSatoshis(fundingAmt), 1, fundingAmt/100,
lnwallet.DefaultDustLimit(),
)
channelConstraints := &channeldb.ChannelConstraints{
DustLimit: lnwallet.DefaultDustLimit(),
ChanReserve: fundingAmt / 100,
MaxPendingAmount: lnwire.NewMSatFromSatoshis(fundingAmt),
MinHTLC: 1,
MaxAcceptedHtlcs: lnwallet.MaxHTLCNumber / 2,
CsvDelay: csvDelay,
}
err = aliceChanReservation.CommitConstraints(channelConstraints)
if err != nil {
t.Fatalf("unable to verify constraints: %v", err)
}
@ -899,11 +903,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
if err != nil {
t.Fatalf("unable to create bob reservation: %v", err)
}
err = bobChanReservation.CommitConstraints(
csvDelay, lnwallet.MaxHTLCNumber/2,
lnwire.NewMSatFromSatoshis(fundingAmt), 1, fundingAmt/100,
lnwallet.DefaultDustLimit(),
)
err = bobChanReservation.CommitConstraints(channelConstraints)
if err != nil {
t.Fatalf("unable to verify constraints: %v", err)
}

View File

@ -282,72 +282,72 @@ func (r *ChannelReservation) SetNumConfsRequired(numConfs uint16) {
// of satoshis that can be transferred in a single commitment. This function
// will also attempt to verify the constraints for sanity, returning an error
// if the parameters are deemed unsound.
func (r *ChannelReservation) CommitConstraints(csvDelay, maxHtlcs uint16,
maxValueInFlight, minHtlc lnwire.MilliSatoshi,
chanReserve, dustLimit btcutil.Amount) error {
func (r *ChannelReservation) CommitConstraints(c *channeldb.ChannelConstraints) error {
r.Lock()
defer r.Unlock()
// Fail if we consider csvDelay excessively large.
// TODO(halseth): find a more scientific choice of value.
const maxDelay = 10000
if csvDelay > maxDelay {
return ErrCsvDelayTooLarge(csvDelay, maxDelay)
if c.CsvDelay > maxDelay {
return ErrCsvDelayTooLarge(c.CsvDelay, maxDelay)
}
// The dust limit should always be greater or equal to the channel
// reserve. The reservation request should be denied if otherwise.
if dustLimit > chanReserve {
return ErrChanReserveTooSmall(chanReserve, dustLimit)
if c.DustLimit > c.ChanReserve {
return ErrChanReserveTooSmall(c.ChanReserve, c.DustLimit)
}
// Fail if we consider the channel reserve to be too large. We
// currently fail if it is greater than 20% of the channel capacity.
maxChanReserve := r.partialState.Capacity / 5
if chanReserve > maxChanReserve {
return ErrChanReserveTooLarge(chanReserve, maxChanReserve)
if c.ChanReserve > maxChanReserve {
return ErrChanReserveTooLarge(c.ChanReserve, maxChanReserve)
}
// Fail if the minimum HTLC value is too large. If this is too large,
// the channel won't be useful for sending small payments. This limit
// is currently set to maxValueInFlight, effectively letting the remote
// setting this as large as it wants.
if minHtlc > maxValueInFlight {
return ErrMinHtlcTooLarge(minHtlc, maxValueInFlight)
if c.MinHTLC > c.MaxPendingAmount {
return ErrMinHtlcTooLarge(c.MinHTLC, c.MaxPendingAmount)
}
// Fail if maxHtlcs is above the maximum allowed number of 483. This
// number is specified in BOLT-02.
if maxHtlcs > uint16(MaxHTLCNumber/2) {
return ErrMaxHtlcNumTooLarge(maxHtlcs, uint16(MaxHTLCNumber/2))
if c.MaxAcceptedHtlcs > uint16(MaxHTLCNumber/2) {
return ErrMaxHtlcNumTooLarge(
c.MaxAcceptedHtlcs, uint16(MaxHTLCNumber/2),
)
}
// Fail if we consider maxHtlcs too small. If this is too small we
// cannot offer many HTLCs to the remote.
const minNumHtlc = 5
if maxHtlcs < minNumHtlc {
return ErrMaxHtlcNumTooSmall(maxHtlcs, minNumHtlc)
if c.MaxAcceptedHtlcs < minNumHtlc {
return ErrMaxHtlcNumTooSmall(c.MaxAcceptedHtlcs, minNumHtlc)
}
// Fail if we consider maxValueInFlight too small. We currently require
// the remote to at least allow minNumHtlc * minHtlc in flight.
if maxValueInFlight < minNumHtlc*minHtlc {
return ErrMaxValueInFlightTooSmall(maxValueInFlight,
minNumHtlc*minHtlc)
if c.MaxPendingAmount < minNumHtlc*c.MinHTLC {
return ErrMaxValueInFlightTooSmall(
c.MaxPendingAmount, minNumHtlc*c.MinHTLC,
)
}
// Our dust limit should always be less than or equal our proposed
// Our dust limit should always be less than or equal to our proposed
// channel reserve.
if r.ourContribution.DustLimit > chanReserve {
r.ourContribution.DustLimit = chanReserve
if r.ourContribution.DustLimit > c.ChanReserve {
r.ourContribution.DustLimit = c.ChanReserve
}
r.ourContribution.ChannelConfig.CsvDelay = csvDelay
r.ourContribution.ChannelConfig.ChanReserve = chanReserve
r.ourContribution.ChannelConfig.MaxAcceptedHtlcs = maxHtlcs
r.ourContribution.ChannelConfig.MaxPendingAmount = maxValueInFlight
r.ourContribution.ChannelConfig.MinHTLC = minHtlc
r.ourContribution.ChanReserve = c.ChanReserve
r.ourContribution.MaxPendingAmount = c.MaxPendingAmount
r.ourContribution.MinHTLC = c.MinHTLC
r.ourContribution.MaxAcceptedHtlcs = c.MaxAcceptedHtlcs
r.ourContribution.CsvDelay = c.CsvDelay
return nil
}
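
For migration, a sketch of how an old call site maps onto the new single-struct signature; the wrapper function is hypothetical, and its parameter names are simply those of the removed signature above:

package sketch

import (
	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
)

// commitConstraints bundles the six positional arguments of the old
// CommitConstraints signature into a single channeldb.ChannelConstraints.
func commitConstraints(res *lnwallet.ChannelReservation,
	csvDelay, maxHtlcs uint16,
	maxValueInFlight, minHtlc lnwire.MilliSatoshi,
	chanReserve, dustLimit btcutil.Amount) error {

	return res.CommitConstraints(&channeldb.ChannelConstraints{
		DustLimit:        dustLimit,
		ChanReserve:      chanReserve,
		MaxPendingAmount: maxValueInFlight,
		MinHTLC:          minHtlc,
		MaxAcceptedHtlcs: maxHtlcs,
		CsvDelay:         csvDelay,
	})
}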

View File

@ -144,8 +144,8 @@ func CreateTestChannels() (*LightningChannel, *LightningChannel, func(), error)
ChanReserve: channelCapacity / 100,
MinHTLC: 0,
MaxAcceptedHtlcs: MaxHTLCNumber / 2,
CsvDelay: uint16(csvTimeoutAlice),
},
CsvDelay: uint16(csvTimeoutAlice),
MultiSigKey: keychain.KeyDescriptor{
PubKey: aliceKeys[0].PubKey(),
},
@ -169,8 +169,8 @@ func CreateTestChannels() (*LightningChannel, *LightningChannel, func(), error)
ChanReserve: channelCapacity / 100,
MinHTLC: 0,
MaxAcceptedHtlcs: MaxHTLCNumber / 2,
CsvDelay: uint16(csvTimeoutBob),
},
CsvDelay: uint16(csvTimeoutBob),
MultiSigKey: keychain.KeyDescriptor{
PubKey: bobKeys[0].PubKey(),
},

View File

@ -379,8 +379,8 @@ func TestCommitmentAndHTLCTransactions(t *testing.T) {
DustLimit: tc.dustLimit,
MaxPendingAmount: lnwire.NewMSatFromSatoshis(tc.fundingAmount),
MaxAcceptedHtlcs: MaxHTLCNumber,
CsvDelay: tc.localCsvDelay,
},
CsvDelay: tc.localCsvDelay,
MultiSigKey: keychain.KeyDescriptor{
PubKey: tc.localFundingPubKey,
},

View File

@ -117,8 +117,8 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
ChanReserve: btcutil.Amount(rand.Int63()),
MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
MaxAcceptedHtlcs: uint16(rand.Int31()),
CsvDelay: uint16(csvTimeoutAlice),
},
CsvDelay: uint16(csvTimeoutAlice),
MultiSigKey: keychain.KeyDescriptor{
PubKey: aliceKeyPub,
},
@ -142,8 +142,8 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
ChanReserve: btcutil.Amount(rand.Int63()),
MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
MaxAcceptedHtlcs: uint16(rand.Int31()),
CsvDelay: uint16(csvTimeoutBob),
},
CsvDelay: uint16(csvTimeoutBob),
MultiSigKey: keychain.KeyDescriptor{
PubKey: bobKeyPub,
},