2017-03-08 23:30:00 +01:00
|
|
|
package routing
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"image/color"
|
|
|
|
"net"
|
|
|
|
"sync"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
prand "math/rand"
|
|
|
|
|
2017-03-30 03:01:28 +02:00
|
|
|
"github.com/go-errors/errors"
|
2017-03-08 23:30:00 +01:00
|
|
|
"github.com/lightningnetwork/lnd/channeldb"
|
2017-03-30 03:01:28 +02:00
|
|
|
"github.com/lightningnetwork/lnd/lnwallet"
|
2017-03-08 23:30:00 +01:00
|
|
|
"github.com/lightningnetwork/lnd/lnwire"
|
2017-05-11 02:22:26 +02:00
|
|
|
"github.com/lightningnetwork/lnd/routing/chainview"
|
2017-03-08 23:30:00 +01:00
|
|
|
"github.com/roasbeef/btcd/btcec"
|
|
|
|
"github.com/roasbeef/btcd/chaincfg/chainhash"
|
|
|
|
"github.com/roasbeef/btcd/wire"
|
|
|
|
"github.com/roasbeef/btcutil"
|
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// testAddr and testAddrs give every generated test node a routable
	// address, as required when persisting nodes to channeldb.
	testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}),
		Port: 9000}
	testAddrs = []net.Addr{testAddr}

	// testFeatures is an empty global feature vector shared by all test
	// nodes.
	testFeatures = lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures)

	// testHash is a static hash used wherever a deterministic 32-byte
	// value is needed in the tests.
	testHash = [32]byte{
		0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
		0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
		0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
		0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
	}

	// priv1/priv2 back the two bitcoin keys used to construct test
	// channel edges. Errors are ignored as key generation from a fresh
	// curve cannot fail in practice.
	priv1, _    = btcec.NewPrivateKey(btcec.S256())
	bitcoinKey1 = priv1.PubKey()

	priv2, _    = btcec.NewPrivateKey(btcec.S256())
	bitcoinKey2 = priv2.PubKey()
)
|
|
|
|
|
2017-03-30 03:01:28 +02:00
|
|
|
func createTestNode() (*channeldb.LightningNode, error) {
|
2017-03-08 23:30:00 +01:00
|
|
|
updateTime := prand.Int63()
|
|
|
|
|
|
|
|
priv, err := btcec.NewPrivateKey(btcec.S256())
|
|
|
|
if err != nil {
|
2017-03-30 03:01:28 +02:00
|
|
|
return nil, errors.Errorf("unable create private key: %v", err)
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
pub := priv.PubKey().SerializeCompressed()
|
|
|
|
return &channeldb.LightningNode{
|
2017-07-14 21:32:00 +02:00
|
|
|
HaveNodeAnnouncement: true,
|
|
|
|
LastUpdate: time.Unix(updateTime, 0),
|
|
|
|
Addresses: testAddrs,
|
|
|
|
PubKey: priv.PubKey(),
|
|
|
|
Color: color.RGBA{1, 2, 3, 0},
|
|
|
|
Alias: "kek" + string(pub[:]),
|
|
|
|
AuthSig: testSig,
|
|
|
|
Features: testFeatures,
|
2017-03-08 23:30:00 +01:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2017-03-30 03:01:28 +02:00
|
|
|
func randEdgePolicy(chanID *lnwire.ShortChannelID,
|
2017-03-19 19:40:25 +01:00
|
|
|
node *channeldb.LightningNode) *channeldb.ChannelEdgePolicy {
|
2017-03-08 23:30:00 +01:00
|
|
|
|
2017-03-19 19:40:25 +01:00
|
|
|
return &channeldb.ChannelEdgePolicy{
|
2017-03-27 17:01:12 +02:00
|
|
|
Signature: testSig,
|
2017-03-19 19:40:25 +01:00
|
|
|
ChannelID: chanID.ToUint64(),
|
|
|
|
LastUpdate: time.Unix(int64(prand.Int31()), 0),
|
2017-03-08 23:30:00 +01:00
|
|
|
TimeLockDelta: uint16(prand.Int63()),
|
2017-08-22 08:43:20 +02:00
|
|
|
MinHTLC: lnwire.MilliSatoshi(prand.Int31()),
|
|
|
|
FeeBaseMSat: lnwire.MilliSatoshi(prand.Int31()),
|
|
|
|
FeeProportionalMillionths: lnwire.MilliSatoshi(prand.Int31()),
|
2017-03-19 19:40:25 +01:00
|
|
|
Node: node,
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-30 03:01:28 +02:00
|
|
|
func createChannelEdge(ctx *testCtx, bitcoinKey1, bitcoinKey2 []byte,
|
2017-08-22 08:43:20 +02:00
|
|
|
chanValue btcutil.Amount, fundingHeight uint32) (*wire.MsgTx, *wire.OutPoint,
|
2017-03-30 03:01:28 +02:00
|
|
|
*lnwire.ShortChannelID, error) {
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
fundingTx := wire.NewMsgTx(2)
|
2017-03-30 03:01:28 +02:00
|
|
|
_, tx, err := lnwallet.GenFundingPkScript(
|
|
|
|
bitcoinKey1,
|
|
|
|
bitcoinKey2,
|
2017-08-22 08:43:20 +02:00
|
|
|
int64(chanValue),
|
2017-03-30 03:01:28 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
fundingTx.TxOut = append(fundingTx.TxOut, tx)
|
2017-03-08 23:30:00 +01:00
|
|
|
chanUtxo := wire.OutPoint{
|
|
|
|
Hash: fundingTx.TxHash(),
|
|
|
|
Index: 0,
|
|
|
|
}
|
|
|
|
|
|
|
|
// With the utxo constructed, we'll mark it as closed.
|
2017-03-30 03:01:28 +02:00
|
|
|
ctx.chain.addUtxo(chanUtxo, tx)
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
// Our fake channel will be "confirmed" at height 101.
|
2017-03-30 03:01:28 +02:00
|
|
|
chanID := &lnwire.ShortChannelID{
|
2017-03-08 23:30:00 +01:00
|
|
|
BlockHeight: fundingHeight,
|
|
|
|
TxIndex: 0,
|
|
|
|
TxPosition: 0,
|
|
|
|
}
|
|
|
|
|
2017-03-30 03:01:28 +02:00
|
|
|
return fundingTx, &chanUtxo, chanID, nil
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// mockChain is an in-memory stand-in for a full blockchain backend. It
// tracks blocks by hash and height, a utxo set, and the current best block,
// guarded by its embedded RWMutex.
type mockChain struct {
	// blocks maps a block's hash to its full contents.
	blocks map[chainhash.Hash]*wire.MsgBlock

	// blockIndex maps a block height to the hash of the block at that
	// height.
	blockIndex map[uint32]chainhash.Hash

	// utxos is the set of spendable outputs known to the mock chain.
	utxos map[wire.OutPoint]wire.TxOut

	// bestHeight is the height of the current chain tip.
	bestHeight int32
	// bestHash is the hash of the current chain tip.
	// NOTE(review): not assigned in the code visible here — confirm it is
	// set elsewhere before relying on it.
	bestHash *chainhash.Hash

	sync.RWMutex
}

// A compile time check to ensure mockChain implements the
// lnwallet.BlockChainIO interface.
var _ lnwallet.BlockChainIO = (*mockChain)(nil)
|
|
|
|
|
2017-03-08 23:30:00 +01:00
|
|
|
func newMockChain(currentHeight uint32) *mockChain {
|
|
|
|
return &mockChain{
|
|
|
|
bestHeight: int32(currentHeight),
|
|
|
|
blocks: make(map[chainhash.Hash]*wire.MsgBlock),
|
|
|
|
utxos: make(map[wire.OutPoint]wire.TxOut),
|
|
|
|
blockIndex: make(map[uint32]chainhash.Hash),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *mockChain) setBestBlock(height int32) {
|
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
|
|
|
|
|
|
|
m.bestHeight = height
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *mockChain) GetBestBlock() (*chainhash.Hash, int32, error) {
|
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
2017-10-02 17:54:29 +02:00
|
|
|
blockHash := m.blockIndex[uint32(m.bestHeight)]
|
|
|
|
|
|
|
|
return &blockHash, m.bestHeight, nil
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
func (m *mockChain) GetTransaction(txid *chainhash.Hash) (*wire.MsgTx, error) {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *mockChain) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
|
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
hash, ok := m.blockIndex[uint32(blockHeight)]
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("can't find block hash, for "+
|
|
|
|
"height %v", blockHeight)
|
|
|
|
}
|
|
|
|
|
|
|
|
return &hash, nil
|
|
|
|
}
|
|
|
|
|
2017-03-30 03:01:28 +02:00
|
|
|
func (m *mockChain) addUtxo(op wire.OutPoint, out *wire.TxOut) {
|
2017-03-08 23:30:00 +01:00
|
|
|
m.Lock()
|
2017-03-30 03:01:28 +02:00
|
|
|
m.utxos[op] = *out
|
2017-03-08 23:30:00 +01:00
|
|
|
m.Unlock()
|
|
|
|
}
|
2017-05-11 02:22:26 +02:00
|
|
|
func (m *mockChain) GetUtxo(op *wire.OutPoint, _ uint32) (*wire.TxOut, error) {
|
2017-03-08 23:30:00 +01:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
utxo, ok := m.utxos[*op]
|
2017-03-08 23:30:00 +01:00
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("utxo not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
return &utxo, nil
|
|
|
|
}
|
|
|
|
|
2017-10-02 17:54:29 +02:00
|
|
|
func (m *mockChain) addBlock(block *wire.MsgBlock, height uint32, nonce uint32) {
|
2017-03-08 23:30:00 +01:00
|
|
|
m.Lock()
|
2017-10-02 17:54:29 +02:00
|
|
|
block.Header.Nonce = nonce
|
2017-03-08 23:30:00 +01:00
|
|
|
hash := block.Header.BlockHash()
|
|
|
|
m.blocks[hash] = block
|
|
|
|
m.blockIndex[height] = hash
|
|
|
|
m.Unlock()
|
|
|
|
}
|
|
|
|
func (m *mockChain) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error) {
|
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
block, ok := m.blocks[*blockHash]
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("block not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
return block, nil
|
|
|
|
}
|
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
// mockChainView is an in-memory FilteredChainView implementation that lets
// tests push connected/disconnected blocks to subscribers directly.
type mockChainView struct {
	sync.RWMutex

	// newBlocks delivers newly connected filtered blocks to subscribers.
	newBlocks chan *chainview.FilteredBlock

	// staleBlocks delivers disconnected (reorged-out) blocks.
	staleBlocks chan *chainview.FilteredBlock

	// filter is the set of outpoints the view has been asked to watch.
	filter map[wire.OutPoint]struct{}
}

// A compile time check to ensure mockChainView implements the
// chainview.FilteredChainView.
var _ chainview.FilteredChainView = (*mockChainView)(nil)
|
|
|
|
|
|
|
|
func newMockChainView() *mockChainView {
|
|
|
|
return &mockChainView{
|
|
|
|
newBlocks: make(chan *chainview.FilteredBlock, 10),
|
|
|
|
staleBlocks: make(chan *chainview.FilteredBlock, 10),
|
|
|
|
filter: make(map[wire.OutPoint]struct{}),
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
func (m *mockChainView) UpdateFilter(ops []wire.OutPoint, updateHeight uint32) error {
|
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2017-03-08 23:30:00 +01:00
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
for _, op := range ops {
|
|
|
|
m.filter[op] = struct{}{}
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
return nil
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
func (m *mockChainView) notifyBlock(hash chainhash.Hash, height uint32,
|
|
|
|
txns []*wire.MsgTx) {
|
|
|
|
|
2017-03-08 23:30:00 +01:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
m.newBlocks <- &chainview.FilteredBlock{
|
|
|
|
Hash: hash,
|
|
|
|
Height: height,
|
|
|
|
Transactions: txns,
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-02 17:54:29 +02:00
|
|
|
func (m *mockChainView) notifyStaleBlock(hash chainhash.Hash, height uint32,
|
|
|
|
txns []*wire.MsgTx) {
|
|
|
|
|
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
m.staleBlocks <- &chainview.FilteredBlock{
|
|
|
|
Hash: hash,
|
|
|
|
Height: height,
|
|
|
|
Transactions: txns,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
func (m *mockChainView) FilteredBlocks() <-chan *chainview.FilteredBlock {
|
|
|
|
return m.newBlocks
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
func (m *mockChainView) DisconnectedBlocks() <-chan *chainview.FilteredBlock {
|
|
|
|
return m.staleBlocks
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
func (m *mockChainView) FilterBlock(blockHash *chainhash.Hash) (*chainview.FilteredBlock, error) {
|
2017-10-02 17:54:29 +02:00
|
|
|
return &chainview.FilteredBlock{}, nil
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
func (m *mockChainView) Start() error {
|
2017-03-08 23:30:00 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-05-11 02:22:26 +02:00
|
|
|
func (m *mockChainView) Stop() error {
|
2017-03-08 23:30:00 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-03-19 19:40:25 +01:00
|
|
|
// TestEdgeUpdateNotification tests that when edges are updated or added,
|
2017-03-08 23:30:00 +01:00
|
|
|
// a proper notification is sent of to all registered clients.
|
|
|
|
func TestEdgeUpdateNotification(t *testing.T) {
|
2017-06-17 00:59:20 +02:00
|
|
|
t.Parallel()
|
|
|
|
|
2017-03-30 03:01:28 +02:00
|
|
|
ctx, cleanUp, err := createTestCtx(0)
|
2017-03-08 23:30:00 +01:00
|
|
|
defer cleanUp()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create router: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// First we'll create the utxo for the channel to be "closed"
|
2017-03-30 03:01:28 +02:00
|
|
|
const chanValue = 10000
|
|
|
|
fundingTx, chanPoint, chanID, err := createChannelEdge(ctx,
|
|
|
|
bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(),
|
|
|
|
chanValue, 0)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unbale create channel edge: %v", err)
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
// We'll also add a record for the block that included our funding
|
|
|
|
// transaction.
|
|
|
|
fundingBlock := &wire.MsgBlock{
|
|
|
|
Transactions: []*wire.MsgTx{fundingTx},
|
|
|
|
}
|
2017-10-02 17:54:29 +02:00
|
|
|
ctx.chain.addBlock(fundingBlock, chanID.BlockHeight, chanID.BlockHeight)
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
// Next we'll create two test nodes that the fake channel will be open
|
2017-07-14 21:32:00 +02:00
|
|
|
// between.
|
2017-03-19 19:40:25 +01:00
|
|
|
node1, err := createTestNode()
|
2017-03-08 23:30:00 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create test node: %v", err)
|
|
|
|
}
|
2017-03-19 19:40:25 +01:00
|
|
|
node2, err := createTestNode()
|
2017-03-08 23:30:00 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create test node: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, to conclude our test set up, we'll create a channel
|
2017-03-19 19:40:25 +01:00
|
|
|
// update to announce the created channel between the two nodes.
|
|
|
|
edge := &channeldb.ChannelEdgeInfo{
|
|
|
|
ChannelID: chanID.ToUint64(),
|
|
|
|
NodeKey1: node1.PubKey,
|
|
|
|
NodeKey2: node2.PubKey,
|
2017-03-30 03:01:28 +02:00
|
|
|
BitcoinKey1: bitcoinKey1,
|
|
|
|
BitcoinKey2: bitcoinKey2,
|
2017-03-19 19:40:25 +01:00
|
|
|
AuthProof: &channeldb.ChannelAuthProof{
|
|
|
|
NodeSig1: testSig,
|
|
|
|
NodeSig2: testSig,
|
|
|
|
BitcoinSig1: testSig,
|
|
|
|
BitcoinSig2: testSig,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := ctx.router.AddEdge(edge); err != nil {
|
2017-03-30 03:01:28 +02:00
|
|
|
t.Fatalf("unable to add edge: %v", err)
|
2017-03-19 19:40:25 +01:00
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
// With the channel edge now in place, we'll subscribe for topology
|
|
|
|
// notifications.
|
|
|
|
ntfnClient, err := ctx.router.SubscribeTopology()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to subscribe for channel notifications: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create random policy edges that are stemmed to the channel id
|
|
|
|
// created above.
|
2017-03-19 19:40:25 +01:00
|
|
|
edge1 := randEdgePolicy(chanID, node1)
|
2017-03-08 23:30:00 +01:00
|
|
|
edge1.Flags = 0
|
2017-03-19 19:40:25 +01:00
|
|
|
edge2 := randEdgePolicy(chanID, node2)
|
2017-03-08 23:30:00 +01:00
|
|
|
edge2.Flags = 1
|
|
|
|
|
2017-03-19 19:40:25 +01:00
|
|
|
if err := ctx.router.UpdateEdge(edge1); err != nil {
|
|
|
|
t.Fatalf("unable to add edge update: %v", err)
|
|
|
|
}
|
|
|
|
if err := ctx.router.UpdateEdge(edge2); err != nil {
|
|
|
|
t.Fatalf("unable to add edge update: %v", err)
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
2017-03-19 19:40:25 +01:00
|
|
|
assertEdgeCorrect := func(t *testing.T, edgeUpdate *ChannelEdgeUpdate,
|
|
|
|
edgeAnn *channeldb.ChannelEdgePolicy) {
|
|
|
|
if edgeUpdate.ChanID != edgeAnn.ChannelID {
|
2017-03-08 23:30:00 +01:00
|
|
|
t.Fatalf("channel ID of edge doesn't match: "+
|
|
|
|
"expected %v, got %v", chanID.ToUint64(), edgeUpdate.ChanID)
|
|
|
|
}
|
2017-03-30 03:01:28 +02:00
|
|
|
if edgeUpdate.ChanPoint != *chanPoint {
|
2017-03-14 04:32:59 +01:00
|
|
|
t.Fatalf("channel don't match: expected %v, got %v",
|
|
|
|
chanPoint, edgeUpdate.ChanPoint)
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
// TODO(roasbeef): this is a hack, needs to be removed
|
|
|
|
// after commitment fees are dynamic.
|
2017-05-01 20:45:02 +02:00
|
|
|
if edgeUpdate.Capacity != chanValue {
|
2017-03-08 23:30:00 +01:00
|
|
|
t.Fatalf("capacity of edge doesn't match: "+
|
|
|
|
"expected %v, got %v", chanValue, edgeUpdate.Capacity)
|
|
|
|
}
|
2017-08-22 08:43:20 +02:00
|
|
|
if edgeUpdate.MinHTLC != edgeAnn.MinHTLC {
|
2017-03-08 23:30:00 +01:00
|
|
|
t.Fatalf("min HTLC of edge doesn't match: "+
|
2017-08-22 08:43:20 +02:00
|
|
|
"expected %v, got %v", edgeAnn.MinHTLC,
|
2017-03-08 23:30:00 +01:00
|
|
|
edgeUpdate.MinHTLC)
|
|
|
|
}
|
2017-08-22 08:43:20 +02:00
|
|
|
if edgeUpdate.BaseFee != edgeAnn.FeeBaseMSat {
|
2017-03-08 23:30:00 +01:00
|
|
|
t.Fatalf("base fee of edge doesn't match: "+
|
2017-03-19 19:40:25 +01:00
|
|
|
"expected %v, got %v", edgeAnn.FeeBaseMSat,
|
2017-03-08 23:30:00 +01:00
|
|
|
edgeUpdate.BaseFee)
|
|
|
|
}
|
2017-08-22 08:43:20 +02:00
|
|
|
if edgeUpdate.FeeRate != edgeAnn.FeeProportionalMillionths {
|
2017-03-08 23:30:00 +01:00
|
|
|
t.Fatalf("fee rate of edge doesn't match: "+
|
|
|
|
"expected %v, got %v", edgeAnn.FeeProportionalMillionths,
|
|
|
|
edgeUpdate.FeeRate)
|
|
|
|
}
|
|
|
|
if edgeUpdate.TimeLockDelta != edgeAnn.TimeLockDelta {
|
|
|
|
t.Fatalf("time lock delta of edge doesn't match: "+
|
|
|
|
"expected %v, got %v", edgeAnn.TimeLockDelta,
|
|
|
|
edgeUpdate.TimeLockDelta)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but causes the test to fail otherwise.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is validated twice. The logic to dynamically handle the verification
of incoming notifications rests can be found here
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
// Create lookup map for notifications we are intending to receive. Entries
|
|
|
|
// are removed from the map when the anticipated notification is received.
|
|
|
|
var waitingFor = map[vertex]int{
|
|
|
|
newVertex(node1.PubKey): 1,
|
|
|
|
newVertex(node2.PubKey): 2,
|
|
|
|
}
|
|
|
|
|
2017-03-08 23:30:00 +01:00
|
|
|
const numEdgePolicies = 2
|
|
|
|
for i := 0; i < numEdgePolicies; i++ {
|
|
|
|
select {
|
|
|
|
case ntfn := <-ntfnClient.TopologyChanges:
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but causes the test to fail otherwise.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is validated twice. The logic to dynamically handle the verification
of incoming notifications rests can be found here
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
// For each processed announcement we should only receive a
|
|
|
|
// single announcement in a batch.
|
|
|
|
if len(ntfn.ChannelEdgeUpdates) != 1 {
|
|
|
|
t.Fatalf("expected 1 notification, instead have %v",
|
|
|
|
len(ntfn.ChannelEdgeUpdates))
|
|
|
|
}
|
|
|
|
|
2017-03-08 23:30:00 +01:00
|
|
|
edgeUpdate := ntfn.ChannelEdgeUpdates[0]
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but causes the test to fail otherwise.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is validated twice. The logic to dynamically handle the verification
of incoming notifications rests can be found here
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
nodeVertex := newVertex(edgeUpdate.AdvertisingNode)
|
|
|
|
|
|
|
|
if idx, ok := waitingFor[nodeVertex]; ok {
|
|
|
|
switch idx {
|
|
|
|
case 1:
|
|
|
|
// Received notification corresponding to edge1.
|
|
|
|
assertEdgeCorrect(t, edgeUpdate, edge1)
|
|
|
|
if !edgeUpdate.AdvertisingNode.IsEqual(node1.PubKey) {
|
|
|
|
t.Fatal("advertising node mismatch")
|
|
|
|
}
|
|
|
|
if !edgeUpdate.ConnectingNode.IsEqual(node2.PubKey) {
|
|
|
|
t.Fatal("connecting node mismatch")
|
|
|
|
}
|
|
|
|
|
|
|
|
case 2:
|
|
|
|
// Received notification corresponding to edge2.
|
|
|
|
assertEdgeCorrect(t, edgeUpdate, edge2)
|
|
|
|
if !edgeUpdate.AdvertisingNode.IsEqual(node2.PubKey) {
|
|
|
|
t.Fatal("advertising node mismatch")
|
|
|
|
}
|
|
|
|
if !edgeUpdate.ConnectingNode.IsEqual(node1.PubKey) {
|
|
|
|
t.Fatal("connecting node mismatch")
|
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
|
|
|
t.Fatal("invalid edge index")
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but causes the test to fail otherwise.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is validated twice. The logic to dynamically handle the verification
of incoming notifications rests can be found here
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
// Remove entry from waitingFor map to ensure we don't double count a
|
|
|
|
// repeat notification.
|
|
|
|
delete(waitingFor, nodeVertex)
|
2017-03-08 23:30:00 +01:00
|
|
|
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but the test fails when they are not.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is not validated twice. The logic to dynamically handle the verification
of incoming notifications can be found
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
} else {
|
|
|
|
t.Fatal("unexpected edge update received")
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but the test fails when they are not.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is not validated twice. The logic to dynamically handle the verification
of incoming notifications can be found
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
|
2017-03-08 23:30:00 +01:00
|
|
|
case <-time.After(time.Second * 5):
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but the test fails when they are not.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is not validated twice. The logic to dynamically handle the verification
of incoming notifications can be found
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
t.Fatal("edge update not received")
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestNodeUpdateNotification tests that notifications are sent out when nodes
|
|
|
|
// either join the network for the first time, or update their authenticated
|
|
|
|
// attributes with new data.
|
|
|
|
func TestNodeUpdateNotification(t *testing.T) {
|
2017-06-17 00:59:20 +02:00
|
|
|
t.Parallel()
|
|
|
|
|
2017-07-14 21:32:00 +02:00
|
|
|
const startingBlockHeight = 101
|
|
|
|
ctx, cleanUp, err := createTestCtx(startingBlockHeight)
|
2017-03-08 23:30:00 +01:00
|
|
|
defer cleanUp()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create router: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-07-14 21:32:00 +02:00
|
|
|
// We only accept node announcements from nodes having a known channel,
|
|
|
|
// so create one now.
|
|
|
|
const chanValue = 10000
|
|
|
|
fundingTx, _, chanID, err := createChannelEdge(ctx,
|
|
|
|
bitcoinKey1.SerializeCompressed(),
|
|
|
|
bitcoinKey2.SerializeCompressed(),
|
|
|
|
chanValue, startingBlockHeight)
|
2017-03-08 23:30:00 +01:00
|
|
|
if err != nil {
|
2017-07-14 21:32:00 +02:00
|
|
|
t.Fatalf("unable create channel edge: %v", err)
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
|
2017-07-14 21:32:00 +02:00
|
|
|
// We'll also add a record for the block that included our funding
|
|
|
|
// transaction.
|
|
|
|
fundingBlock := &wire.MsgBlock{
|
|
|
|
Transactions: []*wire.MsgTx{fundingTx},
|
|
|
|
}
|
2017-10-02 17:54:29 +02:00
|
|
|
ctx.chain.addBlock(fundingBlock, chanID.BlockHeight, chanID.BlockHeight)
|
2017-07-14 21:32:00 +02:00
|
|
|
|
|
|
|
// Create two nodes acting as endpoints in the created channel, and use
|
|
|
|
// them to trigger notifications by sending updated node announcement
|
|
|
|
// messages.
|
2017-03-19 19:40:25 +01:00
|
|
|
node1, err := createTestNode()
|
2017-03-08 23:30:00 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create test node: %v", err)
|
|
|
|
}
|
2017-03-19 19:40:25 +01:00
|
|
|
node2, err := createTestNode()
|
2017-03-08 23:30:00 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create test node: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-07-14 21:32:00 +02:00
|
|
|
edge := &channeldb.ChannelEdgeInfo{
|
|
|
|
ChannelID: chanID.ToUint64(),
|
|
|
|
NodeKey1: node1.PubKey,
|
|
|
|
NodeKey2: node2.PubKey,
|
|
|
|
BitcoinKey1: bitcoinKey1,
|
|
|
|
BitcoinKey2: bitcoinKey2,
|
|
|
|
AuthProof: &channeldb.ChannelAuthProof{
|
|
|
|
NodeSig1: testSig,
|
|
|
|
NodeSig2: testSig,
|
|
|
|
BitcoinSig1: testSig,
|
|
|
|
BitcoinSig2: testSig,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adding the edge will add the nodes to the graph, but with no info
|
|
|
|
// except the pubkey known.
|
|
|
|
if err := ctx.router.AddEdge(edge); err != nil {
|
|
|
|
t.Fatalf("unable to add edge: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a new client to receive notifications.
|
|
|
|
ntfnClient, err := ctx.router.SubscribeTopology()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to subscribe for channel notifications: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Change network topology by adding the updated info for the two nodes
|
|
|
|
// to the channel router.
|
2017-03-19 19:40:25 +01:00
|
|
|
if err := ctx.router.AddNode(node1); err != nil {
|
|
|
|
t.Fatalf("unable to add node: %v", err)
|
|
|
|
}
|
|
|
|
if err := ctx.router.AddNode(node2); err != nil {
|
|
|
|
t.Fatalf("unable to add node: %v", err)
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
2017-03-19 19:40:25 +01:00
|
|
|
assertNodeNtfnCorrect := func(t *testing.T, ann *channeldb.LightningNode,
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but the test fails when they are not.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is not validated twice. The logic to dynamically handle the verification
of incoming notifications can be found
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
nodeUpdate *NetworkNodeUpdate) {
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
// The notification received should directly map the
|
|
|
|
// announcement originally sent.
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but the test fails when they are not.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is not validated twice. The logic to dynamically handle the verification
of incoming notifications can be found
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
if nodeUpdate.Addresses[0] != ann.Addresses[0] {
|
2017-03-08 23:30:00 +01:00
|
|
|
t.Fatalf("node address doesn't match: expected %v, got %v",
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but the test fails when they are not.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is not validated twice. The logic to dynamically handle the verification
of incoming notifications can be found
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
nodeUpdate.Addresses[0], ann.Addresses[0])
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but the test fails when they are not.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is not validated twice. The logic to dynamically handle the verification
of incoming notifications can be found
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
if !nodeUpdate.IdentityKey.IsEqual(ann.PubKey) {
|
2017-03-08 23:30:00 +01:00
|
|
|
t.Fatalf("node identity keys don't match: expected %x, "+
|
2017-03-19 19:40:25 +01:00
|
|
|
"got %x", ann.PubKey.SerializeCompressed(),
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but the test fails when they are not.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is not validated twice. The logic to dynamically handle the verification
of incoming notifications can be found
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
nodeUpdate.IdentityKey.SerializeCompressed())
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but the test fails when they are not.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is not validated twice. The logic to dynamically handle the verification
of incoming notifications can be found
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
if nodeUpdate.Alias != ann.Alias {
|
2017-03-08 23:30:00 +01:00
|
|
|
t.Fatalf("node alias doesn't match: expected %v, got %v",
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but causes the test to fail otherwise.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is validated twice. The logic to dynamically handle the verification
of incoming notifications rests can be found here
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
ann.Alias, nodeUpdate.Alias)
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but causes the test to fail otherwise.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is validated twice. The logic to dynamically handle the verification
of incoming notifications rests can be found here
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
// Create lookup map for notifications we are intending to receive. Entries
|
|
|
|
// are removed from the map when the anticipated notification is received.
|
|
|
|
var waitingFor = map[vertex]int{
|
|
|
|
newVertex(node1.PubKey): 1,
|
|
|
|
newVertex(node2.PubKey): 2,
|
|
|
|
}
|
|
|
|
|
2017-03-08 23:30:00 +01:00
|
|
|
// Exactly two notifications should be sent, each corresponding to the
|
|
|
|
// node announcement messages sent above.
|
|
|
|
const numAnns = 2
|
|
|
|
for i := 0; i < numAnns; i++ {
|
|
|
|
select {
|
|
|
|
case ntfn := <-ntfnClient.TopologyChanges:
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but causes the test to fail otherwise.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is validated twice. The logic to dynamically handle the verification
of incoming notifications rests can be found here
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
// For each processed announcement we should only receive a
|
|
|
|
// single announcement in a batch.
|
|
|
|
if len(ntfn.NodeUpdates) != 1 {
|
|
|
|
t.Fatalf("expected 1 notification, instead have %v",
|
|
|
|
len(ntfn.NodeUpdates))
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeUpdate := ntfn.NodeUpdates[0]
|
|
|
|
nodeVertex := newVertex(nodeUpdate.IdentityKey)
|
|
|
|
if idx, ok := waitingFor[nodeVertex]; ok {
|
|
|
|
switch idx {
|
|
|
|
case 1:
|
|
|
|
// Received notification corresponding to node1.
|
|
|
|
assertNodeNtfnCorrect(t, node1, nodeUpdate)
|
|
|
|
|
|
|
|
case 2:
|
|
|
|
// Received notification corresponding to node2.
|
|
|
|
assertNodeNtfnCorrect(t, node2, nodeUpdate)
|
|
|
|
|
|
|
|
default:
|
|
|
|
t.Fatal("invalid node index")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove entry from waitingFor map to ensure we don't double count a
|
|
|
|
// repeat notification.
|
|
|
|
delete(waitingFor, nodeVertex)
|
|
|
|
|
|
|
|
} else {
|
|
|
|
t.Fatal("unexpected node update received")
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
case <-time.After(time.Second * 5):
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but causes the test to fail otherwise.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is validated twice. The logic to dynamically handle the verification
of incoming notifications rests can be found here
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
t.Fatal("node update not received")
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we receive a new update from a node (with a higher timestamp),
|
|
|
|
// then it should trigger a new notification.
|
|
|
|
// TODO(roasbeef): assume monotonic time.
|
2017-03-19 19:40:25 +01:00
|
|
|
nodeUpdateAnn := *node1
|
|
|
|
nodeUpdateAnn.LastUpdate = node1.LastUpdate.Add(300 * time.Millisecond)
|
2017-03-08 23:30:00 +01:00
|
|
|
|
2017-03-19 19:40:25 +01:00
|
|
|
// Add new node topology update to the channel router.
|
|
|
|
if err := ctx.router.AddNode(&nodeUpdateAnn); err != nil {
|
|
|
|
t.Fatalf("unable to add node: %v", err)
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
// Once again a notification should be received reflecting the up to
|
|
|
|
// date node announcement.
|
|
|
|
select {
|
|
|
|
case ntfn := <-ntfnClient.TopologyChanges:
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but causes the test to fail otherwise.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is validated twice. The logic to dynamically handle the verification
of incoming notifications rests can be found here
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
// For each processed announcement we should only receive a
|
|
|
|
// single announcement in a batch.
|
|
|
|
if len(ntfn.NodeUpdates) != 1 {
|
|
|
|
t.Fatalf("expected 1 notification, instead have %v",
|
|
|
|
len(ntfn.NodeUpdates))
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeUpdate := ntfn.NodeUpdates[0]
|
|
|
|
assertNodeNtfnCorrect(t, &nodeUpdateAnn, nodeUpdate)
|
|
|
|
|
2017-03-08 23:30:00 +01:00
|
|
|
case <-time.After(time.Second * 5):
|
routing/notifs: order invariant testing of ntfn delivery (#238)
Modifies the test cases in `TestEdgeUpdateNotification` and
`TestNodeUpdateNotification` to check for the possibility of notifications
being delivered out of order. This addresses some sporadic failures that
were observed when running the test suite.
I looked through some of the open issues but didn't see any addressing this
issue in particular, but if someone could point me to any relevant issues
that would be much appreciated!
Issue
-----
Currently the test suite validates notifications received in the order they
are submitted. The check fails because the verification of each
notification is statically linked to the order in which they are delivered,
seen
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L403)
and
[here](https://github.com/cfromknecht/lnd/blob/1be4d67ce41e65feee8ea05332dfc67a3437813d/routing/notifications_test.go#L499)
in `routing/notifications_test.go`. The notifications are typically
delivered in this order, but causes the test to fail otherwise.
Proposed Changes
-------------------
Construct an index that maps a public key to its corresponding edges and/or
nodes. When a notification is received, use its identifying public key and
the index to look up the edge/node to use for validation. Entries are
removed from the index after they are verified to ensure that the same
entry is validated twice. The logic to dynamically handle the verification
of incoming notifications rests can be found here
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L420)
and
[here](https://github.com/cfromknecht/lnd/blob/order-invariant-ntfns/routing/notifications_test.go#L539).
Encountered Errors
--------------------
* `TestEdgeUpdateNotification`: notifications_test.go:379: min HTLC of
edge doesn't match: expected 16.7401473 BTC, got 19.4852751 BTC
* `TestNodeUpdateNotification`: notifications_test.go:485: node identity
keys don't match: expected
027b139b2153ac5f3c83c2022e58b3219297d0fb3170739ee6391cddf2e06fe3e7, got
03921deafb61ee13d18e9d96c3ecd9e572e59c8dbd0bb922b5b6ac609d10fe4ee4
Recreating Failing Behavior
---------------------------
The failures can be somewhat difficult to recreate, I was able to reproduce
them by running the unit tests repeatedly until they showed up. I used the
following commands to bring them out of hiding:
```
./gotest.sh -i
go test -test.v ./routing && while [ $? -eq 0 ]; do go test -test.v ./routing; done
```
I was unable to recreate these errors, or any others in this package, after
making the proposed changes and leaving the script running continuously for
~30 minutes. Previously, I could consistently generate an error after ~20
seconds had elapsed on the latest commit in master at the time of writing:
78f6caf5d2e570fea0e5c05cc440cb7395a99c1d. Moar stability ftw!
2017-08-01 06:38:03 +02:00
|
|
|
t.Fatal("update not received")
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestNotificationCancellation tests that notifications are properly cancelled
|
|
|
|
// when the client wishes to exit.
|
|
|
|
func TestNotificationCancellation(t *testing.T) {
|
2017-06-17 00:59:20 +02:00
|
|
|
t.Parallel()
|
|
|
|
|
2017-03-08 23:30:00 +01:00
|
|
|
const startingBlockHeight = 101
|
|
|
|
ctx, cleanUp, err := createTestCtx(startingBlockHeight)
|
|
|
|
defer cleanUp()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create router: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a new client to receive notifications.
|
|
|
|
ntfnClient, err := ctx.router.SubscribeTopology()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to subscribe for channel notifications: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-07-14 21:32:00 +02:00
|
|
|
// We'll create the utxo for a new channel.
|
|
|
|
const chanValue = 10000
|
|
|
|
fundingTx, _, chanID, err := createChannelEdge(ctx,
|
|
|
|
bitcoinKey1.SerializeCompressed(),
|
|
|
|
bitcoinKey2.SerializeCompressed(),
|
|
|
|
chanValue, startingBlockHeight)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable create channel edge: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll also add a record for the block that included our funding
|
|
|
|
// transaction.
|
|
|
|
fundingBlock := &wire.MsgBlock{
|
|
|
|
Transactions: []*wire.MsgTx{fundingTx},
|
|
|
|
}
|
2017-10-02 17:54:29 +02:00
|
|
|
ctx.chain.addBlock(fundingBlock, chanID.BlockHeight, chanID.BlockHeight)
|
2017-07-14 21:32:00 +02:00
|
|
|
|
2017-03-19 19:40:25 +01:00
|
|
|
// We'll create a fresh new node topology update to feed to the channel
|
2017-03-08 23:30:00 +01:00
|
|
|
// router.
|
2017-07-14 21:32:00 +02:00
|
|
|
node1, err := createTestNode()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create test node: %v", err)
|
|
|
|
}
|
|
|
|
node2, err := createTestNode()
|
2017-03-08 23:30:00 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create test node: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Before we send the message to the channel router, we'll cancel the
|
|
|
|
// notifications for this client. As a result, the notification
|
2017-07-14 21:32:00 +02:00
|
|
|
// triggered by accepting the channel announcements shouldn't be sent
|
|
|
|
// to the client.
|
2017-03-08 23:30:00 +01:00
|
|
|
ntfnClient.Cancel()
|
|
|
|
|
2017-07-14 21:32:00 +02:00
|
|
|
edge := &channeldb.ChannelEdgeInfo{
|
|
|
|
ChannelID: chanID.ToUint64(),
|
|
|
|
NodeKey1: node1.PubKey,
|
|
|
|
NodeKey2: node2.PubKey,
|
|
|
|
BitcoinKey1: bitcoinKey1,
|
|
|
|
BitcoinKey2: bitcoinKey2,
|
|
|
|
AuthProof: &channeldb.ChannelAuthProof{
|
|
|
|
NodeSig1: testSig,
|
|
|
|
NodeSig2: testSig,
|
|
|
|
BitcoinSig1: testSig,
|
|
|
|
BitcoinSig2: testSig,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
if err := ctx.router.AddEdge(edge); err != nil {
|
|
|
|
t.Fatalf("unable to add edge: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := ctx.router.AddNode(node1); err != nil {
|
|
|
|
t.Fatalf("unable to add node: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := ctx.router.AddNode(node2); err != nil {
|
2017-03-19 19:40:25 +01:00
|
|
|
t.Fatalf("unable to add node: %v", err)
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
select {
|
2017-07-14 21:32:00 +02:00
|
|
|
// The notifications shouldn't be sent, however, the channel should be
|
2017-03-08 23:30:00 +01:00
|
|
|
// closed, causing the second read-value to be false.
|
|
|
|
case _, ok := <-ntfnClient.TopologyChanges:
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-03-19 19:40:25 +01:00
|
|
|
t.Fatal("notification sent but shouldn't have been")
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
case <-time.After(time.Second * 5):
|
2017-03-19 19:40:25 +01:00
|
|
|
t.Fatal("notification client never cancelled")
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestChannelCloseNotification tests that channel closure notifications are
|
|
|
|
// properly dispatched to all registered clients.
|
|
|
|
func TestChannelCloseNotification(t *testing.T) {
|
2017-06-17 00:59:20 +02:00
|
|
|
t.Parallel()
|
|
|
|
|
2017-03-08 23:30:00 +01:00
|
|
|
const startingBlockHeight = 101
|
|
|
|
ctx, cleanUp, err := createTestCtx(startingBlockHeight)
|
|
|
|
defer cleanUp()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create router: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// First we'll create the utxo for the channel to be "closed"
|
2017-03-30 03:01:28 +02:00
|
|
|
const chanValue = 10000
|
|
|
|
fundingTx, chanUtxo, chanID, err := createChannelEdge(ctx,
|
|
|
|
bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(),
|
|
|
|
chanValue, startingBlockHeight)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable create channel edge: %v", err)
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
// We'll also add a record for the block that included our funding
|
|
|
|
// transaction.
|
|
|
|
fundingBlock := &wire.MsgBlock{
|
|
|
|
Transactions: []*wire.MsgTx{fundingTx},
|
|
|
|
}
|
2017-10-02 17:54:29 +02:00
|
|
|
ctx.chain.addBlock(fundingBlock, chanID.BlockHeight, chanID.BlockHeight)
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
// Next we'll create two test nodes that the fake channel will be open
|
2017-07-14 21:32:00 +02:00
|
|
|
// between.
|
2017-03-19 19:40:25 +01:00
|
|
|
node1, err := createTestNode()
|
2017-03-08 23:30:00 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create test node: %v", err)
|
|
|
|
}
|
2017-03-19 19:40:25 +01:00
|
|
|
node2, err := createTestNode()
|
2017-03-08 23:30:00 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create test node: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, to conclude our test set up, we'll create a channel
|
|
|
|
// announcement to announce the created channel between the two nodes.
|
2017-03-19 19:40:25 +01:00
|
|
|
edge := &channeldb.ChannelEdgeInfo{
|
|
|
|
ChannelID: chanID.ToUint64(),
|
|
|
|
NodeKey1: node1.PubKey,
|
|
|
|
NodeKey2: node2.PubKey,
|
2017-03-30 03:01:28 +02:00
|
|
|
BitcoinKey1: bitcoinKey1,
|
|
|
|
BitcoinKey2: bitcoinKey2,
|
2017-03-19 19:40:25 +01:00
|
|
|
AuthProof: &channeldb.ChannelAuthProof{
|
|
|
|
NodeSig1: testSig,
|
|
|
|
NodeSig2: testSig,
|
|
|
|
BitcoinSig1: testSig,
|
|
|
|
BitcoinSig2: testSig,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
if err := ctx.router.AddEdge(edge); err != nil {
|
|
|
|
t.Fatalf("unable to add edge: %v", err)
|
|
|
|
}
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
// With the channel edge now in place, we'll subscribe for topology
|
|
|
|
// notifications.
|
|
|
|
ntfnClient, err := ctx.router.SubscribeTopology()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to subscribe for channel notifications: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, we'll simulate the closure of our channel by generating a new
|
|
|
|
// block at height 102 which spends the original multi-sig output of
|
|
|
|
// the channel.
|
|
|
|
blockHeight := uint32(102)
|
|
|
|
newBlock := &wire.MsgBlock{
|
|
|
|
Transactions: []*wire.MsgTx{
|
|
|
|
{
|
|
|
|
TxIn: []*wire.TxIn{
|
2017-03-09 05:44:32 +01:00
|
|
|
{
|
2017-03-30 03:01:28 +02:00
|
|
|
PreviousOutPoint: *chanUtxo,
|
2017-03-08 23:30:00 +01:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2017-10-02 17:54:29 +02:00
|
|
|
ctx.chain.addBlock(newBlock, blockHeight, blockHeight)
|
2017-05-11 02:22:26 +02:00
|
|
|
ctx.chainView.notifyBlock(newBlock.Header.BlockHash(), blockHeight,
|
|
|
|
newBlock.Transactions)
|
2017-03-08 23:30:00 +01:00
|
|
|
|
|
|
|
// The notification registered above should be sent, if not we'll time
|
|
|
|
// out and mark the test as failed.
|
|
|
|
select {
|
|
|
|
case ntfn := <-ntfnClient.TopologyChanges:
|
|
|
|
// We should have exactly a single notification for the channel
|
|
|
|
// "closed" above.
|
|
|
|
closedChans := ntfn.ClosedChannels
|
|
|
|
if len(closedChans) == 0 {
|
2017-03-19 19:40:25 +01:00
|
|
|
t.Fatal("close channel ntfn not populated")
|
2017-03-08 23:30:00 +01:00
|
|
|
} else if len(closedChans) != 1 {
|
|
|
|
t.Fatalf("only one should've been detected as closed, "+
|
|
|
|
"instead %v were", len(closedChans))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that the notification we received includes the proper
|
|
|
|
// update the for the channel that was closed in the generated
|
|
|
|
// block.
|
|
|
|
closedChan := closedChans[0]
|
|
|
|
if closedChan.ChanID != chanID.ToUint64() {
|
|
|
|
t.Fatalf("channel ID of closed channel doesn't match: "+
|
|
|
|
"expected %v, got %v", chanID.ToUint64(), closedChan.ChanID)
|
|
|
|
}
|
|
|
|
// TODO(roasbeef): this is a hack, needs to be removed
|
|
|
|
// after commitment fees are dynamic.
|
2017-05-01 20:45:02 +02:00
|
|
|
if closedChan.Capacity != chanValue {
|
2017-03-08 23:30:00 +01:00
|
|
|
t.Fatalf("capacity of closed channel doesn't match: "+
|
|
|
|
"expected %v, got %v", chanValue, closedChan.Capacity)
|
|
|
|
}
|
|
|
|
if closedChan.ClosedHeight != blockHeight {
|
|
|
|
t.Fatalf("close height of closed channel doesn't match: "+
|
|
|
|
"expected %v, got %v", blockHeight, closedChan.ClosedHeight)
|
|
|
|
}
|
2017-03-30 03:01:28 +02:00
|
|
|
if closedChan.ChanPoint != *chanUtxo {
|
2017-03-08 23:30:00 +01:00
|
|
|
t.Fatalf("chan point of closed channel doesn't match: "+
|
|
|
|
"expected %v, got %v", chanUtxo, closedChan.ChanPoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-time.After(time.Second * 5):
|
2017-03-19 19:40:25 +01:00
|
|
|
t.Fatal("notification not sent")
|
2017-03-08 23:30:00 +01:00
|
|
|
}
|
|
|
|
}
|