mirror of https://github.com/lightningnetwork/lnd.git

multi: move many t.Fatalf calls to require.NoError

Tommy Volk 2022-05-05 20:11:50 +00:00
parent 9e6f0ef46b
commit 9a10c80bcb
92 changed files with 1905 additions and 5565 deletions
autopilot
brontide
chainntnfs
chanbackup
channeldb
cluster
contractcourt
discovery
feature
funding
htlcswitch
input
invoices
keychain
lncfg
lnwallet
lnwire
macaroons
netann
peer
pool
routing
server_test.go
shachain
sweep
tlv
tor
watchtower
zpay32
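
The change is mechanical throughout: each three-line guard of the form "if err != nil { t.Fatalf(...) }" collapses into a single require.NoError assertion. Since require.NoError already appends the error value to its failure output, the trailing ": %v" verb is dropped from every message. A minimal before/after sketch of the pattern follows, where doSomething is a hypothetical stand-in for the fallible calls under test (randKey, addRandNode, and so on), not a function from lnd:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doSomething is a hypothetical fallible helper standing in for the
// real calls exercised by these tests.
func doSomething() error {
	return nil
}

// TestOldStyle shows the pattern being removed: a three-line guard
// that formats the error into the failure message by hand.
func TestOldStyle(t *testing.T) {
	if err := doSomething(); err != nil {
		t.Fatalf("unable to do something: %v", err)
	}
}

// TestNewStyle shows the replacement: require.NoError fails the test
// when err is non-nil and prints err after the supplied message.
func TestNewStyle(t *testing.T) {
	err := doSomething()
	require.NoError(t, err, "unable to do something")
}

The conversion preserves semantics because require (unlike assert) aborts the test via t.FailNow, matching t.Fatalf's behavior of stopping execution at the first failure.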


@ -11,6 +11,7 @@ import (
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/wire"
"github.com/stretchr/testify/require"
)
type moreChansResp struct {
@ -160,9 +161,7 @@ func setup(t *testing.T, initialChans []LocalChannel) (*testContext, func()) {
// First, we'll create all the dependencies that we'll need in order to
// create the autopilot agent.
self, err := randKey()
if err != nil {
t.Fatalf("unable to generate key: %v", err)
}
require.NoError(t, err, "unable to generate key")
quit := make(chan struct{})
heuristic := &mockHeuristic{
@ -216,9 +215,7 @@ func setup(t *testing.T, initialChans []LocalChannel) (*testContext, func()) {
}
agent, err := New(testCfg, initialChans)
if err != nil {
t.Fatalf("unable to create agent: %v", err)
}
require.NoError(t, err, "unable to create agent")
ctx.agent = agent
// With the autopilot agent and all its dependencies we'll start the
@ -331,9 +328,7 @@ func TestAgentHeuristicUpdateSignal(t *testing.T) {
defer cleanup()
pub, err := testCtx.graph.addRandNode()
if err != nil {
t.Fatalf("unable to generate key: %v", err)
}
require.NoError(t, err, "unable to generate key")
// We'll send an initial "no" response to advance the agent past its
// initial check.
@ -397,9 +392,7 @@ func TestAgentChannelFailureSignal(t *testing.T) {
testCtx.chanController = &mockFailingChanController{}
node, err := testCtx.graph.addRandNode()
if err != nil {
t.Fatalf("unable to add node: %v", err)
}
require.NoError(t, err, "unable to add node")
// First ensure the agent will attempt to open a new channel. Return
// that we need more channels, and have 5BTC to use.
@ -664,9 +657,7 @@ func TestAgentPendingChannelState(t *testing.T) {
// We'll only return a single directive for a pre-chosen node.
nodeKey, err := testCtx.graph.addRandNode()
if err != nil {
t.Fatalf("unable to generate key: %v", err)
}
require.NoError(t, err, "unable to generate key")
nodeID := NewNodeID(nodeKey)
nodeDirective := &NodeScore{
NodeID: nodeID,
@ -876,9 +867,7 @@ func TestAgentSkipPendingConns(t *testing.T) {
// We'll only return a single directive for a pre-chosen node.
nodeKey, err := testCtx.graph.addRandNode()
if err != nil {
t.Fatalf("unable to generate key: %v", err)
}
require.NoError(t, err, "unable to generate key")
nodeID := NewNodeID(nodeKey)
nodeDirective := &NodeScore{
NodeID: nodeID,
@ -888,9 +877,7 @@ func TestAgentSkipPendingConns(t *testing.T) {
// We'll also add a second node to the graph, to keep the first one
// company.
nodeKey2, err := testCtx.graph.addRandNode()
if err != nil {
t.Fatalf("unable to generate key: %v", err)
}
require.NoError(t, err, "unable to generate key")
nodeID2 := NewNodeID(nodeKey2)
// We'll send an initial "yes" response to advance the agent past its
@ -1062,9 +1049,7 @@ func TestAgentQuitWhenPendingConns(t *testing.T) {
// We'll only return a single directive for a pre-chosen node.
nodeKey, err := testCtx.graph.addRandNode()
if err != nil {
t.Fatalf("unable to generate key: %v", err)
}
require.NoError(t, err, "unable to generate key")
nodeID := NewNodeID(nodeKey)
nodeDirective := &NodeScore{
NodeID: nodeID,


@ -11,6 +11,7 @@ import (
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/stretchr/testify/require"
)
type genGraphFunc func() (testGraph, func(), error)
@ -77,9 +78,7 @@ func TestPrefAttachmentSelectEmptyGraph(t *testing.T) {
// Create a random public key, which we will query to get a score for.
pub, err := randKey()
if err != nil {
t.Fatalf("unable to generate key: %v", err)
}
require.NoError(t, err, "unable to generate key")
nodes := map[NodeID]struct{}{
NewNodeID(pub): {},


@ -14,6 +14,7 @@ import (
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/tor"
"github.com/stretchr/testify/require"
)
type maybeNetConn struct {
@ -103,9 +104,7 @@ func TestConnectionCorrectness(t *testing.T) {
// into local variables. If the initial crypto handshake fails, then
// we'll get a non-nil error here.
localConn, remoteConn, cleanUp, err := establishTestConnection()
if err != nil {
t.Fatalf("unable to establish test connection: %v", err)
}
require.NoError(t, err, "unable to establish test connection")
defer cleanUp()
// Test out some message full-message reads.
@ -155,9 +154,7 @@ func TestConnectionCorrectness(t *testing.T) {
// stalled.
func TestConcurrentHandshakes(t *testing.T) {
listener, netAddr, err := makeListener()
if err != nil {
t.Fatalf("unable to create listener connection: %v", err)
}
require.NoError(t, err, "unable to create listener connection")
defer listener.Close()
const nblocking = 5
@ -194,9 +191,7 @@ func TestConcurrentHandshakes(t *testing.T) {
// Now, construct a new private key and use the brontide dialer to
// connect to the listener.
remotePriv, err := btcec.NewPrivateKey()
if err != nil {
t.Fatalf("unable to generate private key: %v", err)
}
require.NoError(t, err, "unable to generate private key")
remoteKeyECDH := &keychain.PrivKeyECDH{PrivKey: remotePriv}
go func() {
@ -210,9 +205,7 @@ func TestConcurrentHandshakes(t *testing.T) {
// This connection should be accepted without error, as the brontide
// connection should bypass stalled tcp connections.
conn, err := listener.Accept()
if err != nil {
t.Fatalf("unable to accept dial: %v", err)
}
require.NoError(t, err, "unable to accept dial")
defer conn.Close()
result := <-connChan
@ -265,9 +258,7 @@ func TestWriteMessageChunking(t *testing.T) {
// into local variables. If the initial crypto handshake fails, then
// we'll get a non-nil error here.
localConn, remoteConn, cleanUp, err := establishTestConnection()
if err != nil {
t.Fatalf("unable to establish test connection: %v", err)
}
require.NoError(t, err, "unable to establish test connection")
defer cleanUp()
// Attempt to write a message which is over 3x the max allowed payload
@ -322,9 +313,7 @@ func TestBolt0008TestVectors(t *testing.T) {
// vectors at the appendix of BOLT-0008
initiatorKeyBytes, err := hex.DecodeString("1111111111111111111111" +
"111111111111111111111111111111111111111111")
if err != nil {
t.Fatalf("unable to decode hex: %v", err)
}
require.NoError(t, err, "unable to decode hex")
initiatorPriv, _ := btcec.PrivKeyFromBytes(
initiatorKeyBytes,
)
@ -333,9 +322,7 @@ func TestBolt0008TestVectors(t *testing.T) {
// We'll then do the same for the responder.
responderKeyBytes, err := hex.DecodeString("212121212121212121212121" +
"2121212121212121212121212121212121212121")
if err != nil {
t.Fatalf("unable to decode hex: %v", err)
}
require.NoError(t, err, "unable to decode hex")
responderPriv, responderPub := btcec.PrivKeyFromBytes(
responderKeyBytes,
)
@ -382,15 +369,11 @@ func TestBolt0008TestVectors(t *testing.T) {
// the payload return is _exactly_ the same as what's specified within
// the test vectors.
actOne, err := initiator.GenActOne()
if err != nil {
t.Fatalf("unable to generate act one: %v", err)
}
require.NoError(t, err, "unable to generate act one")
expectedActOne, err := hex.DecodeString("00036360e856310ce5d294e" +
"8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df608655115" +
"1f58b8afe6c195782c6a")
if err != nil {
t.Fatalf("unable to parse expected act one: %v", err)
}
require.NoError(t, err, "unable to parse expected act one")
if !bytes.Equal(expectedActOne, actOne[:]) {
t.Fatalf("act one mismatch: expected %x, got %x",
expectedActOne, actOne)
@ -407,15 +390,11 @@ func TestBolt0008TestVectors(t *testing.T) {
// produce the _exact_ same byte stream as advertised within the spec's
// test vectors.
actTwo, err := responder.GenActTwo()
if err != nil {
t.Fatalf("unable to generate act two: %v", err)
}
require.NoError(t, err, "unable to generate act two")
expectedActTwo, err := hex.DecodeString("0002466d7fcae563e5cb09a0" +
"d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac58" +
"3c9ef6eafca3f730ae")
if err != nil {
t.Fatalf("unable to parse expected act two: %v", err)
}
require.NoError(t, err, "unable to parse expected act two")
if !bytes.Equal(expectedActTwo, actTwo[:]) {
t.Fatalf("act two mismatch: expected %x, got %x",
expectedActTwo, actTwo)
@ -430,15 +409,11 @@ func TestBolt0008TestVectors(t *testing.T) {
// At the final step, we'll generate the last act from the initiator
// and once again verify that it properly matches the test vectors.
actThree, err := initiator.GenActThree()
if err != nil {
t.Fatalf("unable to generate act three: %v", err)
}
require.NoError(t, err, "unable to generate act three")
expectedActThree, err := hex.DecodeString("00b9e3a702e93e3a9948c2e" +
"d6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8f" +
"c28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba")
if err != nil {
t.Fatalf("unable to parse expected act three: %v", err)
}
require.NoError(t, err, "unable to parse expected act three")
if !bytes.Equal(expectedActThree, actThree[:]) {
t.Fatalf("act three mismatch: expected %x, got %x",
expectedActThree, actThree)
@ -454,20 +429,14 @@ func TestBolt0008TestVectors(t *testing.T) {
// proper symmetric encryption keys.
sendingKey, err := hex.DecodeString("969ab31b4d288cedf6218839b27a3e2" +
"140827047f2c0f01bf5c04435d43511a9")
if err != nil {
t.Fatalf("unable to parse sending key: %v", err)
}
require.NoError(t, err, "unable to parse sending key")
recvKey, err := hex.DecodeString("bb9020b8965f4df047e07f955f3c4b884" +
"18984aadc5cdb35096b9ea8fa5c3442")
if err != nil {
t.Fatalf("unable to parse receiving key: %v", err)
}
require.NoError(t, err, "unable to parse receiving key")
chainKey, err := hex.DecodeString("919219dbb2920afa8db80f9a51787a840" +
"bcf111ed8d588caf9ab4be716e42b01")
if err != nil {
t.Fatalf("unable to parse chaining key: %v", err)
}
require.NoError(t, err, "unable to parse chaining key")
if !bytes.Equal(initiator.sendCipher.secretKey[:], sendingKey) {
t.Fatalf("sending key mismatch: expected %x, got %x",


@ -15,6 +15,7 @@ import (
"github.com/lightningnetwork/lnd/blockcache"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/stretchr/testify/require"
)
var (
@ -35,20 +36,14 @@ func initHintCache(t *testing.T) *chainntnfs.HeightHintCache {
t.Helper()
tempDir, err := ioutil.TempDir("", "kek")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
db, err := channeldb.Open(tempDir)
if err != nil {
t.Fatalf("unable to create db: %v", err)
}
require.NoError(t, err, "unable to create db")
testCfg := chainntnfs.CacheConfig{
QueryDisable: false,
}
hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db.Backend)
if err != nil {
t.Fatalf("unable to create hint cache: %v", err)
}
require.NoError(t, err, "unable to create hint cache")
return hintCache
}
@ -81,9 +76,7 @@ func syncNotifierWithMiner(t *testing.T, notifier *BitcoindNotifier,
t.Helper()
_, minerHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve miner's current height: %v", err)
}
require.NoError(t, err, "unable to retrieve miner's current height")
timeout := time.After(10 * time.Second)
for {
@ -139,13 +132,9 @@ func testHistoricalConfDetailsTxIndex(t *testing.T, rpcPolling bool) {
var unknownHash chainhash.Hash
copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32))
unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript)
if err != nil {
t.Fatalf("unable to create conf request: %v", err)
}
require.NoError(t, err, "unable to create conf request")
_, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
switch txStatus {
case chainntnfs.TxNotFoundIndex:
@ -158,22 +147,16 @@ func testHistoricalConfDetailsTxIndex(t *testing.T, rpcPolling bool) {
// Now, we'll create a test transaction, confirm it, and attempt to
// retrieve its confirmation details.
txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
if err != nil {
t.Fatalf("unable to create tx: %v", err)
}
require.NoError(t, err, "unable to create tx")
if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
t.Fatal(err)
}
confReq, err := chainntnfs.NewConfRequest(txid, pkScript)
if err != nil {
t.Fatalf("unable to create conf request: %v", err)
}
require.NoError(t, err, "unable to create conf request")
// The transaction should be found in the mempool at this point.
_, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
// Since it has yet to be included in a block, it should have been found
// within the mempool.
@ -193,9 +176,7 @@ func testHistoricalConfDetailsTxIndex(t *testing.T, rpcPolling bool) {
syncNotifierWithMiner(t, notifier, miner)
_, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
// Since the backend node's txindex is enabled and the transaction has
// confirmed, we should be able to retrieve it using the txindex.
@ -238,16 +219,12 @@ func testHistoricalConfDetailsNoTxIndex(t *testing.T, rpcpolling bool) {
var unknownHash chainhash.Hash
copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32))
unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript)
if err != nil {
t.Fatalf("unable to create conf request: %v", err)
}
require.NoError(t, err, "unable to create conf request")
broadcastHeight := syncNotifierWithMiner(t, notifier, miner)
_, txStatus, err := notifier.historicalConfDetails(
unknownConfReq, uint32(broadcastHeight), uint32(broadcastHeight),
)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
switch txStatus {
case chainntnfs.TxNotFoundManually:
@ -267,9 +244,7 @@ func testHistoricalConfDetailsNoTxIndex(t *testing.T, rpcpolling bool) {
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
spendTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
spendTxHash, err := miner.Client.SendRawTransaction(spendTx, true)
if err != nil {
t.Fatalf("unable to broadcast tx: %v", err)
}
require.NoError(t, err, "unable to broadcast tx")
if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
@ -280,16 +255,12 @@ func testHistoricalConfDetailsNoTxIndex(t *testing.T, rpcpolling bool) {
// Ensure the notifier and miner are synced to the same height to ensure
// we can find the transaction when manually scanning the chain.
confReq, err := chainntnfs.NewConfRequest(&outpoint.Hash, output.PkScript)
if err != nil {
t.Fatalf("unable to create conf request: %v", err)
}
require.NoError(t, err, "unable to create conf request")
currentHeight := syncNotifierWithMiner(t, notifier, miner)
_, txStatus, err = notifier.historicalConfDetails(
confReq, uint32(broadcastHeight), uint32(currentHeight),
)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
// Since the backend node's txindex is disabled and the transaction has
// confirmed, we should be able to find it by falling back to scanning


@ -13,6 +13,7 @@ import (
"github.com/lightningnetwork/lnd/blockcache"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/stretchr/testify/require"
)
var (
@ -33,20 +34,14 @@ func initHintCache(t *testing.T) *chainntnfs.HeightHintCache {
t.Helper()
tempDir, err := ioutil.TempDir("", "kek")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
db, err := channeldb.Open(tempDir)
if err != nil {
t.Fatalf("unable to create db: %v", err)
}
require.NoError(t, err, "unable to create db")
testCfg := chainntnfs.CacheConfig{
QueryDisable: false,
}
hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db.Backend)
if err != nil {
t.Fatalf("unable to create hint cache: %v", err)
}
require.NoError(t, err, "unable to create hint cache")
return hintCache
}
@ -61,9 +56,7 @@ func setUpNotifier(t *testing.T, h *rpctest.Harness) *BtcdNotifier {
notifier, err := New(
&rpcCfg, chainntnfs.NetParams, hintCache, hintCache, blockCache,
)
if err != nil {
t.Fatalf("unable to create notifier: %v", err)
}
require.NoError(t, err, "unable to create notifier")
if err := notifier.Start(); err != nil {
t.Fatalf("unable to start notifier: %v", err)
}
@ -90,13 +83,9 @@ func TestHistoricalConfDetailsTxIndex(t *testing.T) {
var unknownHash chainhash.Hash
copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32))
unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript)
if err != nil {
t.Fatalf("unable to create conf request: %v", err)
}
require.NoError(t, err, "unable to create conf request")
_, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
switch txStatus {
case chainntnfs.TxNotFoundIndex:
@ -109,22 +98,16 @@ func TestHistoricalConfDetailsTxIndex(t *testing.T) {
// Now, we'll create a test transaction and attempt to retrieve its
// confirmation details.
txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness)
if err != nil {
t.Fatalf("unable to create tx: %v", err)
}
require.NoError(t, err, "unable to create tx")
if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil {
t.Fatalf("unable to find tx in the mempool: %v", err)
}
confReq, err := chainntnfs.NewConfRequest(txid, pkScript)
if err != nil {
t.Fatalf("unable to create conf request: %v", err)
}
require.NoError(t, err, "unable to create conf request")
// The transaction should be found in the mempool at this point.
_, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
// Since it has yet to be included in a block, it should have been found
// within the mempool.
@ -142,9 +125,7 @@ func TestHistoricalConfDetailsTxIndex(t *testing.T) {
}
_, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
// Since the backend node's txindex is enabled and the transaction has
// confirmed, we should be able to retrieve it using the txindex.
@ -174,13 +155,9 @@ func TestHistoricalConfDetailsNoTxIndex(t *testing.T) {
var unknownHash chainhash.Hash
copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32))
unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript)
if err != nil {
t.Fatalf("unable to create conf request: %v", err)
}
require.NoError(t, err, "unable to create conf request")
_, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
switch txStatus {
case chainntnfs.TxNotFoundManually:
@ -194,26 +171,18 @@ func TestHistoricalConfDetailsNoTxIndex(t *testing.T) {
// confirmation details. We'll note its broadcast height to use as the
// height hint when manually scanning the chain.
_, currentHeight, err := harness.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve current height: %v", err)
}
require.NoError(t, err, "unable to retrieve current height")
txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness)
if err != nil {
t.Fatalf("unable to create tx: %v", err)
}
require.NoError(t, err, "unable to create tx")
if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil {
t.Fatalf("unable to find tx in the mempool: %v", err)
}
confReq, err := chainntnfs.NewConfRequest(txid, pkScript)
if err != nil {
t.Fatalf("unable to create conf request: %v", err)
}
require.NoError(t, err, "unable to create conf request")
_, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
// Since it has yet to be included in a block, it should have been found
// within the mempool.
@ -231,9 +200,7 @@ func TestHistoricalConfDetailsNoTxIndex(t *testing.T) {
_, txStatus, err = notifier.historicalConfDetails(
confReq, uint32(currentHeight), uint32(currentHeight)+1,
)
if err != nil {
t.Fatalf("unable to retrieve historical conf details: %v", err)
}
require.NoError(t, err, "unable to retrieve historical conf details")
// Since the backend node's txindex is disabled and the transaction has
// confirmed, we should be able to find it by falling back to scanning


@ -25,17 +25,11 @@ func initHintCacheWithConfig(t *testing.T, cfg CacheConfig) *HeightHintCache {
t.Helper()
tempDir, err := ioutil.TempDir("", "kek")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
db, err := channeldb.Open(tempDir)
if err != nil {
t.Fatalf("unable to create db: %v", err)
}
require.NoError(t, err, "unable to create db")
hintCache, err := NewHeightHintCache(cfg, db.Backend)
if err != nil {
t.Fatalf("unable to create hint cache: %v", err)
}
require.NoError(t, err, "unable to create hint cache")
return hintCache
}
@ -69,9 +63,7 @@ func TestHeightHintCacheConfirms(t *testing.T) {
}
err = hintCache.CommitConfirmHint(height, confRequests...)
if err != nil {
t.Fatalf("unable to add entries to cache: %v", err)
}
require.NoError(t, err, "unable to add entries to cache")
// With the hashes committed, we'll now query the cache to ensure that
// we're able to properly retrieve the confirm hints.
@ -130,9 +122,7 @@ func TestHeightHintCacheSpends(t *testing.T) {
}
err = hintCache.CommitSpendHint(height, spendRequests...)
if err != nil {
t.Fatalf("unable to add entries to cache: %v", err)
}
require.NoError(t, err, "unable to add entries to cache")
// With the outpoints committed, we'll now query the cache to ensure
// that we're able to properly retrieve the confirm hints.


@ -39,17 +39,13 @@ func testSingleConfirmationNotification(miner *rpctest.Harness,
// We're spending from a coinbase output here, so we use the dedicated
// function.
txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
if err != nil {
t.Fatalf("unable to create test tx: %v", err)
}
require.NoError(t, err, "unable to create test tx")
if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Now that we have a txid, register a confirmation notification with
// the chainntfn source.
@ -64,16 +60,12 @@ func testSingleConfirmationNotification(miner *rpctest.Harness,
txid, pkScript, numConfs, uint32(currentHeight),
)
}
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
// Now generate a single block, the transaction should be included which
// should trigger a notification event.
blockHash, err := miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
require.NoError(t, err, "unable to generate single block")
select {
case confInfo := <-confIntent.Confirmed:
@ -113,17 +105,13 @@ func testMultiConfirmationNotification(miner *rpctest.Harness,
// Again, we'll begin by creating a fresh transaction, so we can obtain
// a fresh txid.
txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
if err != nil {
t.Fatalf("unable to create test addr: %v", err)
}
require.NoError(t, err, "unable to create test addr")
if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
numConfs := uint32(6)
var confIntent *chainntnfs.ConfirmationEvent
@ -136,9 +124,7 @@ func testMultiConfirmationNotification(miner *rpctest.Harness,
txid, pkScript, numConfs, uint32(currentHeight),
)
}
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
// Now generate a six blocks. The transaction should be included in the
// first block, which will be built upon by the other 5 blocks.
@ -167,9 +153,7 @@ func testBatchConfirmationNotification(miner *rpctest.Harness,
confIntents := make([]*chainntnfs.ConfirmationEvent, len(confSpread))
_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Create a new txid spending miner coins for each confirmation entry
// in confSpread, we collect each conf intent into a slice so we can
@ -279,9 +263,7 @@ func testSpendNotification(miner *rpctest.Harness,
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Now that we have an output index and the pkScript, register for a
// spentness notification for the newly created output with multiple
@ -312,9 +294,7 @@ func testSpendNotification(miner *rpctest.Harness,
// Broadcast our spending transaction.
spenderSha, err := miner.Client.SendRawTransaction(spendingTx, true)
if err != nil {
t.Fatalf("unable to broadcast tx: %v", err)
}
require.NoError(t, err, "unable to broadcast tx")
if err := chainntnfs.WaitForMempoolTx(miner, spenderSha); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
@ -354,9 +334,7 @@ func testSpendNotification(miner *rpctest.Harness,
outpoint, output.PkScript, uint32(currentHeight),
)
}
if err != nil {
t.Fatalf("unable to register for spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register for spend ntfn")
select {
case <-spentIntent.Spend:
@ -373,9 +351,7 @@ func testSpendNotification(miner *rpctest.Harness,
}
_, currentHeight, err = miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
for _, c := range spendClients {
select {
@ -464,9 +440,7 @@ func testMultiClientConfirmationNotification(miner *rpctest.Harness,
// We'd like to test the case of a multiple clients registered to
// receive a confirmation notification for the same transaction.
txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
if err != nil {
t.Fatalf("unable to create test tx: %v", err)
}
require.NoError(t, err, "unable to create test tx")
if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
@ -478,9 +452,7 @@ func testMultiClientConfirmationNotification(miner *rpctest.Harness,
)
_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Register for a conf notification for the above generated txid with
// numConfsClients distinct clients.
@ -535,9 +507,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
// spending from a coinbase output here, so we use the dedicated
// function.
txid3, pkScript3, err := chainntnfs.GetTestTxidAndScript(miner)
if err != nil {
t.Fatalf("unable to create test tx: %v", err)
}
require.NoError(t, err, "unable to create test tx")
if err := chainntnfs.WaitForMempoolTx(miner, txid3); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
@ -548,36 +518,26 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
// that the TXID hasn't already been included in the chain, otherwise the
// notification will never be sent.
_, err = miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
txid1, pkScript1, err := chainntnfs.GetTestTxidAndScript(miner)
if err != nil {
t.Fatalf("unable to create test tx: %v", err)
}
require.NoError(t, err, "unable to create test tx")
if err := chainntnfs.WaitForMempoolTx(miner, txid1); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
txid2, pkScript2, err := chainntnfs.GetTestTxidAndScript(miner)
if err != nil {
t.Fatalf("unable to create test tx: %v", err)
}
require.NoError(t, err, "unable to create test tx")
if err := chainntnfs.WaitForMempoolTx(miner, txid2); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Now generate another block containing txs 1 & 2.
blockHash, err := miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
// Register a confirmation notification with the chainntfn source for tx2,
// which is included in the last block. The height hint is the height before
@ -593,9 +553,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
txid1, pkScript1, 1, uint32(currentHeight),
)
}
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
select {
case confInfo := <-ntfn1.Confirmed:
@ -639,15 +597,11 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
txid2, pkScript2, 3, uint32(currentHeight),
)
}
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
// Fully confirm tx3.
_, err = miner.Client.Generate(2)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
select {
case <-ntfn2.Confirmed:
@ -674,9 +628,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
txid3, pkScript3, 1, uint32(currentHeight-1),
)
}
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
// We'll also register for a confirmation notification with the pkscript
// of a different transaction. This notification shouldn't fire since we
@ -685,9 +637,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
ntfn4, err = notifier.RegisterConfirmationsNtfn(
txid3, pkScript2, 1, uint32(currentHeight-1),
)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
select {
case <-ntfn3.Confirmed:
@ -725,17 +675,13 @@ func testLazyNtfnConsumer(miner *rpctest.Harness,
// Create a transaction to be notified about. We'll register for
// notifications on this transaction but won't be prompt in checking them
txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
if err != nil {
t.Fatalf("unable to create test tx: %v", err)
}
require.NoError(t, err, "unable to create test tx")
if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
numConfs := uint32(3)
@ -755,9 +701,7 @@ func testLazyNtfnConsumer(miner *rpctest.Harness,
txid, pkScript, numConfs, uint32(currentHeight),
)
}
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
// Generate another 2 blocks, this should dispatch the confirm notification
if _, err := miner.Client.Generate(2); err != nil {
@ -768,17 +712,13 @@ func testLazyNtfnConsumer(miner *rpctest.Harness,
// if the first transaction has confirmed doesn't mean that we shouldn't
// be able to see if this transaction confirms first
txid, pkScript, err = chainntnfs.GetTestTxidAndScript(miner)
if err != nil {
t.Fatalf("unable to create test tx: %v", err)
}
require.NoError(t, err, "unable to create test tx")
if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
_, currentHeight, err = miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
numConfs = 1
var secondConfIntent *chainntnfs.ConfirmationEvent
@ -791,9 +731,7 @@ func testLazyNtfnConsumer(miner *rpctest.Harness,
txid, pkScript, numConfs, uint32(currentHeight),
)
}
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
@ -829,16 +767,12 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
_, heightHint, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// We'll then spend this output and broadcast the spend transaction.
spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
spenderSha, err := miner.Client.SendRawTransaction(spendingTx, true)
if err != nil {
t.Fatalf("unable to broadcast tx: %v", err)
}
require.NoError(t, err, "unable to broadcast tx")
if err := chainntnfs.WaitForMempoolTx(miner, spenderSha); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
@ -846,18 +780,14 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
// We create an epoch client we can use to make sure the notifier is
// caught up to the mining node's chain.
epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
if err != nil {
t.Fatalf("unable to register for block epoch: %v", err)
}
require.NoError(t, err, "unable to register for block epoch")
// Now we mine an additional block, which should include our spend.
if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
_, spendHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// checkSpends registers two clients to be notified of a spend that has
// already happened. The notifier should dispatch a spend notification
@ -948,9 +878,7 @@ func testCancelSpendNtfn(node *rpctest.Harness,
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, node)
_, currentHeight, err := node.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Create two clients that each registered to the spend notification.
// We'll cancel the notification for the first client and leave the
@ -984,9 +912,7 @@ func testCancelSpendNtfn(node *rpctest.Harness,
// Broadcast our spending transaction.
spenderSha, err := node.Client.SendRawTransaction(spendingTx, true)
if err != nil {
t.Fatalf("unable to broadcast tx: %v", err)
}
require.NoError(t, err, "unable to broadcast tx")
if err := chainntnfs.WaitForMempoolTx(node, spenderSha); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
@ -1092,9 +1018,7 @@ func testReorgConf(miner *rpctest.Harness,
miner2, err := rpctest.New(
chainntnfs.NetParams, nil, []string{"--txindex"}, "",
)
if err != nil {
t.Fatalf("unable to create mining node: %v", err)
}
require.NoError(t, err, "unable to create mining node")
if err := miner2.SetUp(false, 0); err != nil {
t.Fatalf("unable to set up mining node: %v", err)
}
@ -1129,22 +1053,16 @@ func testReorgConf(miner *rpctest.Harness,
// We disconnect the two nodes, such that we can start mining on them
// individually without the other one learning about the new blocks.
err = miner.Client.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
if err != nil {
t.Fatalf("unable to remove node: %v", err)
}
require.NoError(t, err, "unable to remove node")
txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
if err != nil {
t.Fatalf("unable to create test tx: %v", err)
}
require.NoError(t, err, "unable to create test tx")
if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Now that we have a txid, register a confirmation notification with
// the chainntfn source.
@ -1159,15 +1077,11 @@ func testReorgConf(miner *rpctest.Harness,
txid, pkScript, numConfs, uint32(currentHeight),
)
}
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
// Now generate a single block, the transaction should be included.
_, err = miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
require.NoError(t, err, "unable to generate single block")
// Transaction only has one confirmation, and the notification is registered
// with 2 confirmations, so we should not be notified yet.
@ -1219,22 +1133,16 @@ func testReorgConf(miner *rpctest.Harness,
// Now confirm the transaction on the longest chain and verify that we
// receive the notification.
tx, err := miner.Client.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to get raw tx: %v", err)
}
require.NoError(t, err, "unable to get raw tx")
txid, err = miner2.Client.SendRawTransaction(tx.MsgTx(), false)
if err != nil {
t.Fatalf("unable to get send tx: %v", err)
}
require.NoError(t, err, "unable to get send tx")
if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
_, err = miner.Client.Generate(3)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
require.NoError(t, err, "unable to generate single block")
select {
case <-confIntent.Confirmed:
@ -1253,9 +1161,7 @@ func testReorgSpend(miner *rpctest.Harness,
// notification for it.
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
_, heightHint, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve current height: %v", err)
}
require.NoError(t, err, "unable to retrieve current height")
var spendIntent *chainntnfs.SpendEvent
if scriptDispatch {
@ -1267,17 +1173,13 @@ func testReorgSpend(miner *rpctest.Harness,
outpoint, output.PkScript, uint32(heightHint),
)
}
if err != nil {
t.Fatalf("unable to register for spend: %v", err)
}
require.NoError(t, err, "unable to register for spend")
// Set up a new miner that we can use to cause a reorg.
miner2, err := rpctest.New(
chainntnfs.NetParams, nil, []string{"--txindex"}, "",
)
if err != nil {
t.Fatalf("unable to create mining node: %v", err)
}
require.NoError(t, err, "unable to create mining node")
if err := miner2.SetUp(false, 0); err != nil {
t.Fatalf("unable to set up mining node: %v", err)
}
@ -1294,13 +1196,9 @@ func testReorgSpend(miner *rpctest.Harness,
t.Fatalf("unable to sync miners: %v", err)
}
_, minerHeight1, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner1's current height: %v", err)
}
require.NoError(t, err, "unable to get miner1's current height")
_, minerHeight2, err := miner2.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner2's current height: %v", err)
}
require.NoError(t, err, "unable to get miner2's current height")
if minerHeight1 != minerHeight2 {
t.Fatalf("expected both miners to be on the same height: "+
"%v vs %v", minerHeight1, minerHeight2)
@ -1309,17 +1207,13 @@ func testReorgSpend(miner *rpctest.Harness,
// We disconnect the two nodes, such that we can start mining on them
// individually without the other one learning about the new blocks.
err = miner.Client.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
if err != nil {
t.Fatalf("unable to disconnect miners: %v", err)
}
require.NoError(t, err, "unable to disconnect miners")
// Craft the spending transaction for the outpoint created above and
// confirm it under the chain of the original miner.
spendTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
spendTxHash, err := miner.Client.SendRawTransaction(spendTx, true)
if err != nil {
t.Fatalf("unable to broadcast spend tx: %v", err)
}
require.NoError(t, err, "unable to broadcast spend tx")
if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil {
t.Fatalf("spend tx not relayed to miner: %v", err)
}
@ -1328,9 +1222,7 @@ func testReorgSpend(miner *rpctest.Harness,
t.Fatalf("unable to generate blocks: %v", err)
}
_, spendHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get spend height: %v", err)
}
require.NoError(t, err, "unable to get spend height")
// We should see a spend notification dispatched with the correct spend
// details.
@ -1356,13 +1248,9 @@ func testReorgSpend(miner *rpctest.Harness,
t.Fatalf("unable to sync miners: %v", err)
}
_, minerHeight1, err = miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner1's current height: %v", err)
}
require.NoError(t, err, "unable to get miner1's current height")
_, minerHeight2, err = miner2.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner2's current height: %v", err)
}
require.NoError(t, err, "unable to get miner2's current height")
if minerHeight1 != minerHeight2 {
t.Fatalf("expected both miners to be on the same height: "+
"%v vs %v", minerHeight1, minerHeight2)
@ -1391,9 +1279,7 @@ func testReorgSpend(miner *rpctest.Harness,
t.Fatalf("unable to generate single block: %v", err)
}
_, spendHeight, err = miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve current height: %v", err)
}
require.NoError(t, err, "unable to retrieve current height")
select {
case spendDetails := <-spendIntent.Spend:
@ -1416,9 +1302,7 @@ func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness,
var wg sync.WaitGroup
outdatedHash, outdatedHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve current height: %v", err)
}
require.NoError(t, err, "unable to retrieve current height")
// This function is used by UnsafeStart to ensure all notifications
// are fully drained before clients register for notifications.
@ -1432,9 +1316,7 @@ func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness,
// client may not receive all historical notifications.
bestHeight := outdatedHeight + numBlocks
err = notifier.UnsafeStart(bestHeight, nil, bestHeight, generateBlocks)
if err != nil {
t.Fatalf("unable to unsafe start the notifier: %v", err)
}
require.NoError(t, err, "unable to unsafe start the notifier")
defer notifier.Stop()
// Create numClients clients whose best known block is 10 blocks behind
@ -1523,9 +1405,7 @@ func testCatchUpOnMissedBlocks(miner *rpctest.Harness,
err = notifier.UnsafeStart(
bestHeight, nil, bestHeight+numBlocks, generateBlocks,
)
if err != nil {
t.Fatalf("unable to unsafe start the notifier: %v", err)
}
require.NoError(t, err, "unable to unsafe start the notifier")
defer notifier.Stop()
// Create numClients clients who will listen for block notifications.
@ -1622,9 +1502,7 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
miner2, err := rpctest.New(
chainntnfs.NetParams, nil, []string{"--txindex"}, "",
)
if err != nil {
t.Fatalf("unable to create mining node: %v", err)
}
require.NoError(t, err, "unable to create mining node")
if err := miner2.SetUp(false, 0); err != nil {
t.Fatalf("unable to set up mining node: %v", err)
}
@ -1659,22 +1537,16 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
// We disconnect the two nodes, such that we can start mining on them
// individually without the other one learning about the new blocks.
err = miner1.Client.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
if err != nil {
t.Fatalf("unable to remove node: %v", err)
}
require.NoError(t, err, "unable to remove node")
// Now mine on each chain separately
blocks, err := miner1.Client.Generate(numBlocks)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
require.NoError(t, err, "unable to generate single block")
// We generate an extra block on miner 2's chain to ensure it is the
// longer chain.
_, err = miner2.Client.Generate(numBlocks + 1)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
require.NoError(t, err, "unable to generate single block")
// Sync the two chains to ensure they will sync to miner2's chain.
if err := rpctest.ConnectNode(miner1, miner2); err != nil {
@ -1717,9 +1589,7 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
err = notifier.UnsafeStart(
nodeHeight1+numBlocks, blocks[numBlocks-1], syncHeight, nil,
)
if err != nil {
t.Fatalf("Unable to unsafe start the notifier: %v", err)
}
require.NoError(t, err, "Unable to unsafe start the notifier")
defer notifier.Stop()
// Create numClients clients who will listen for block notifications.
@ -1745,9 +1615,7 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
// Generate a single block, which should trigger the notifier to rewind
// to the common ancestor and dispatch notifications from there.
_, err = miner2.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
require.NoError(t, err, "unable to generate single block")
// If the chain backend to the notifier stores information about reorged
// blocks, the notifier is able to rewind the chain to the common


@ -27,6 +27,7 @@ import (
"github.com/lightninglabs/neutrino"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
var (
@ -127,14 +128,10 @@ func CreateSpendableOutput(t *testing.T,
// Create a transaction that only has one output, the one destined for
// the recipient.
pkScript, privKey, err := randPubKeyHashScript()
if err != nil {
t.Fatalf("unable to generate pkScript: %v", err)
}
require.NoError(t, err, "unable to generate pkScript")
output := &wire.TxOut{Value: 2e8, PkScript: pkScript}
txid, err := miner.SendOutputsWithoutChange([]*wire.TxOut{output}, 10)
if err != nil {
t.Fatalf("unable to create tx: %v", err)
}
require.NoError(t, err, "unable to create tx")
// Mine the transaction to mark the output as spendable.
if err := WaitForMempoolTx(miner, txid); err != nil {
@ -161,9 +158,7 @@ func CreateSpendTx(t *testing.T, prevOutPoint *wire.OutPoint,
spendingTx, 0, prevOutput.PkScript, txscript.SigHashAll,
privKey, true,
)
if err != nil {
t.Fatalf("unable to sign tx: %v", err)
}
require.NoError(t, err, "unable to sign tx")
spendingTx.TxIn[0].SignatureScript = sigScript
return spendingTx
@ -181,9 +176,7 @@ func NewMiner(t *testing.T, extraArgs []string, createChain bool,
extraArgs = append(extraArgs, trickle)
node, err := rpctest.New(NetParams, nil, extraArgs, "")
if err != nil {
t.Fatalf("unable to create backend node: %v", err)
}
require.NoError(t, err, "unable to create backend node")
if err := node.SetUp(createChain, spendableOutputs); err != nil {
node.TearDown()
t.Fatalf("unable to set up backend node: %v", err)
@ -204,9 +197,7 @@ func NewBitcoindBackend(t *testing.T, minerAddr string, txindex,
t.Helper()
tempBitcoindDir, err := ioutil.TempDir("", "bitcoind")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
rpcPort := rand.Intn(65536-1024) + 1024
zmqBlockHost := "ipc:///" + tempBitcoindDir + "/blocks.socket"
@ -289,9 +280,7 @@ func NewNeutrinoBackend(t *testing.T, minerAddr string) (*neutrino.ChainService,
t.Helper()
spvDir, err := ioutil.TempDir("", "neutrino")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
dbName := filepath.Join(spvDir, "neutrino.db")
spvDatabase, err := walletdb.Create(


@ -224,17 +224,13 @@ func TestTxNotifierFutureConfDispatch(t *testing.T) {
tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx1Hash := tx1.TxHash()
ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
tx2 := wire.MsgTx{Version: 2}
tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx2Hash := tx2.TxHash()
ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
// We should not receive any notifications from both transactions
// since they have not been included in a block yet.
@ -261,9 +257,7 @@ func TestTxNotifierFutureConfDispatch(t *testing.T) {
})
err = n.ConnectTip(block1.Hash(), 11, block1.Transactions())
if err != nil {
t.Fatalf("Failed to connect block: %v", err)
}
require.NoError(t, err, "Failed to connect block")
if err := n.NotifyHeight(11); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -323,9 +317,7 @@ func TestTxNotifierFutureConfDispatch(t *testing.T) {
// This should confirm tx2.
block2 := btcutil.NewBlock(&wire.MsgBlock{})
err = n.ConnectTip(block2.Hash(), 12, block2.Transactions())
if err != nil {
t.Fatalf("Failed to connect block: %v", err)
}
require.NoError(t, err, "Failed to connect block")
if err := n.NotifyHeight(12); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -396,15 +388,11 @@ func TestTxNotifierHistoricalConfDispatch(t *testing.T) {
// starting height so that they are confirmed once registering them.
tx1Hash := tx1.TxHash()
ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
tx2Hash := tx2.TxHash()
ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
// Update tx1 with its confirmation details. We should only receive one
// update since it only requires one confirmation and it already met it.
@ -415,9 +403,7 @@ func TestTxNotifierHistoricalConfDispatch(t *testing.T) {
Tx: &tx1,
}
err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, &txConf1)
if err != nil {
t.Fatalf("unable to update conf details: %v", err)
}
require.NoError(t, err, "unable to update conf details")
select {
case numConfsLeft := <-ntfn1.Event.Updates:
const expected = 0
@ -449,9 +435,7 @@ func TestTxNotifierHistoricalConfDispatch(t *testing.T) {
Tx: &tx2,
}
err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, &txConf2)
if err != nil {
t.Fatalf("unable to update conf details: %v", err)
}
require.NoError(t, err, "unable to update conf details")
select {
case numConfsLeft := <-ntfn2.Event.Updates:
const expected = 1
@ -477,9 +461,7 @@ func TestTxNotifierHistoricalConfDispatch(t *testing.T) {
})
err = n.ConnectTip(block.Hash(), 11, block.Transactions())
if err != nil {
t.Fatalf("Failed to connect block: %v", err)
}
require.NoError(t, err, "Failed to connect block")
if err := n.NotifyHeight(11); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -532,9 +514,7 @@ func TestTxNotifierFutureSpendDispatch(t *testing.T) {
// outpoint.
op := wire.OutPoint{Index: 1}
ntfn, err := n.RegisterSpend(&op, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
// We should not receive a notification as the outpoint has not been
// spent yet.
@ -557,9 +537,7 @@ func TestTxNotifierFutureSpendDispatch(t *testing.T) {
Transactions: []*wire.MsgTx{spendTx},
})
err = n.ConnectTip(block.Hash(), 11, block.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(11); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -592,9 +570,7 @@ func TestTxNotifierFutureSpendDispatch(t *testing.T) {
Transactions: []*wire.MsgTx{spendOfSpend},
})
err = n.ConnectTip(block.Hash(), 12, block.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(12); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -626,21 +602,15 @@ func TestTxNotifierFutureConfDispatchReuseSafe(t *testing.T) {
tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx1Hash := tx1.TxHash()
ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
scriptNtfn1, err := n.RegisterConf(nil, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
block := btcutil.NewBlock(&wire.MsgBlock{
Transactions: []*wire.MsgTx{&tx1},
})
currentBlock++
err = n.ConnectTip(block.Hash(), currentBlock, block.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(currentBlock); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -685,21 +655,15 @@ func TestTxNotifierFutureConfDispatchReuseSafe(t *testing.T) {
tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx2Hash := tx2.TxHash()
ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
scriptNtfn2, err := n.RegisterConf(nil, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
block2 := btcutil.NewBlock(&wire.MsgBlock{
Transactions: []*wire.MsgTx{&tx2},
})
currentBlock++
err = n.ConnectTip(block2.Hash(), currentBlock, block2.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(currentBlock); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -800,9 +764,7 @@ func TestTxNotifierHistoricalSpendDispatch(t *testing.T) {
// We'll register for a spend notification of the outpoint and ensure
// that a notification isn't dispatched.
ntfn, err := n.RegisterSpend(&spentOutpoint, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
select {
case <-ntfn.Event.Spend:
@ -817,9 +779,7 @@ func TestTxNotifierHistoricalSpendDispatch(t *testing.T) {
err = n.UpdateSpendDetails(
ntfn.HistoricalDispatch.SpendRequest, expectedSpendDetails,
)
if err != nil {
t.Fatalf("unable to update spend details: %v", err)
}
require.NoError(t, err, "unable to update spend details")
// Now that we have the spending details, we should receive a spend
// notification. We'll ensure that the details match as intended.
@ -842,9 +802,7 @@ func TestTxNotifierHistoricalSpendDispatch(t *testing.T) {
Transactions: []*wire.MsgTx{spendOfSpend},
})
err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(startingHeight + 1); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -872,9 +830,7 @@ func TestTxNotifierMultipleHistoricalConfRescans(t *testing.T) {
// request a historical confirmation rescan as it does not have a
// historical view of the chain.
ntfn1, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
if ntfn1.HistoricalDispatch == nil {
t.Fatal("expected to receive historical dispatch request")
}
@ -883,9 +839,7 @@ func TestTxNotifierMultipleHistoricalConfRescans(t *testing.T) {
// transaction. This should not request a historical confirmation rescan
// since the first one is still pending.
ntfn2, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register conf ntfn: %v", err)
}
require.NoError(t, err, "unable to register conf ntfn")
if ntfn2.HistoricalDispatch != nil {
t.Fatal("received unexpected historical rescan request")
}
@ -898,14 +852,10 @@ func TestTxNotifierMultipleHistoricalConfRescans(t *testing.T) {
BlockHeight: startingHeight - 1,
}
err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, confDetails)
if err != nil {
t.Fatalf("unable to update conf details: %v", err)
}
require.NoError(t, err, "unable to update conf details")
ntfn3, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register conf ntfn: %v", err)
}
require.NoError(t, err, "unable to register conf ntfn")
if ntfn3.HistoricalDispatch != nil {
t.Fatal("received unexpected historical rescan request")
}
@ -928,9 +878,7 @@ func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) {
// the chain.
op := wire.OutPoint{Index: 1}
ntfn1, err := n.RegisterSpend(&op, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
if ntfn1.HistoricalDispatch == nil {
t.Fatal("expected to receive historical dispatch request")
}
@ -939,9 +887,7 @@ func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) {
// should not request a historical spend rescan since the first one is
// still pending.
ntfn2, err := n.RegisterSpend(&op, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
if ntfn2.HistoricalDispatch != nil {
t.Fatal("received unexpected historical rescan request")
}
@ -960,14 +906,10 @@ func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) {
err = n.UpdateSpendDetails(
ntfn1.HistoricalDispatch.SpendRequest, spendDetails,
)
if err != nil {
t.Fatalf("unable to update spend details: %v", err)
}
require.NoError(t, err, "unable to update spend details")
ntfn3, err := n.RegisterSpend(&op, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
if ntfn3.HistoricalDispatch != nil {
t.Fatal("received unexpected historical rescan request")
}
@ -1026,9 +968,7 @@ func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) {
err := n.UpdateConfDetails(
confNtfns[0].HistoricalDispatch.ConfRequest, expectedConfDetails,
)
if err != nil {
t.Fatalf("unable to update conf details: %v", err)
}
require.NoError(t, err, "unable to update conf details")
// With the confirmation details retrieved, each client should now have
// been notified of the confirmation.
@ -1047,9 +987,7 @@ func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) {
// see a historical rescan request and the confirmation notification
// should come through immediately.
extraConfNtfn, err := n.RegisterConf(&txid, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register conf ntfn: %v", err)
}
require.NoError(t, err, "unable to register conf ntfn")
if extraConfNtfn.HistoricalDispatch != nil {
t.Fatal("received unexpected historical rescan request")
}
@ -1095,9 +1033,7 @@ func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) {
err = n.UpdateSpendDetails(
spendNtfns[0].HistoricalDispatch.SpendRequest, expectedSpendDetails,
)
if err != nil {
t.Fatalf("unable to update spend details: %v", err)
}
require.NoError(t, err, "unable to update spend details")
// With the spend details retrieved, each client should now have been
// notified of the spend.
@ -1116,9 +1052,7 @@ func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) {
// should not see a historical rescan request and the spend notification
// should come through immediately.
extraSpendNtfn, err := n.RegisterSpend(&op, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
if extraSpendNtfn.HistoricalDispatch != nil {
t.Fatal("received unexpected historical rescan request")
}
@ -1146,27 +1080,19 @@ func TestTxNotifierCancelConf(t *testing.T) {
tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx1Hash := tx1.TxHash()
ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register conf ntfn: %v", err)
}
require.NoError(t, err, "unable to register conf ntfn")
tx2 := wire.NewMsgTx(2)
tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx2Hash := tx2.TxHash()
ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register conf ntfn: %v", err)
}
require.NoError(t, err, "unable to register conf ntfn")
ntfn3, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register conf ntfn: %v", err)
}
require.NoError(t, err, "unable to register conf ntfn")
// This request will require three confirmations.
ntfn4, err := n.RegisterConf(&tx2Hash, testRawScript, 3, 1)
if err != nil {
t.Fatalf("unable to register conf ntfn: %v", err)
}
require.NoError(t, err, "unable to register conf ntfn")
// Extend the chain with a block that will confirm both transactions.
// This will queue confirmation notifications to dispatch once their
@ -1185,9 +1111,7 @@ func TestTxNotifierCancelConf(t *testing.T) {
ntfn2.Event.Cancel()
err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
// Cancel the third notification before notifying to ensure its queued
// confirmation notification gets removed as well.
@ -1232,9 +1156,7 @@ func TestTxNotifierCancelConf(t *testing.T) {
})
err = n.ConnectTip(block1.Hash(), startingHeight+2, block1.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(startingHeight + 2); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
@ -1266,9 +1188,7 @@ func TestTxNotifierCancelConf(t *testing.T) {
})
err = n.ConnectTip(block2.Hash(), startingHeight+3, block2.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(startingHeight + 3); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
@ -1291,15 +1211,11 @@ func TestTxNotifierCancelSpend(t *testing.T) {
// canceled.
op1 := wire.OutPoint{Index: 1}
ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
op2 := wire.OutPoint{Index: 2}
ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
// Construct the spending details of the outpoint and create a dummy
// block containing it.
@ -1326,9 +1242,7 @@ func TestTxNotifierCancelSpend(t *testing.T) {
n.CancelSpend(ntfn2.HistoricalDispatch.SpendRequest, 2)
err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(startingHeight + 1); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -1377,42 +1291,30 @@ func TestTxNotifierConfReorg(t *testing.T) {
tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx1Hash := tx1.TxHash()
ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, nil)
if err != nil {
t.Fatalf("unable to deliver conf details: %v", err)
}
require.NoError(t, err, "unable to deliver conf details")
// Tx 2 will be confirmed in block 10 and requires 1 conf.
tx2 := wire.MsgTx{Version: 2}
tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx2Hash := tx2.TxHash()
ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, nil)
if err != nil {
t.Fatalf("unable to deliver conf details: %v", err)
}
require.NoError(t, err, "unable to deliver conf details")
// Tx 3 will be confirmed in block 10 and requires 2 confs.
tx3 := wire.MsgTx{Version: 3}
tx3.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx3Hash := tx3.TxHash()
ntfn3, err := n.RegisterConf(&tx3Hash, testRawScript, tx3NumConfs, 1)
if err != nil {
t.Fatalf("unable to register ntfn: %v", err)
}
require.NoError(t, err, "unable to register ntfn")
err = n.UpdateConfDetails(ntfn3.HistoricalDispatch.ConfRequest, nil)
if err != nil {
t.Fatalf("unable to deliver conf details: %v", err)
}
require.NoError(t, err, "unable to deliver conf details")
// Sync chain to block 10. Txs 1 & 2 should be confirmed.
block1 := btcutil.NewBlock(&wire.MsgBlock{
@ -1555,17 +1457,13 @@ func TestTxNotifierConfReorg(t *testing.T) {
block4 := btcutil.NewBlock(&wire.MsgBlock{})
err = n.ConnectTip(block3.Hash(), 12, block3.Transactions())
if err != nil {
t.Fatalf("Failed to connect block: %v", err)
}
require.NoError(t, err, "Failed to connect block")
if err := n.NotifyHeight(12); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
err = n.ConnectTip(block4.Hash(), 13, block4.Transactions())
if err != nil {
t.Fatalf("Failed to connect block: %v", err)
}
require.NoError(t, err, "Failed to connect block")
if err := n.NotifyHeight(13); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -1692,14 +1590,10 @@ func TestTxNotifierSpendReorg(t *testing.T) {
// We'll register for a spend notification for each outpoint above.
ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
// We'll extend the chain by connecting a new block at tip. This block
// will only contain the spending transaction of the first outpoint.
@ -1707,9 +1601,7 @@ func TestTxNotifierSpendReorg(t *testing.T) {
Transactions: []*wire.MsgTx{spendTx1},
})
err = n.ConnectTip(block1.Hash(), startingHeight+1, block1.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(startingHeight + 1); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -1737,9 +1629,7 @@ func TestTxNotifierSpendReorg(t *testing.T) {
Transactions: []*wire.MsgTx{spendTx2},
})
err = n.ConnectTip(block2.Hash(), startingHeight+2, block2.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(startingHeight + 2); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -1793,9 +1683,7 @@ func TestTxNotifierSpendReorg(t *testing.T) {
err = n.ConnectTip(
emptyBlock.Hash(), startingHeight+2, emptyBlock.Transactions(),
)
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(startingHeight + 2); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -1818,9 +1706,7 @@ func TestTxNotifierSpendReorg(t *testing.T) {
err = n.ConnectTip(
block2.Hash(), startingHeight+3, block2.Transactions(),
)
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(startingHeight + 3); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -1882,9 +1768,7 @@ func TestTxNotifierSpendReorgMissed(t *testing.T) {
Transactions: []*wire.MsgTx{spendTx},
})
err := n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(startingHeight + 1); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -1892,9 +1776,7 @@ func TestTxNotifierSpendReorgMissed(t *testing.T) {
// We register for the spend now and will not get a spend notification
// until we call UpdateSpendDetails.
ntfn, err := n.RegisterSpend(&op, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend: %v", err)
}
require.NoError(t, err, "unable to register spend")
// Assert that the HistoricalDispatch variable is non-nil. We'll use
// the SpendRequest member to update the spend details.
@ -1962,17 +1844,13 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx1Hash := tx1.TxHash()
ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register tx1: %v", err)
}
require.NoError(t, err, "unable to register tx1")
tx2 := wire.MsgTx{Version: 2}
tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
tx2Hash := tx2.TxHash()
ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 2, 1)
if err != nil {
t.Fatalf("unable to register tx2: %v", err)
}
require.NoError(t, err, "unable to register tx2")
// Both transactions should not have a height hint set, as RegisterConf
// should not alter the cache state.
@ -1998,9 +1876,7 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
})
err = n.ConnectTip(block1.Hash(), txDummyHeight, block1.Transactions())
if err != nil {
t.Fatalf("Failed to connect block: %v", err)
}
require.NoError(t, err, "Failed to connect block")
if err := n.NotifyHeight(txDummyHeight); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -2026,13 +1902,9 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
// Now, update the conf details reporting that neither txn was found
// in the historical dispatch.
err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, nil)
if err != nil {
t.Fatalf("unable to update conf details: %v", err)
}
require.NoError(t, err, "unable to update conf details")
err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, nil)
if err != nil {
t.Fatalf("unable to update conf details: %v", err)
}
require.NoError(t, err, "unable to update conf details")
// We'll create another block that will include the first transaction
// and extend the chain.
@ -2041,9 +1913,7 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
})
err = n.ConnectTip(block2.Hash(), tx1Height, block2.Transactions())
if err != nil {
t.Fatalf("Failed to connect block: %v", err)
}
require.NoError(t, err, "Failed to connect block")
if err := n.NotifyHeight(tx1Height); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -2052,18 +1922,14 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
// they should have their height hints updated to the latest block
// height.
hint, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest)
if err != nil {
t.Fatalf("unable to query for hint: %v", err)
}
require.NoError(t, err, "unable to query for hint")
if hint != tx1Height {
t.Fatalf("expected hint %d, got %d",
tx1Height, hint)
}
hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
if err != nil {
t.Fatalf("unable to query for hint: %v", err)
}
require.NoError(t, err, "unable to query for hint")
if hint != tx1Height {
t.Fatalf("expected hint %d, got %d",
tx1Height, hint)
@ -2076,18 +1942,14 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
})
err = n.ConnectTip(block3.Hash(), tx2Height, block3.Transactions())
if err != nil {
t.Fatalf("Failed to connect block: %v", err)
}
require.NoError(t, err, "Failed to connect block")
if err := n.NotifyHeight(tx2Height); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
// The height hint for the first transaction should remain the same.
hint, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest)
if err != nil {
t.Fatalf("unable to query for hint: %v", err)
}
require.NoError(t, err, "unable to query for hint")
if hint != tx1Height {
t.Fatalf("expected hint %d, got %d",
tx1Height, hint)
@ -2096,9 +1958,7 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
// The height hint for the second transaction should now be updated to
// reflect its confirmation.
hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
if err != nil {
t.Fatalf("unable to query for hint: %v", err)
}
require.NoError(t, err, "unable to query for hint")
if hint != tx2Height {
t.Fatalf("expected hint %d, got %d",
tx2Height, hint)
@ -2113,9 +1973,7 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
// This should update the second transaction's height hint within the
// cache to the previous height.
hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
if err != nil {
t.Fatalf("unable to query for hint: %v", err)
}
require.NoError(t, err, "unable to query for hint")
if hint != tx1Height {
t.Fatalf("expected hint %d, got %d",
tx1Height, hint)
@ -2124,9 +1982,7 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
// The first transaction's height hint should remain at the original
// confirmation height.
hint, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest)
if err != nil {
t.Fatalf("unable to query for hint: %v", err)
}
require.NoError(t, err, "unable to query for hint")
if hint != tx1Height {
t.Fatalf("expected hint %d, got %d",
tx1Height, hint)
@ -2158,14 +2014,10 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
// Create two test outpoints and register them for spend notifications.
op1 := wire.OutPoint{Index: 1}
ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend for op1: %v", err)
}
require.NoError(t, err, "unable to register spend for op1")
op2 := wire.OutPoint{Index: 2}
ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend for op2: %v", err)
}
require.NoError(t, err, "unable to register spend for op2")
// Both outpoints should not have a spend hint set upon registration, as
// we must first determine whether they have already been spent in the
@ -2188,9 +2040,7 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
err = n.ConnectTip(
emptyBlock.Hash(), dummyHeight, emptyBlock.Transactions(),
)
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(dummyHeight); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -2215,13 +2065,9 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
// calling UpdateSpendDetails. This should allow their spend hints to be
// updated upon every block connected/disconnected.
err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil)
if err != nil {
t.Fatalf("unable to update spend details: %v", err)
}
require.NoError(t, err, "unable to update spend details")
err = n.UpdateSpendDetails(ntfn2.HistoricalDispatch.SpendRequest, nil)
if err != nil {
t.Fatalf("unable to update spend details: %v", err)
}
require.NoError(t, err, "unable to update spend details")
// We'll create a new block that only contains the spending transaction
// of the first outpoint.
@ -2234,9 +2080,7 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
Transactions: []*wire.MsgTx{spendTx1},
})
err = n.ConnectTip(block1.Hash(), op1Height, block1.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(op1Height); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -2245,16 +2089,12 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
// the new block being connected due to the first outpoint being spent
// at this height, and the second outpoint still being unspent.
op1Hint, err := hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
if err != nil {
t.Fatalf("unable to query for spend hint of op1: %v", err)
}
require.NoError(t, err, "unable to query for spend hint of op1")
if op1Hint != op1Height {
t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
}
op2Hint, err := hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
if err != nil {
t.Fatalf("unable to query for spend hint of op2: %v", err)
}
require.NoError(t, err, "unable to query for spend hint of op2")
if op2Hint != op1Height {
t.Fatalf("expected hint %d, got %d", op1Height, op2Hint)
}
@ -2269,9 +2109,7 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
Transactions: []*wire.MsgTx{spendTx2},
})
err = n.ConnectTip(block2.Hash(), op2Height, block2.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(op2Height); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -2280,16 +2118,12 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
// being spent within the new block. The first outpoint's spend hint
// should remain the same as it's already been spent before.
op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
if err != nil {
t.Fatalf("unable to query for spend hint of op1: %v", err)
}
require.NoError(t, err, "unable to query for spend hint of op1")
if op1Hint != op1Height {
t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
}
op2Hint, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
if err != nil {
t.Fatalf("unable to query for spend hint of op2: %v", err)
}
require.NoError(t, err, "unable to query for spend hint of op2")
if op2Hint != op2Height {
t.Fatalf("expected hint %d, got %d", op2Height, op2Hint)
}
@ -2305,16 +2139,12 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
// included within the chain. The first outpoint's spend hint should
// remain the same.
op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
if err != nil {
t.Fatalf("unable to query for spend hint of op1: %v", err)
}
require.NoError(t, err, "unable to query for spend hint of op1")
if op1Hint != op1Height {
t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
}
op2Hint, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
if err != nil {
t.Fatalf("unable to query for spend hint of op2: %v", err)
}
require.NoError(t, err, "unable to query for spend hint of op2")
if op2Hint != op1Height {
t.Fatalf("expected hint %d, got %d", op1Height, op2Hint)
}
@ -2340,9 +2170,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
// Create a test outpoint and register it for spend notifications.
op1 := wire.OutPoint{Index: 1}
ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend for op1: %v", err)
}
require.NoError(t, err, "unable to register spend for op1")
// A historical rescan should be initiated from the height hint to the
// current height.
@ -2369,9 +2197,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
err = n.ConnectTip(
emptyBlock.Hash(), height, emptyBlock.Transactions(),
)
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(height); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -2426,9 +2252,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
op1Hint, err := hintCache.QuerySpendHint(
ntfn1.HistoricalDispatch.SpendRequest,
)
if err != nil {
t.Fatalf("unable to query for spend hint of op1: %v", err)
}
require.NoError(t, err, "unable to query for spend hint of op1")
if op1Hint != spendHeight {
t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
}
@ -2445,16 +2269,12 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
// tip while the rescan was in progress, the height hint should not be
// updated to the latest height, but stay at the spend height.
err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil)
if err != nil {
t.Fatalf("unable to update spend details: %v", err)
}
require.NoError(t, err, "unable to update spend details")
op1Hint, err = hintCache.QuerySpendHint(
ntfn1.HistoricalDispatch.SpendRequest,
)
if err != nil {
t.Fatalf("unable to query for spend hint of op1: %v", err)
}
require.NoError(t, err, "unable to query for spend hint of op1")
if op1Hint != spendHeight {
t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
}
@ -2471,9 +2291,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
Transactions: []*wire.MsgTx{spendTx2},
})
err = n.ConnectTip(block2.Hash(), height, block2.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(height); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -2481,9 +2299,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
// The outpoint's spend hint should remain the same as it's already
// been spent before.
op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
if err != nil {
t.Fatalf("unable to query for spend hint of op1: %v", err)
}
require.NoError(t, err, "unable to query for spend hint of op1")
if op1Hint != spendHeight {
t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
}
@ -2516,9 +2332,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
// Finally, check that the height hint is still there, unchanged.
op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
if err != nil {
t.Fatalf("unable to query for spend hint of op1: %v", err)
}
require.NoError(t, err, "unable to query for spend hint of op1")
if op1Hint != spendHeight {
t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
}
@ -2537,13 +2351,9 @@ func TestTxNotifierNtfnDone(t *testing.T) {
// We'll start by creating two notification requests: one confirmation
// and one spend.
confNtfn, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register conf ntfn: %v", err)
}
require.NoError(t, err, "unable to register conf ntfn")
spendNtfn, err := n.RegisterSpend(&chainntnfs.ZeroOutPoint, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend: %v", err)
}
require.NoError(t, err, "unable to register spend")
// We'll create two transactions that will satisfy the notification
// requests above and include them in the next block of the chain.
@ -2559,9 +2369,7 @@ func TestTxNotifierNtfnDone(t *testing.T) {
})
err = n.ConnectTip(block.Hash(), 11, block.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(11); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -2611,9 +2419,7 @@ func TestTxNotifierNtfnDone(t *testing.T) {
// We'll reconnect the block that satisfies both of these requests.
// We should see notifications dispatched for both once again.
err = n.ConnectTip(block.Hash(), 11, block.Transactions())
if err != nil {
t.Fatalf("unable to connect block: %v", err)
}
require.NoError(t, err, "unable to connect block")
if err := n.NotifyHeight(11); err != nil {
t.Fatalf("unable to dispatch notifications: %v", err)
}
@ -2667,13 +2473,9 @@ func TestTxNotifierTearDown(t *testing.T) {
// To begin the test, we'll register for a confirmation and spend
// notification.
confNtfn, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
if err != nil {
t.Fatalf("unable to register conf ntfn: %v", err)
}
require.NoError(t, err, "unable to register conf ntfn")
spendNtfn, err := n.RegisterSpend(&chainntnfs.ZeroOutPoint, testRawScript, 1)
if err != nil {
t.Fatalf("unable to register spend ntfn: %v", err)
}
require.NoError(t, err, "unable to register spend ntfn")
// With the notifications registered, we'll now tear down the notifier.
// The notification channels should be closed for notifications, whether

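The hunks above all apply one mechanical rewrite. As a minimal, self-contained sketch of that pattern (illustrative only, not part of the commit; doSomething is a hypothetical stand-in for fallible calls such as n.RegisterConf or n.ConnectTip):

package chainntnfs_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doSomething is a hypothetical stand-in for the fallible calls exercised
// by the tests above, such as n.RegisterConf or n.ConnectTip.
func doSomething() error {
	return nil
}

// TestRequireNoErrorPattern shows the rewrite this commit applies: both
// forms abort the test on a non-nil error, since require.NoError calls
// t.FailNow internally, and the error is printed next to the supplied
// message, so the explicit "%v" formatting is no longer needed.
func TestRequireNoErrorPattern(t *testing.T) {
	err := doSomething()

	// Old form, removed by this commit:
	//
	//	if err != nil {
	//		t.Fatalf("unable to do something: %v", err)
	//	}
	//
	// New form, added by this commit:
	require.NoError(t, err, "unable to do something")
}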
View file

@ -9,6 +9,7 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/stretchr/testify/require"
)
type mockChannelSource struct {
@ -86,13 +87,9 @@ func TestFetchBackupForChan(t *testing.T) {
// First, we'll make two channels; only one of them will have all the
// information we need to construct a set of backups for them.
randomChan1, err := genRandomOpenChannelShell()
if err != nil {
t.Fatalf("unable to generate chan: %v", err)
}
require.NoError(t, err, "unable to generate chan")
randomChan2, err := genRandomOpenChannelShell()
if err != nil {
t.Fatalf("unable to generate chan: %v", err)
}
require.NoError(t, err, "unable to generate chan")
chanSource := newMockChannelSource()
chanSource.chans[randomChan1.FundingOutpoint] = randomChan1
@ -152,13 +149,9 @@ func TestFetchStaticChanBackups(t *testing.T) {
// channel source.
const numChans = 2
randomChan1, err := genRandomOpenChannelShell()
if err != nil {
t.Fatalf("unable to generate chan: %v", err)
}
require.NoError(t, err, "unable to generate chan")
randomChan2, err := genRandomOpenChannelShell()
if err != nil {
t.Fatalf("unable to generate chan: %v", err)
}
require.NoError(t, err, "unable to generate chan")
chanSource := newMockChannelSource()
chanSource.chans[randomChan1.FundingOutpoint] = randomChan1
@ -170,9 +163,7 @@ func TestFetchStaticChanBackups(t *testing.T) {
// of backups for all the channels. This should succeed, as all items
// are populated within the channel source.
backups, err := FetchStaticChanBackups(chanSource, chanSource)
if err != nil {
t.Fatalf("unable to create chan backups: %v", err)
}
require.NoError(t, err, "unable to create chan backups")
if len(backups) != numChans {
t.Fatalf("expected %v chans, instead got %v", numChans,

View file

@ -8,6 +8,8 @@ import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
func makeFakePackedMulti() (PackedMulti, error) {
@ -25,9 +27,7 @@ func assertBackupMatches(t *testing.T, filePath string,
t.Helper()
packedBackup, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fatalf("unable to read file: %v", err)
}
require.NoError(t, err, "unable to read file")
if !bytes.Equal(packedBackup, currentBackup) {
t.Fatalf("backups don't match after first swap: "+
@ -53,9 +53,7 @@ func TestUpdateAndSwap(t *testing.T) {
t.Parallel()
tempTestDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("unable to make temp dir: %v", err)
}
require.NoError(t, err, "unable to make temp dir")
defer os.Remove(tempTestDir)
testCases := []struct {
@ -193,9 +191,7 @@ func TestExtractMulti(t *testing.T) {
// First, as prep, we'll create a single chan backup, then pack that
// fully into a multi backup.
channel, err := genRandomOpenChannelShell()
if err != nil {
t.Fatalf("unable to gen chan: %v", err)
}
require.NoError(t, err, "unable to gen chan")
singleBackup := NewSingle(channel, nil)
@ -204,24 +200,18 @@ func TestExtractMulti(t *testing.T) {
StaticBackups: []Single{singleBackup},
}
err = unpackedMulti.PackToWriter(&b, keyRing)
if err != nil {
t.Fatalf("unable to pack to writer: %v", err)
}
require.NoError(t, err, "unable to pack to writer")
packedMulti := PackedMulti(b.Bytes())
// Finally, we'll make a new temporary file, then write out the packed
// multi directly to it.
tempFile, err := ioutil.TempFile("", "")
if err != nil {
t.Fatalf("unable to create temp file: %v", err)
}
require.NoError(t, err, "unable to create temp file")
defer os.Remove(tempFile.Name())
_, err = tempFile.Write(packedMulti)
if err != nil {
t.Fatalf("unable to write temp file: %v", err)
}
require.NoError(t, err, "unable to write temp file")
if err := tempFile.Sync(); err != nil {
t.Fatalf("unable to sync temp file: %v", err)
}

View file

@ -4,6 +4,8 @@ import (
"bytes"
"net"
"testing"
"github.com/stretchr/testify/require"
)
// TestMultiPackUnpack...
@ -126,9 +128,7 @@ func TestPackedMultiUnpack(t *testing.T) {
// First, we'll make a new unpacked multi with a random channel.
testChannel, err := genRandomOpenChannelShell()
if err != nil {
t.Fatalf("unable to gen random channel: %v", err)
}
require.NoError(t, err, "unable to gen random channel")
var multi Multi
multi.StaticBackups = append(
multi.StaticBackups, NewSingle(testChannel, nil),
@ -143,9 +143,7 @@ func TestPackedMultiUnpack(t *testing.T) {
// We should be able to properly unpack this typed packed multi.
packedMulti := PackedMulti(b.Bytes())
unpackedMulti, err := packedMulti.Unpack(keyRing)
if err != nil {
t.Fatalf("unable to unpack multi: %v", err)
}
require.NoError(t, err, "unable to unpack multi")
// Finally, the versions should match, and the unpacked singles should
// also be identical.

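The pack and unpack steps exercised above can be condensed into one roundtrip helper. A sketch in the new require style, assuming this package's Multi, Single, and PackedMulti types and the keychain.KeyRing interface shown in the diff (illustrative, not part of the commit):

package chanbackup

import (
	"bytes"
	"testing"

	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/keychain"
	"github.com/stretchr/testify/require"
)

// assertMultiRoundTrip packs a Multi containing a single channel backup
// and unpacks it again, asserting each step with require. It is an
// illustrative helper, not part of this commit.
func assertMultiRoundTrip(t *testing.T, channel *channeldb.OpenChannel,
	keyRing keychain.KeyRing) {

	multi := Multi{
		StaticBackups: []Single{NewSingle(channel, nil)},
	}

	var b bytes.Buffer
	require.NoError(t, multi.PackToWriter(&b, keyRing),
		"unable to pack to writer")

	unpacked, err := PackedMulti(b.Bytes()).Unpack(keyRing)
	require.NoError(t, err, "unable to unpack multi")
	require.Equal(t, multi.Version, unpacked.Version)
}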
View file

@ -7,6 +7,7 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/keychain"
"github.com/stretchr/testify/require"
)
type mockSwapper struct {
@ -157,9 +158,7 @@ func TestSubSwapperIdempotentStartStop(t *testing.T) {
swapper := newMockSwapper(keyRing)
subSwapper, err := NewSubSwapper(nil, &chanNotifier, keyRing, swapper)
if err != nil {
t.Fatalf("unable to init subSwapper: %v", err)
}
require.NoError(t, err, "unable to init subSwapper")
if err := subSwapper.Start(); err != nil {
t.Fatalf("unable to start swapper: %v", err)
@ -226,9 +225,7 @@ func TestSubSwapperUpdater(t *testing.T) {
subSwapper, err := NewSubSwapper(
initialChanSet, chanNotifier, keyRing, swapper,
)
if err != nil {
t.Fatalf("unable to make swapper: %v", err)
}
require.NoError(t, err, "unable to make swapper")
if err := subSwapper.Start(); err != nil {
t.Fatalf("unable to start sub swapper: %v", err)
}
@ -241,9 +238,7 @@ func TestSubSwapperUpdater(t *testing.T) {
// Now that the sub-swapper is active, we'll notify to add a brand new
// channel to the channel state.
newChannel, err := genRandomOpenChannelShell()
if err != nil {
t.Fatalf("unable to create new chan: %v", err)
}
require.NoError(t, err, "unable to create new chan")
// With the new channel created, we'll send a new update to the main
// goroutine telling it about this new channel.

View file

@ -7,6 +7,7 @@ import (
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/stretchr/testify/require"
)
type mockChannelRestorer struct {
@ -108,9 +109,7 @@ func TestUnpackAndRecoverSingles(t *testing.T) {
err = UnpackAndRecoverSingles(
packedBackups, keyRing, &chanRestorer, &peerConnector,
)
if err != nil {
t.Fatalf("unable to recover chans: %v", err)
}
require.NoError(t, err, "unable to recover chans")
// Both the restorer and connector should have been called 10 times,
// once for each backup.
@ -204,9 +203,7 @@ func TestUnpackAndRecoverMulti(t *testing.T) {
err = UnpackAndRecoverMulti(
packedMulti, keyRing, &chanRestorer, &peerConnector,
)
if err != nil {
t.Fatalf("unable to recover chans: %v", err)
}
require.NoError(t, err, "unable to recover chans")
// Both the restorer and connector should have been called 10 times,
// once for each backup.

View file

@ -16,6 +16,7 @@ import (
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/shachain"
"github.com/stretchr/testify/require"
)
var (
@ -202,9 +203,7 @@ func TestSinglePackUnpack(t *testing.T) {
// contains all the information we need to create a static channel
// backup.
channel, err := genRandomOpenChannelShell()
if err != nil {
t.Fatalf("unable to gen open channel: %v", err)
}
require.NoError(t, err, "unable to gen open channel")
singleChanBackup := NewSingle(channel, []net.Addr{addr1, addr2})
@ -340,9 +339,7 @@ func TestPackedSinglesUnpack(t *testing.T) {
// With all singles packed, we'll create the grouped type and attempt
// to Unpack all of them in a single go.
freshSingles, err := PackedSingles(packedSingles).Unpack(keyRing)
if err != nil {
t.Fatalf("unable to unpack singles: %v", err)
}
require.NoError(t, err, "unable to unpack singles")
// The set of freshly unpacked singles should exactly match the initial
// set of singles that we packed before.
@ -386,9 +383,7 @@ func TestSinglePackStaticChanBackups(t *testing.T) {
// Now that all of our singles have been created, we'll attempt to
// pack them all in a single batch.
packedSingleMap, err := PackStaticChanBackups(unpackedSingles, keyRing)
if err != nil {
t.Fatalf("unable to pack backups: %v", err)
}
require.NoError(t, err, "unable to pack backups")
// With our packed singles obtained, we'll ensure that each of them
// match their unpacked counterparts after they themselves have been
@ -432,9 +427,7 @@ func TestSingleUnconfirmedChannel(t *testing.T) {
// we need to create a static channel backup but simulate an
// unconfirmed channel by setting the block height to 0.
channel, err := genRandomOpenChannelShell()
if err != nil {
t.Fatalf("unable to gen open channel: %v", err)
}
require.NoError(t, err, "unable to gen open channel")
channel.ShortChannelID.BlockHeight = 0
channel.FundingBroadcastHeight = fundingBroadcastHeight
@ -450,9 +443,7 @@ func TestSingleUnconfirmedChannel(t *testing.T) {
}
var unpackedSingle Single
err = unpackedSingle.UnpackFromReader(&b, keyRing)
if err != nil {
t.Fatalf("unable to unpack single: %v", err)
}
require.NoError(t, err, "unable to unpack single")
if unpackedSingle.ShortChannelID.BlockHeight != fundingBroadcastHeight {
t.Fatalf("invalid block height. got %d expected %d.",
unpackedSingle.ShortChannelID.BlockHeight,

View file

@ -203,9 +203,7 @@ func createTestChannel(t *testing.T, cdb *ChannelStateDB,
// Mark the channel as open with the short channel id provided.
err = params.channel.MarkAsOpen(params.channel.ShortChannelID)
if err != nil {
t.Fatalf("unable to mark channel open: %v", err)
}
require.NoError(t, err, "unable to mark channel open")
return params.channel
}
@ -213,9 +211,7 @@ func createTestChannel(t *testing.T, cdb *ChannelStateDB,
func createTestChannelState(t *testing.T, cdb *ChannelStateDB) *OpenChannel {
// Simulate a single channel update by priming the revocation store.
producer, err := shachain.NewRevocationProducerFromBytes(key[:])
if err != nil {
t.Fatalf("could not get producer: %v", err)
}
require.NoError(t, err, "could not get producer")
store := shachain.NewRevocationStore()
for i := 0; i < 1; i++ {
preImage, err := producer.AtIndex(uint64(i))
@ -351,9 +347,7 @@ func TestOpenChannelPutGetDelete(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -388,9 +382,7 @@ func TestOpenChannelPutGetDelete(t *testing.T) {
)
openChannels, err := cdb.FetchOpenChannels(state.IdentityPub)
if err != nil {
t.Fatalf("unable to fetch open channel: %v", err)
}
require.NoError(t, err, "unable to fetch open channel")
newState := openChannels[0]
@ -405,17 +397,13 @@ func TestOpenChannelPutGetDelete(t *testing.T) {
// next revocation for the state machine. This tests the initial
// post-funding revocation exchange.
nextRevKey, err := btcec.NewPrivateKey()
if err != nil {
t.Fatalf("unable to create new private key: %v", err)
}
require.NoError(t, err, "unable to create new private key")
if err := state.InsertNextRevocation(nextRevKey.PubKey()); err != nil {
t.Fatalf("unable to update revocation: %v", err)
}
openChannels, err = cdb.FetchOpenChannels(state.IdentityPub)
if err != nil {
t.Fatalf("unable to fetch open channel: %v", err)
}
require.NoError(t, err, "unable to fetch open channel")
updatedChan := openChannels[0]
// Ensure that the revocation was set properly.
@ -442,9 +430,7 @@ func TestOpenChannelPutGetDelete(t *testing.T) {
// As the channel is now closed, attempting to fetch all open channels
// for our fake node ID should return an empty slice.
openChans, err := cdb.FetchOpenChannels(state.IdentityPub)
if err != nil {
t.Fatalf("unable to fetch open channels: %v", err)
}
require.NoError(t, err, "unable to fetch open channels")
if len(openChans) != 0 {
t.Fatalf("all channels not deleted, found %v", len(openChans))
}
@ -587,9 +573,7 @@ func TestChannelStateTransition(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -661,15 +645,11 @@ func TestChannelStateTransition(t *testing.T) {
}
err = channel.UpdateCommitment(&commitment, unsignedAckedUpdates)
if err != nil {
t.Fatalf("unable to update commitment: %v", err)
}
require.NoError(t, err, "unable to update commitment")
// Assert that the update is correctly written to the database.
dbUnsignedAckedUpdates, err := channel.UnsignedAckedUpdates()
if err != nil {
t.Fatalf("unable to fetch dangling remote updates: %v", err)
}
require.NoError(t, err, "unable to fetch dangling remote updates")
if len(dbUnsignedAckedUpdates) != 1 {
t.Fatalf("unexpected number of dangling remote updates")
}
@ -686,14 +666,10 @@ func TestChannelStateTransition(t *testing.T) {
// commitment transaction along with the modified signature should all
// have been updated.
updatedChannel, err := cdb.FetchOpenChannels(channel.IdentityPub)
if err != nil {
t.Fatalf("unable to fetch updated channel: %v", err)
}
require.NoError(t, err, "unable to fetch updated channel")
assertCommitmentEqual(t, &commitment, &updatedChannel[0].LocalCommitment)
numDiskUpdates, err := updatedChannel[0].CommitmentHeight()
if err != nil {
t.Fatalf("unable to read commitment height from disk: %v", err)
}
require.NoError(t, err, "unable to read commitment height from disk")
if numDiskUpdates != uint64(commitment.CommitHeight) {
t.Fatalf("num disk updates doesn't match: %v vs %v",
numDiskUpdates, commitment.CommitHeight)
@ -757,9 +733,7 @@ func TestChannelStateTransition(t *testing.T) {
// The commitment tip should now match the commitment that we just
// inserted.
diskCommitDiff, err := channel.RemoteCommitChainTip()
if err != nil {
t.Fatalf("unable to fetch commit diff: %v", err)
}
require.NoError(t, err, "unable to fetch commit diff")
if !reflect.DeepEqual(commitDiff, diskCommitDiff) {
t.Fatalf("commit diffs don't match: %v vs %v", spew.Sdump(remoteCommit),
spew.Sdump(diskCommitDiff))
@ -775,9 +749,7 @@ func TestChannelStateTransition(t *testing.T) {
// by the remote party.
channel.RemoteCurrentRevocation = channel.RemoteNextRevocation
newPriv, err := btcec.NewPrivateKey()
if err != nil {
t.Fatalf("unable to generate key: %v", err)
}
require.NoError(t, err, "unable to generate key")
channel.RemoteNextRevocation = newPriv.PubKey()
fwdPkg := NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight,
@ -786,9 +758,7 @@ func TestChannelStateTransition(t *testing.T) {
err = channel.AdvanceCommitChainTail(
fwdPkg, nil, dummyLocalOutputIndex, dummyRemoteOutIndex,
)
if err != nil {
t.Fatalf("unable to append to revocation log: %v", err)
}
require.NoError(t, err, "unable to append to revocation log")
// At this point, the remote commit chain should be nil, and the posted
// remote commitment should match the one we added as a diff above.
@ -801,9 +771,7 @@ func TestChannelStateTransition(t *testing.T) {
diskPrevCommit, _, err := channel.FindPreviousState(
oldRemoteCommit.CommitHeight,
)
if err != nil {
t.Fatalf("unable to fetch past delta: %v", err)
}
require.NoError(t, err, "unable to fetch past delta")
// Check the output indexes are saved as expected.
require.EqualValues(
@ -820,9 +788,7 @@ func TestChannelStateTransition(t *testing.T) {
// The state number recovered from the tail of the revocation log
// should be identical to this current state.
logTailHeight, err := channel.revocationLogTailCommitHeight()
if err != nil {
t.Fatalf("unable to retrieve log: %v", err)
}
require.NoError(t, err, "unable to retrieve log")
if logTailHeight != oldRemoteCommit.CommitHeight {
t.Fatal("update number doesn't match")
}
@ -844,17 +810,13 @@ func TestChannelStateTransition(t *testing.T) {
err = channel.AdvanceCommitChainTail(
fwdPkg, nil, dummyLocalOutputIndex, dummyRemoteOutIndex,
)
if err != nil {
t.Fatalf("unable to append to revocation log: %v", err)
}
require.NoError(t, err, "unable to append to revocation log")
// Once again, fetch the state and ensure it has been properly updated.
prevCommit, _, err := channel.FindPreviousState(
oldRemoteCommit.CommitHeight,
)
if err != nil {
t.Fatalf("unable to fetch past delta: %v", err)
}
require.NoError(t, err, "unable to fetch past delta")
// Check the output indexes are saved as expected.
require.EqualValues(
@ -869,18 +831,14 @@ func TestChannelStateTransition(t *testing.T) {
// Once again, the state number recovered from the tail of the revocation
// log should be identical to this current state.
logTailHeight, err = channel.revocationLogTailCommitHeight()
if err != nil {
t.Fatalf("unable to retrieve log: %v", err)
}
require.NoError(t, err, "unable to retrieve log")
if logTailHeight != oldRemoteCommit.CommitHeight {
t.Fatal("update number doesn't match")
}
// The revocation state stored on-disk should now also be identical.
updatedChannel, err = cdb.FetchOpenChannels(channel.IdentityPub)
if err != nil {
t.Fatalf("unable to fetch updated channel: %v", err)
}
require.NoError(t, err, "unable to fetch updated channel")
if !channel.RemoteCurrentRevocation.IsEqual(updatedChannel[0].RemoteCurrentRevocation) {
t.Fatalf("revocation state was not synced")
}
@ -908,9 +866,7 @@ func TestChannelStateTransition(t *testing.T) {
// If we attempt to fetch the target channel again, it shouldn't be
// found.
channels, err := cdb.FetchOpenChannels(channel.IdentityPub)
if err != nil {
t.Fatalf("unable to fetch updated channels: %v", err)
}
require.NoError(t, err, "unable to fetch updated channels")
if len(channels) != 0 {
t.Fatalf("%v channels, found, but none should be",
len(channels))
@ -934,9 +890,7 @@ func TestFetchPendingChannels(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -946,9 +900,7 @@ func TestFetchPendingChannels(t *testing.T) {
createTestChannel(t, cdb, pendingHeightOption(broadcastHeight))
pendingChannels, err := cdb.FetchPendingChannels()
if err != nil {
t.Fatalf("unable to list pending channels: %v", err)
}
require.NoError(t, err, "unable to list pending channels")
if len(pendingChannels) != 1 {
t.Fatalf("incorrect number of pending channels: expecting %v,"+
@ -969,9 +921,7 @@ func TestFetchPendingChannels(t *testing.T) {
TxPosition: 15,
}
err = pendingChannels[0].MarkAsOpen(chanOpenLoc)
if err != nil {
t.Fatalf("unable to mark channel as open: %v", err)
}
require.NoError(t, err, "unable to mark channel as open")
if pendingChannels[0].IsPending {
t.Fatalf("channel marked open should no longer be pending")
@ -986,9 +936,7 @@ func TestFetchPendingChannels(t *testing.T) {
// Next, we'll re-fetch the channel to ensure that the open height was
// properly set.
openChans, err := cdb.FetchAllChannels()
if err != nil {
t.Fatalf("unable to fetch channels: %v", err)
}
require.NoError(t, err, "unable to fetch channels")
if openChans[0].ShortChanID() != chanOpenLoc {
t.Fatalf("channel opening heights don't match: expected %v, "+
"got %v", spew.Sdump(openChans[0].ShortChanID()),
@ -1001,9 +949,7 @@ func TestFetchPendingChannels(t *testing.T) {
}
pendingChannels, err = cdb.FetchPendingChannels()
if err != nil {
t.Fatalf("unable to list pending channels: %v", err)
}
require.NoError(t, err, "unable to list pending channels")
if len(pendingChannels) != 0 {
t.Fatalf("incorrect number of pending channels: expecting %v,"+
@ -1015,9 +961,7 @@ func TestFetchClosedChannels(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -1046,9 +990,7 @@ func TestFetchClosedChannels(t *testing.T) {
// closed. We should get the same result whether querying for pending
// channels only, or not.
pendingClosed, err := cdb.FetchClosedChannels(true)
if err != nil {
t.Fatalf("failed fetching closed channels: %v", err)
}
require.NoError(t, err, "failed fetching closed channels")
if len(pendingClosed) != 1 {
t.Fatalf("incorrect number of pending closed channels: expecting %v,"+
"got %v", 1, len(pendingClosed))
@ -1058,9 +1000,7 @@ func TestFetchClosedChannels(t *testing.T) {
spew.Sdump(summary), spew.Sdump(pendingClosed[0]))
}
closed, err := cdb.FetchClosedChannels(false)
if err != nil {
t.Fatalf("failed fetching all closed channels: %v", err)
}
require.NoError(t, err, "failed fetching all closed channels")
if len(closed) != 1 {
t.Fatalf("incorrect number of closed channels: expecting %v, "+
"got %v", 1, len(closed))
@ -1072,24 +1012,18 @@ func TestFetchClosedChannels(t *testing.T) {
// Mark the channel as fully closed.
err = cdb.MarkChanFullyClosed(&state.FundingOutpoint)
if err != nil {
t.Fatalf("failed fully closing channel: %v", err)
}
require.NoError(t, err, "failed fully closing channel")
// The channel should no longer be considered pending, but should still
// be retrieved when fetching all the closed channels.
closed, err = cdb.FetchClosedChannels(false)
if err != nil {
t.Fatalf("failed fetching closed channels: %v", err)
}
require.NoError(t, err, "failed fetching closed channels")
if len(closed) != 1 {
t.Fatalf("incorrect number of closed channels: expecting %v, "+
"got %v", 1, len(closed))
}
pendingClose, err := cdb.FetchClosedChannels(true)
if err != nil {
t.Fatalf("failed fetching channels pending close: %v", err)
}
require.NoError(t, err, "failed fetching channels pending close")
if len(pendingClose) != 0 {
t.Fatalf("incorrect number of channels pending close: "+
"expecting %v, got %v", 0, len(pendingClose))
@ -1108,9 +1042,7 @@ func TestFetchWaitingCloseChannels(t *testing.T) {
// them will have their funding transaction confirmed on-chain, while
// the other one will remain unconfirmed.
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -1172,9 +1104,7 @@ func TestFetchWaitingCloseChannels(t *testing.T) {
// database. We should expect to see both channels above, even if any of
// them haven't had their funding transaction confirm on-chain.
waitingCloseChannels, err := cdb.FetchWaitingCloseChannels()
if err != nil {
t.Fatalf("unable to fetch all waiting close channels: %v", err)
}
require.NoError(t, err, "unable to fetch all waiting close channels")
if len(waitingCloseChannels) != numChannels {
t.Fatalf("expected %d channels waiting to be closed, got %d", 2,
len(waitingCloseChannels))
@ -1225,9 +1155,7 @@ func TestRefreshShortChanID(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -1262,9 +1190,7 @@ func TestRefreshShortChanID(t *testing.T) {
}
err = state.MarkAsOpen(chanOpenLoc)
if err != nil {
t.Fatalf("unable to mark channel open: %v", err)
}
require.NoError(t, err, "unable to mark channel open")
// The short_chan_id of the receiver to MarkAsOpen should reflect the
// open location, but the other pending channel should remain unchanged.
@ -1285,9 +1211,7 @@ func TestRefreshShortChanID(t *testing.T) {
// Now, refresh the short channel ID of the pending channel.
err = pendingChannel.RefreshShortChanID()
if err != nil {
t.Fatalf("unable to refresh short_chan_id: %v", err)
}
require.NoError(t, err, "unable to refresh short_chan_id")
// This should result in both OpenChannels now having the same
// ShortChanID.
@ -1447,9 +1371,7 @@ func TestCloseChannelStatus(t *testing.T) {
histChan, err := channel.Db.FetchHistoricalChannel(
&channel.FundingOutpoint,
)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
require.NoError(t, err, "unexpected error")
if !histChan.HasChanStatus(ChanStatusRemoteCloseInitiator) {
t.Fatalf("channel should have status")

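Each test above opens a throwaway database with MakeTestDB and defers a cleanup. A sketch of that fixture as a shared helper in the new require style (illustrative, not part of the commit; assumes Go 1.14+ for t.Cleanup):

package channeldb

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// setupTestChannelStateDB is an illustrative helper, not part of this
// commit: it builds the throwaway database the tests above create inline,
// failing the test immediately if the backend cannot be created, and
// registers the cleanup via t.Cleanup instead of a manual defer.
func setupTestChannelStateDB(t *testing.T) *ChannelStateDB {
	fullDB, cleanUp, err := MakeTestDB()
	require.NoError(t, err, "unable to make test database")
	t.Cleanup(cleanUp)

	return fullDB.ChannelStateDB()
}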
View file

@ -33,23 +33,17 @@ func TestOpenWithCreate(t *testing.T) {
// First, create a temporary directory to be used for the duration of
// this test.
tempDirName, err := ioutil.TempDir("", "channeldb")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
defer os.RemoveAll(tempDirName)
// Next, open the database, thereby creating channeldb for the first time.
dbPath := filepath.Join(tempDirName, "cdb")
backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb")
if err != nil {
t.Fatalf("unable to get test db backend: %v", err)
}
require.NoError(t, err, "unable to get test db backend")
defer cleanup()
cdb, err := CreateWithBackend(backend)
if err != nil {
t.Fatalf("unable to create channeldb: %v", err)
}
require.NoError(t, err, "unable to create channeldb")
if err := cdb.Close(); err != nil {
t.Fatalf("unable to close channeldb: %v", err)
}
@ -62,9 +56,7 @@ func TestOpenWithCreate(t *testing.T) {
// Now, reopen the same db in dry run migration mode. Since we have not
// applied any migrations, this should ignore the flag and not fail.
cdb, err = Open(dbPath, OptionDryRunMigration(true))
if err != nil {
t.Fatalf("unable to create channeldb: %v", err)
}
require.NoError(t, err, "unable to create channeldb")
if err := cdb.Close(); err != nil {
t.Fatalf("unable to close channeldb: %v", err)
}
@ -79,23 +71,17 @@ func TestWipe(t *testing.T) {
// First, create a temporary directory to be used for the duration of
// this test.
tempDirName, err := ioutil.TempDir("", "channeldb")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
defer os.RemoveAll(tempDirName)
// Next, open the database, thereby creating channeldb for the first time.
dbPath := filepath.Join(tempDirName, "cdb")
backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb")
if err != nil {
t.Fatalf("unable to get test db backend: %v", err)
}
require.NoError(t, err, "unable to get test db backend")
defer cleanup()
fullDB, err := CreateWithBackend(backend)
if err != nil {
t.Fatalf("unable to create channeldb: %v", err)
}
require.NoError(t, err, "unable to create channeldb")
defer fullDB.Close()
if err := fullDB.Wipe(); err != nil {
@ -122,9 +108,7 @@ func TestFetchClosedChannelForID(t *testing.T) {
const numChans = 101
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -195,9 +179,7 @@ func TestAddrsForNode(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
graph := fullDB.ChannelGraph()
@ -206,9 +188,7 @@ func TestAddrsForNode(t *testing.T) {
// node, but this node will only have half the number of addresses it
// usually does.
testNode, err := createTestVertex(fullDB)
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
testNode.Addresses = []net.Addr{testAddr}
if err := graph.SetSourceNode(testNode); err != nil {
t.Fatalf("unable to set source node: %v", err)
@ -217,9 +197,7 @@ func TestAddrsForNode(t *testing.T) {
// Next, we'll make a link node with the same pubkey, but with an
// additional address.
nodePub, err := testNode.PubKey()
if err != nil {
t.Fatalf("unable to recv node pub: %v", err)
}
require.NoError(t, err, "unable to recv node pub")
linkNode := NewLinkNode(
fullDB.channelStateDB.linkNodeDB, wire.MainNet, nodePub,
anotherAddr,
@ -231,9 +209,7 @@ func TestAddrsForNode(t *testing.T) {
// Now that we've created a link node, as well as a vertex for the
// node, we'll query for all its addresses.
nodeAddrs, err := fullDB.AddrsForNode(nodePub)
if err != nil {
t.Fatalf("unable to obtain node addrs: %v", err)
}
require.NoError(t, err, "unable to obtain node addrs")
expectedAddrs := make(map[string]struct{})
expectedAddrs[testAddr.String()] = struct{}{}
@ -257,9 +233,7 @@ func TestFetchChannel(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -269,9 +243,7 @@ func TestFetchChannel(t *testing.T) {
// Next, attempt to fetch the channel by its chan point.
dbChannel, err := cdb.FetchChannel(nil, channelState.FundingOutpoint)
if err != nil {
t.Fatalf("unable to fetch channel: %v", err)
}
require.NoError(t, err, "unable to fetch channel")
// The decoded channel state should be identical to what we stored
// above.
@ -283,9 +255,7 @@ func TestFetchChannel(t *testing.T) {
// If we attempt to query for a non-existent channel, then we should
// get an error.
channelState2 := createTestChannelState(t, cdb)
if err != nil {
t.Fatalf("unable to create channel state: %v", err)
}
require.NoError(t, err, "unable to create channel state")
channelState2.FundingOutpoint.Index ^= 1
_, err = cdb.FetchChannel(nil, channelState2.FundingOutpoint)
@ -361,9 +331,7 @@ func TestRestoreChannelShells(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -372,9 +340,7 @@ func TestRestoreChannelShells(t *testing.T) {
// amount of information required for us to initiate the data loss
// protection feature.
channelShell, err := genRandomChannelShell()
if err != nil {
t.Fatalf("unable to gen channel shell: %v", err)
}
require.NoError(t, err, "unable to gen channel shell")
// With the channel shell constructed, we'll now insert it into the
// database with the restoration method.
@ -388,9 +354,7 @@ func TestRestoreChannelShells(t *testing.T) {
// First, we'll attempt to query for all channels that we have with the
// node public key that was restored.
nodeChans, err := cdb.FetchOpenChannels(channelShell.Chan.IdentityPub)
if err != nil {
t.Fatalf("unable find channel: %v", err)
}
require.NoError(t, err, "unable find channel")
// We should now find a single channel from the database.
if len(nodeChans) != 1 {
@ -432,18 +396,14 @@ func TestRestoreChannelShells(t *testing.T) {
// We should also be able to find the channel if we query for it
// directly.
_, err = cdb.FetchChannel(nil, channelShell.Chan.FundingOutpoint)
if err != nil {
t.Fatalf("unable to fetch channel: %v", err)
}
require.NoError(t, err, "unable to fetch channel")
// We should also be able to find the link node that was inserted by
// its public key.
linkNode, err := fullDB.channelStateDB.linkNodeDB.FetchLinkNode(
channelShell.Chan.IdentityPub,
)
if err != nil {
t.Fatalf("unable to fetch link node: %v", err)
}
require.NoError(t, err, "unable to fetch link node")
// The node should have the same address, as specified in the channel
// shell.
@ -461,9 +421,7 @@ func TestAbandonChannel(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -483,9 +441,7 @@ func TestAbandonChannel(t *testing.T) {
// We should now be able to abandon the channel without any errors.
closeHeight := uint32(11)
err = cdb.AbandonChannel(&chanState.FundingOutpoint, closeHeight)
if err != nil {
t.Fatalf("unable to abandon channel: %v", err)
}
require.NoError(t, err, "unable to abandon channel")
// At this point, the channel should no longer be found in the set of
// open channels.
@ -497,16 +453,12 @@ func TestAbandonChannel(t *testing.T) {
// However we should be able to retrieve a close channel summary for
// the channel.
_, err = cdb.FetchClosedChannel(&chanState.FundingOutpoint)
if err != nil {
t.Fatalf("unable to fetch closed channel: %v", err)
}
require.NoError(t, err, "unable to fetch closed channel")
// Finally, if we attempt to abandon the channel again, we should get a
// nil error as the channel has already been abandoned.
err = cdb.AbandonChannel(&chanState.FundingOutpoint, closeHeight)
if err != nil {
t.Fatalf("unable to abandon channel: %v", err)
}
require.NoError(t, err, "unable to abandon channel")
}
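
The double AbandonChannel call above asserts idempotency: repeating the operation must keep returning nil. A sketch of the same shape under that assumption, with abandon as a hypothetical stand-in:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// abandon is a hypothetical idempotent operation: the first call removes
// the channel, later calls find nothing to do and still return nil.
func abandon(openChans map[string]bool, chanPoint string) error {
	delete(openChans, chanPoint)
	return nil
}

func TestAbandonIsIdempotent(t *testing.T) {
	openChans := map[string]bool{"outpoint:0": true}

	// Both calls should succeed, mirroring the assertions above.
	require.NoError(t, abandon(openChans, "outpoint:0"), "first abandon")
	require.NoError(t, abandon(openChans, "outpoint:0"), "second abandon")
}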
// TestFetchChannels tests the filtering of open channels in fetchChannels.
@ -707,9 +659,7 @@ func TestFetchChannels(t *testing.T) {
// TestFetchHistoricalChannel tests lookup of historical channels.
func TestFetchHistoricalChannel(t *testing.T) {
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -737,9 +687,7 @@ func TestFetchHistoricalChannel(t *testing.T) {
}
histChannel, err := cdb.FetchHistoricalChannel(&channel.FundingOutpoint)
if err != nil {
t.Fatalf("unexpected error getting channel: %v", err)
}
require.NoError(t, err, "unexpected error getting channel")
// FetchHistoricalChannel will attach the cdb to channel.Db, we set it
// here so that we can check that all other fields on the channel equal

View file

@ -9,6 +9,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestForwardingLogBasicStorageAndQuery tests that we're able to store and
@ -20,9 +21,7 @@ func TestForwardingLogBasicStorageAndQuery(t *testing.T) {
// forwarding event log that we'll be using for the duration of the
// test.
db, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
defer cleanUp()
log := ForwardingLog{
@ -63,9 +62,7 @@ func TestForwardingLogBasicStorageAndQuery(t *testing.T) {
NumMaxEvents: 1000,
}
timeSlice, err := log.Query(eventQuery)
if err != nil {
t.Fatalf("unable to query for events: %v", err)
}
require.NoError(t, err, "unable to query for events")
// The set of returned events should match identically, as they should
// be returned in sorted order.
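
The queries in this file page through the log with IndexOffset and NumMaxEvents. A simplified, self-contained sketch of that paging contract; the types are stand-ins, not lnd's real ForwardingLog:

package main

import "fmt"

// eventQuery keeps only the two paging fields these tests exercise.
type eventQuery struct {
	IndexOffset  uint32
	NumMaxEvents uint32
}

// queryEvents returns at most NumMaxEvents events after skipping the
// first IndexOffset, which is the behaviour the assertions rely on.
func queryEvents(events []int, q eventQuery) []int {
	start := int(q.IndexOffset)
	if start >= len(events) {
		return nil
	}
	end := start + int(q.NumMaxEvents)
	if end > len(events) {
		end = len(events)
	}
	return events[start:end]
}

func main() {
	events := make([]int, 20)
	for i := range events {
		events[i] = i
	}

	// First page: events 0-9. Second page: events 10-19.
	fmt.Println(queryEvents(events, eventQuery{IndexOffset: 0, NumMaxEvents: 10}))
	fmt.Println(queryEvents(events, eventQuery{IndexOffset: 10, NumMaxEvents: 10}))
}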
@ -93,9 +90,7 @@ func TestForwardingLogQueryOptions(t *testing.T) {
// forwarding event log that we'll be using for the duration of the
// test.
db, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
defer cleanUp()
log := ForwardingLog{
@ -136,9 +131,7 @@ func TestForwardingLogQueryOptions(t *testing.T) {
NumMaxEvents: 10,
}
timeSlice, err := log.Query(eventQuery)
if err != nil {
t.Fatalf("unable to query for events: %v", err)
}
require.NoError(t, err, "unable to query for events")
// We should get exactly 10 events back.
if len(timeSlice.ForwardingEvents) != 10 {
@ -164,9 +157,7 @@ func TestForwardingLogQueryOptions(t *testing.T) {
// more events, that are the last 10 events we wrote.
eventQuery.IndexOffset = 10
timeSlice, err = log.Query(eventQuery)
if err != nil {
t.Fatalf("unable to query for events: %v", err)
}
require.NoError(t, err, "unable to query for events")
// We should get exactly 10 events back once again.
if len(timeSlice.ForwardingEvents) != 10 {
@ -199,9 +190,7 @@ func TestForwardingLogQueryLimit(t *testing.T) {
// forwarding event log that we'll be using for the duration of the
// test.
db, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
defer cleanUp()
log := ForwardingLog{
@ -242,9 +231,7 @@ func TestForwardingLogQueryLimit(t *testing.T) {
NumMaxEvents: 100,
}
timeSlice, err := log.Query(eventQuery)
if err != nil {
t.Fatalf("unable to query for events: %v", err)
}
require.NoError(t, err, "unable to query for events")
// We should get exactly 100 events back.
if len(timeSlice.ForwardingEvents) != 100 {
@ -315,9 +302,7 @@ func TestForwardingLogStoreEvent(t *testing.T) {
// forwarding event log that we'll be using for the duration of the
// test.
db, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
defer cleanUp()
log := ForwardingLog{
@ -362,9 +347,7 @@ func TestForwardingLogStoreEvent(t *testing.T) {
NumMaxEvents: uint32(numEvents * 3),
}
timeSlice, err := log.Query(eventQuery)
if err != nil {
t.Fatalf("unable to query for events: %v", err)
}
require.NoError(t, err, "unable to query for events")
// We should get exactly 40 events back.
if len(timeSlice.ForwardingEvents) != numEvents*2 {

View file

@ -855,9 +855,7 @@ func makeFwdPkgDB(t *testing.T, path string) kvdb.Backend { // nolint:unparam
bdb, err := kvdb.Create(
kvdb.BoltBackendName, path, true, kvdb.DefaultDBTimeout,
)
if err != nil {
t.Fatalf("unable to open boltdb: %v", err)
}
require.NoError(t, err, "unable to open boltdb")
return bdb
}
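
A variant of the same helper using t.Cleanup instead of a returned closure, so a caller cannot forget to close the backend. The kvdb call is copied from the helper above; treating Close as the only required teardown is an assumption of this sketch:

package example

import (
	"testing"

	"github.com/lightningnetwork/lnd/kvdb"
	"github.com/stretchr/testify/require"
)

// makeFwdPkgDBWithCleanup opens a bolt-backed kvdb and registers its
// teardown with the test framework, instead of leaving Close to callers.
func makeFwdPkgDBWithCleanup(t *testing.T, path string) kvdb.Backend {
	t.Helper()

	bdb, err := kvdb.Create(
		kvdb.BoltBackendName, path, true, kvdb.DefaultDBTimeout,
	)
	require.NoError(t, err, "unable to open boltdb")

	t.Cleanup(func() {
		require.NoError(t, bdb.Close(), "unable to close boltdb")
	})

	return bdb
}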

File diff suppressed because it is too large

View file

@ -151,16 +151,12 @@ func TestInvoiceWorkflow(t *testing.T) {
func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) {
db, cleanUp, err := MakeTestDB()
defer cleanUp()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
// Create a fake invoice which we'll use several times in the tests
// below.
fakeInvoice, err := randInvoice(10000)
if err != nil {
t.Fatalf("unable to create invoice: %v", err)
}
require.NoError(t, err, "unable to create invoice")
invPayHash := fakeInvoice.Terms.PaymentPreimage.Hash()
// Select the payment hash and payment address we will use to lookup or
@ -216,13 +212,9 @@ func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) {
// SettledDate
payAmt := fakeInvoice.Terms.Value * 2
_, err = db.UpdateInvoice(ref, nil, getUpdateInvoice(payAmt))
if err != nil {
t.Fatalf("unable to settle invoice: %v", err)
}
require.NoError(t, err, "unable to settle invoice")
dbInvoice2, err := db.LookupInvoice(ref)
if err != nil {
t.Fatalf("unable to fetch invoice: %v", err)
}
require.NoError(t, err, "unable to fetch invoice")
if dbInvoice2.State != ContractSettled {
t.Fatalf("invoice should now be settled but isn't")
}
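
Note the ordering used throughout these tests: cleanUp is deferred before err is checked. That is only safe if the constructor returns a callable (possibly no-op) cleanup even on failure. A sketch of a constructor honouring that contract; the names are hypothetical:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// makeTestDB always returns a non-nil cleanup, even when it fails, so
// callers may write `defer cleanup()` before checking err.
func makeTestDB(fail bool) (string, func(), error) {
	if fail {
		return "", func() {}, errors.New("disk full")
	}
	return "db", func() { /* remove temp files here */ }, nil
}

func TestCleanupContract(t *testing.T) {
	db, cleanup, err := makeTestDB(false)
	defer cleanup() // safe even when err is non-nil
	require.NoError(t, err, "unable to make test db")
	_ = db
}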
@ -284,9 +276,7 @@ func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) {
}
response, err := db.QueryInvoices(query)
if err != nil {
t.Fatalf("invoice query failed: %v", err)
}
require.NoError(t, err, "invoice query failed")
// The retrieved list of invoices should be identical since we're
// using big endian, the invoices should be retrieved in ascending
@ -443,9 +433,7 @@ func TestInvoiceCancelSingleHtlc(t *testing.T) {
db, cleanUp, err := MakeTestDB()
defer cleanUp()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
preimage := lntypes.Preimage{1}
paymentHash := preimage.Hash()
@ -479,9 +467,7 @@ func TestInvoiceCancelSingleHtlc(t *testing.T) {
},
}, nil
})
if err != nil {
t.Fatalf("unable to add invoice htlc: %v", err)
}
require.NoError(t, err, "unable to add invoice htlc")
if len(invoice.Htlcs) != 1 {
t.Fatalf("expected the htlc to be added")
}
@ -498,9 +484,7 @@ func TestInvoiceCancelSingleHtlc(t *testing.T) {
},
}, nil
})
if err != nil {
t.Fatalf("unable to cancel htlc: %v", err)
}
require.NoError(t, err, "unable to cancel htlc")
if len(invoice.Htlcs) != 1 {
t.Fatalf("expected the htlc to be present")
}
@ -571,9 +555,7 @@ func TestInvoiceCancelSingleHtlcAMP(t *testing.T) {
SetID: (*SetID)(setID1),
}, nil
})
if err != nil {
t.Fatalf("unable to cancel htlc: %v", err)
}
require.NoError(t, err, "unable to cancel htlc")
freshInvoice, err := db.LookupInvoice(ref)
require.Nil(t, err)
@ -623,9 +605,7 @@ func TestInvoiceCancelSingleHtlcAMP(t *testing.T) {
SetID: (*SetID)(setID2),
}, nil
})
if err != nil {
t.Fatalf("unable to cancel htlc: %v", err)
}
require.NoError(t, err, "unable to cancel htlc")
freshInvoice, err = db.LookupInvoice(ref)
require.Nil(t, err)
@ -653,9 +633,7 @@ func TestInvoiceCancelSingleHtlcAMP(t *testing.T) {
SetID: (*SetID)(setID2),
}, nil
})
if err != nil {
t.Fatalf("unable to cancel htlc: %v", err)
}
require.NoError(t, err, "unable to cancel htlc")
freshInvoice, err = db.LookupInvoice(ref)
require.Nil(t, err)
@ -680,9 +658,7 @@ func TestInvoiceAddTimeSeries(t *testing.T) {
db, cleanUp, err := MakeTestDB(OptionClock(testClock))
defer cleanUp()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
_, err = db.InvoicesAddedSince(0)
require.NoError(t, err)
@ -995,9 +971,7 @@ func TestScanInvoices(t *testing.T) {
db, cleanup, err := MakeTestDB()
defer cleanup()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
var invoices map[lntypes.Hash]*Invoice
callCount := 0
@ -1056,16 +1030,12 @@ func TestDuplicateSettleInvoice(t *testing.T) {
db, cleanUp, err := MakeTestDB(OptionClock(testClock))
defer cleanUp()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
// We'll start out by creating an invoice and writing it to the DB.
amt := lnwire.NewMSatFromSatoshis(1000)
invoice, err := randInvoice(amt)
if err != nil {
t.Fatalf("unable to create invoice: %v", err)
}
require.NoError(t, err, "unable to create invoice")
payHash := invoice.Terms.PaymentPreimage.Hash()
@ -1076,9 +1046,7 @@ func TestDuplicateSettleInvoice(t *testing.T) {
// With the invoice in the DB, we'll now attempt to settle the invoice.
ref := InvoiceRefByHash(payHash)
dbInvoice, err := db.UpdateInvoice(ref, nil, getUpdateInvoice(amt))
if err != nil {
t.Fatalf("unable to settle invoice: %v", err)
}
require.NoError(t, err, "unable to settle invoice")
// We'll update what we expect the settled invoice to be so that our
// comparison below has the correct assumption.
@ -1121,9 +1089,7 @@ func TestQueryInvoices(t *testing.T) {
db, cleanUp, err := MakeTestDB(OptionClock(testClock))
defer cleanUp()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
// To begin the test, we'll add 50 invoices to the database. We'll
// assume that the index of the invoice within the database is the same
@ -1436,9 +1402,7 @@ func TestCustomRecords(t *testing.T) {
db, cleanUp, err := MakeTestDB()
defer cleanUp()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
require.NoError(t, err, "unable to make test db")
preimage := lntypes.Preimage{1}
paymentHash := preimage.Hash()
@ -1477,16 +1441,12 @@ func TestCustomRecords(t *testing.T) {
}, nil
},
)
if err != nil {
t.Fatalf("unable to add invoice htlc: %v", err)
}
require.NoError(t, err, "unable to add invoice htlc")
// Retrieve the invoice from the database and verify that the custom
// records are present.
dbInvoice, err := db.LookupInvoice(ref)
if err != nil {
t.Fatalf("unable to lookup invoice: %v", err)
}
require.NoError(t, err, "unable to lookup invoice")
if len(dbInvoice.Htlcs) != 1 {
t.Fatalf("expected the htlc to be added")

View file

@ -8,6 +8,7 @@ import (
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/stretchr/testify/require"
)
// applyMigration is a helper test function that encapsulates the general steps
@ -425,14 +426,10 @@ func TestMigrationReversion(t *testing.T) {
defer func() {
os.RemoveAll(tempDirName)
}()
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
backend, cleanup, err := kvdb.GetTestBackend(tempDirName, "cdb")
if err != nil {
t.Fatalf("unable to get test db backend: %v", err)
}
require.NoError(t, err, "unable to get test db backend")
cdb, err := CreateWithBackend(backend)
if err != nil {
@ -454,14 +451,10 @@ func TestMigrationReversion(t *testing.T) {
cdb.Close()
cleanup()
if err != nil {
t.Fatalf("unable to increase db version: %v", err)
}
require.NoError(t, err, "unable to increase db version")
backend, cleanup, err = kvdb.GetTestBackend(tempDirName, "cdb")
if err != nil {
t.Fatalf("unable to get test db backend: %v", err)
}
require.NoError(t, err, "unable to get test db backend")
defer cleanup()
_, err = CreateWithBackend(backend)

View file

@ -8,15 +8,14 @@ import (
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/wire"
"github.com/stretchr/testify/require"
)
func TestLinkNodeEncodeDecode(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()
@ -26,13 +25,9 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
_, pub1 := btcec.PrivKeyFromBytes(key[:])
_, pub2 := btcec.PrivKeyFromBytes(rev[:])
addr1, err := net.ResolveTCPAddr("tcp", "10.0.0.1:9000")
if err != nil {
t.Fatalf("unable to create test addr: %v", err)
}
require.NoError(t, err, "unable to create test addr")
addr2, err := net.ResolveTCPAddr("tcp", "10.0.0.2:9000")
if err != nil {
t.Fatalf("unable to create test addr: %v", err)
}
require.NoError(t, err, "unable to create test addr")
// Create two fresh link node instances with the above dummy data, then
// fully sync both instances to disk.
@ -49,9 +44,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
// match the two created above.
originalNodes := []*LinkNode{node2, node1}
linkNodes, err := cdb.linkNodeDB.FetchAllLinkNodes()
if err != nil {
t.Fatalf("unable to fetch nodes: %v", err)
}
require.NoError(t, err, "unable to fetch nodes")
for i, node := range linkNodes {
if originalNodes[i].Network != node.Network {
t.Fatalf("node networks don't match: expected %v, got %v",
@ -85,9 +78,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
// Fetch the same node from the database according to its public key.
node1DB, err := cdb.linkNodeDB.FetchLinkNode(pub1)
if err != nil {
t.Fatalf("unable to find node: %v", err)
}
require.NoError(t, err, "unable to find node")
// Both the last seen timestamp and the list of reachable addresses for
// the node should be updated.
@ -113,9 +104,7 @@ func TestDeleteLinkNode(t *testing.T) {
t.Parallel()
fullDB, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
cdb := fullDB.ChannelStateDB()

View file

@ -56,22 +56,16 @@ func TestPaymentControlSwitchFail(t *testing.T) {
db, cleanup, err := MakeTestDB()
defer cleanup()
if err != nil {
t.Fatalf("unable to init db: %v", err)
}
require.NoError(t, err, "unable to init db")
pControl := NewPaymentControl(db)
info, attempt, preimg, err := genInfo()
if err != nil {
t.Fatalf("unable to generate htlc message: %v", err)
}
require.NoError(t, err, "unable to generate htlc message")
// Sends base htlc message which initiates StatusInFlight.
err = pControl.InitPayment(info.PaymentIdentifier, info)
if err != nil {
t.Fatalf("unable to send htlc message: %v", err)
}
require.NoError(t, err, "unable to send htlc message")
assertPaymentIndex(t, pControl, info.PaymentIdentifier)
assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusInFlight)
@ -82,9 +76,7 @@ func TestPaymentControlSwitchFail(t *testing.T) {
// Fail the payment, which should move it to Failed.
failReason := FailureReasonNoRoute
_, err = pControl.Fail(info.PaymentIdentifier, failReason)
if err != nil {
t.Fatalf("unable to fail payment hash: %v", err)
}
require.NoError(t, err, "unable to fail payment hash")
// Verify the status is indeed Failed.
assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusFailed)
@ -100,9 +92,7 @@ func TestPaymentControlSwitchFail(t *testing.T) {
// Sends the htlc again, which should succeed since the prior payment
// failed.
err = pControl.InitPayment(info.PaymentIdentifier, info)
if err != nil {
t.Fatalf("unable to send htlc message: %v", err)
}
require.NoError(t, err, "unable to send htlc message")
// Check that our index has been updated, and the old index has been
// removed.
@ -118,9 +108,7 @@ func TestPaymentControlSwitchFail(t *testing.T) {
// However, this is not communicated to control tower in the current
// implementation. It only registers the initiation of the attempt.
_, err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt)
if err != nil {
t.Fatalf("unable to register attempt: %v", err)
}
require.NoError(t, err, "unable to register attempt")
htlcReason := HTLCFailUnreadable
_, err = pControl.FailAttempt(
@ -144,9 +132,7 @@ func TestPaymentControlSwitchFail(t *testing.T) {
// Record another attempt.
attempt.AttemptID = 1
_, err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt)
if err != nil {
t.Fatalf("unable to send htlc message: %v", err)
}
require.NoError(t, err, "unable to send htlc message")
assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusInFlight)
htlc = &htlcStatus{
@ -165,9 +151,7 @@ func TestPaymentControlSwitchFail(t *testing.T) {
Preimage: preimg,
},
)
if err != nil {
t.Fatalf("error shouldn't have been received, got: %v", err)
}
require.NoError(t, err, "error shouldn't have been received, got")
if len(payment.HTLCs) != 2 {
t.Fatalf("payment should have two htlcs, got: %d",
@ -204,23 +188,17 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) {
db, cleanup, err := MakeTestDB()
defer cleanup()
if err != nil {
t.Fatalf("unable to init db: %v", err)
}
require.NoError(t, err, "unable to init db")
pControl := NewPaymentControl(db)
info, attempt, preimg, err := genInfo()
if err != nil {
t.Fatalf("unable to generate htlc message: %v", err)
}
require.NoError(t, err, "unable to generate htlc message")
// Sends base htlc message which initiates base status and moves it to
// StatusInFlight and verifies that it was changed.
err = pControl.InitPayment(info.PaymentIdentifier, info)
if err != nil {
t.Fatalf("unable to send htlc message: %v", err)
}
require.NoError(t, err, "unable to send htlc message")
assertPaymentIndex(t, pControl, info.PaymentIdentifier)
assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusInFlight)
@ -239,9 +217,7 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) {
// Record an attempt.
_, err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt)
if err != nil {
t.Fatalf("unable to send htlc message: %v", err)
}
require.NoError(t, err, "unable to send htlc message")
assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusInFlight)
htlc := &htlcStatus{
@ -265,9 +241,7 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) {
Preimage: preimg,
},
)
if err != nil {
t.Fatalf("error shouldn't have been received, got: %v", err)
}
require.NoError(t, err, "error shouldn't have been received, got")
assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusSucceeded)
htlc.settle = &preimg
@ -287,16 +261,12 @@ func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) {
db, cleanup, err := MakeTestDB()
defer cleanup()
if err != nil {
t.Fatalf("unable to init db: %v", err)
}
require.NoError(t, err, "unable to init db")
pControl := NewPaymentControl(db)
info, _, preimg, err := genInfo()
if err != nil {
t.Fatalf("unable to generate htlc message: %v", err)
}
require.NoError(t, err, "unable to generate htlc message")
// Attempting to complete the payment should fail.
_, err = pControl.SettleAttempt(
@ -320,16 +290,12 @@ func TestPaymentControlFailsWithoutInFlight(t *testing.T) {
db, cleanup, err := MakeTestDB()
defer cleanup()
if err != nil {
t.Fatalf("unable to init db: %v", err)
}
require.NoError(t, err, "unable to init db")
pControl := NewPaymentControl(db)
info, _, _, err := genInfo()
if err != nil {
t.Fatalf("unable to generate htlc message: %v", err)
}
require.NoError(t, err, "unable to generate htlc message")
// Calling Fail should return an error.
_, err = pControl.Fail(info.PaymentIdentifier, FailureReasonNoRoute)
@ -348,9 +314,7 @@ func TestPaymentControlDeleteNonInFligt(t *testing.T) {
db, cleanup, err := MakeTestDB()
defer cleanup()
if err != nil {
t.Fatalf("unable to init db: %v", err)
}
require.NoError(t, err, "unable to init db")
// Create a sequence number for duplicate payments that will not collide
// with the sequence numbers for the payments we create. These values
@ -963,22 +927,16 @@ func TestPaymentControlMPPRecordValidation(t *testing.T) {
db, cleanup, err := MakeTestDB()
defer cleanup()
if err != nil {
t.Fatalf("unable to init db: %v", err)
}
require.NoError(t, err, "unable to init db")
pControl := NewPaymentControl(db)
info, attempt, _, err := genInfo()
if err != nil {
t.Fatalf("unable to generate htlc message: %v", err)
}
require.NoError(t, err, "unable to generate htlc message")
// Init the payment.
err = pControl.InitPayment(info.PaymentIdentifier, info)
if err != nil {
t.Fatalf("unable to send htlc message: %v", err)
}
require.NoError(t, err, "unable to send htlc message")
// Create three unique attempts we'll use for the test, and
// register them with the payment control. We set each
@ -991,9 +949,7 @@ func TestPaymentControlMPPRecordValidation(t *testing.T) {
)
_, err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt)
if err != nil {
t.Fatalf("unable to send htlc message: %v", err)
}
require.NoError(t, err, "unable to send htlc message")
// Now try to register a non-MPP attempt, which should fail.
b := *attempt
@ -1025,20 +981,14 @@ func TestPaymentControlMPPRecordValidation(t *testing.T) {
// Create and init a new payment. This time we'll check that we cannot
// register an MPP attempt if we already registered a non-MPP one.
info, attempt, _, err = genInfo()
if err != nil {
t.Fatalf("unable to generate htlc message: %v", err)
}
require.NoError(t, err, "unable to generate htlc message")
err = pControl.InitPayment(info.PaymentIdentifier, info)
if err != nil {
t.Fatalf("unable to send htlc message: %v", err)
}
require.NoError(t, err, "unable to send htlc message")
attempt.Route.FinalHop().MPP = nil
_, err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt)
if err != nil {
t.Fatalf("unable to send htlc message: %v", err)
}
require.NoError(t, err, "unable to send htlc message")
// Attempt to register an MPP attempt, which should fail.
b = *attempt

View file

@ -87,9 +87,7 @@ func TestSentPaymentSerialization(t *testing.T) {
}
newCreationInfo, err := deserializePaymentCreationInfo(&b)
if err != nil {
t.Fatalf("unable to deserialize creation info: %v", err)
}
require.NoError(t, err, "unable to deserialize creation info")
if !reflect.DeepEqual(c, newCreationInfo) {
t.Fatalf("Payments do not match after "+
@ -104,9 +102,7 @@ func TestSentPaymentSerialization(t *testing.T) {
}
newWireInfo, err := deserializeHTLCAttemptInfo(&b)
if err != nil {
t.Fatalf("unable to deserialize info: %v", err)
}
require.NoError(t, err, "unable to deserialize info")
newWireInfo.AttemptID = s.AttemptID
// First we verify all the records match up properly, as they aren't
@ -673,9 +669,7 @@ func appendDuplicatePayment(t *testing.T, db *DB, paymentHash lntypes.Hash,
return nil
}, func() {})
if err != nil {
t.Fatalf("could not create payment: %v", err)
}
require.NoError(t, err, "could not create payment")
}
// putDuplicatePayment creates a duplicate payment in the duplicates bucket

View file

@ -7,6 +7,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
// TestWaitingProofStore tests add/get/remove functions of the waiting proof
@ -15,9 +16,7 @@ func TestWaitingProofStore(t *testing.T) {
t.Parallel()
db, cleanup, err := MakeTestDB()
if err != nil {
t.Fatalf("failed to make test database: %s", err)
}
require.NoError(t, err, "failed to make test database")
defer cleanup()
proof1 := NewWaitingProof(true, &lnwire.AnnounceSignatures{
@ -37,9 +36,7 @@ func TestWaitingProofStore(t *testing.T) {
}
proof2, err := store.Get(proof1.Key())
if err != nil {
t.Fatalf("unable retrieve proof from storage: %v", err)
}
require.NoError(t, err, "unable retrieve proof from storage")
if !reflect.DeepEqual(proof1, proof2) {
t.Fatalf("wrong proof retrieved: expected %v, got %v",
spew.Sdump(proof1), spew.Sdump(proof2))

View file

@ -5,6 +5,7 @@ import (
"testing"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
// TestWitnessCacheSha256Retrieval tests that we're able to add and lookup new
@ -13,9 +14,7 @@ func TestWitnessCacheSha256Retrieval(t *testing.T) {
t.Parallel()
cdb, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
wCache := cdb.NewWitnessCache()
@ -30,9 +29,7 @@ func TestWitnessCacheSha256Retrieval(t *testing.T) {
// First, we'll attempt to add the preimages to the database.
err = wCache.AddSha256Witnesses(preimages...)
if err != nil {
t.Fatalf("unable to add witness: %v", err)
}
require.NoError(t, err, "unable to add witness")
// With the preimages stored, we'll now attempt to look them up.
for i, hash := range hashes {
@ -58,9 +55,7 @@ func TestWitnessCacheSha256Deletion(t *testing.T) {
t.Parallel()
cdb, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
wCache := cdb.NewWitnessCache()
@ -83,9 +78,7 @@ func TestWitnessCacheSha256Deletion(t *testing.T) {
// We'll now delete the first preimage. If we attempt to look it up, we
// should get ErrNoWitnesses.
err = wCache.DeleteSha256Witness(hash1)
if err != nil {
t.Fatalf("unable to delete witness: %v", err)
}
require.NoError(t, err, "unable to delete witness")
_, err = wCache.LookupSha256Witness(hash1)
if err != ErrNoWitnesses {
t.Fatalf("expected ErrNoWitnesses instead got: %v", err)
@ -109,9 +102,7 @@ func TestWitnessCacheUnknownWitness(t *testing.T) {
t.Parallel()
cdb, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
wCache := cdb.NewWitnessCache()
@ -128,9 +119,7 @@ func TestWitnessCacheUnknownWitness(t *testing.T) {
// identically to the insertion via the generalized interface.
func TestAddSha256Witnesses(t *testing.T) {
cdb, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test database: %v", err)
}
require.NoError(t, err, "unable to make test database")
defer cleanUp()
wCache := cdb.NewWitnessCache()
@ -152,9 +141,7 @@ func TestAddSha256Witnesses(t *testing.T) {
)
err = wCache.legacyAddWitnesses(Sha256HashWitness, witnesses...)
if err != nil {
t.Fatalf("unable to add witness: %v", err)
}
require.NoError(t, err, "unable to add witness")
for i, hash := range hashes {
preimage := preimages[i]
@ -181,9 +168,7 @@ func TestAddSha256Witnesses(t *testing.T) {
// Now, add the same witnesses using the type-safe interface for
// lntypes.Preimages.
err = wCache.AddSha256Witnesses(preimages...)
if err != nil {
t.Fatalf("unable to add sha256 preimage: %v", err)
}
require.NoError(t, err, "unable to add sha256 preimage")
// Finally, iterate over the keys and assert that the returned witnesses
// match the original witnesses. This asserts that the specialized

View file

@ -42,9 +42,7 @@ func TestEtcdElector(t *testing.T) {
defer guard()
tmpDir, err := ioutil.TempDir("", "etcd")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
etcdCfg, cleanup, err := etcd.NewEmbeddedEtcdInstance(tmpDir, 0, 0, "")
require.NoError(t, err)

View file

@ -711,9 +711,7 @@ func countRetributions(t *testing.T, rs RetributionStorer) int {
}, func() {
count = 0
})
if err != nil {
t.Fatalf("unable to list retributions in db: %v", err)
}
require.NoError(t, err, "unable to list retributions in db")
return count
}
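
The trailing func() { count = 0 } above is the reset closure kvdb's View takes alongside the transaction body: the body may run more than once (for example on serialization conflicts with remote backends), so captured accumulators must be cleared before each attempt. A stand-in sketch of that contract, not lnd's kvdb itself:

package main

import "fmt"

// view mimics the retry contract: reset runs before every attempt so
// state captured by the closure starts clean each time.
func view(maxAttempts int, body func() error, reset func()) error {
	var err error
	for i := 0; i < maxAttempts; i++ {
		reset()
		if err = body(); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	count := 0
	failures := 1
	_ = view(3, func() error {
		count += 3 // pretend we counted 3 retributions
		if failures > 0 {
			failures--
			return fmt.Errorf("transient conflict")
		}
		return nil
	}, func() {
		count = 0 // without this, the retry would double-count
	})
	fmt.Println(count) // 3, not 6: reset ran before the retry
}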
@ -979,9 +977,7 @@ func initBreachedState(t *testing.T) (*BreachArbiter,
// a spend of the funding transaction. Alice's channel will be the one
// observing a breach.
alice, bob, cleanUpChans, err := createInitChannels(1)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
require.NoError(t, err, "unable to create test channels")
// Instantiate a breach arbiter to handle the breach of alice's channel.
contractBreaches := make(chan *ContractBreachEvent)
@ -989,9 +985,7 @@ func initBreachedState(t *testing.T) (*BreachArbiter,
brar, cleanUpArb, err := createTestArbiter(
t, contractBreaches, alice.State().Db.GetParentDB(),
)
if err != nil {
t.Fatalf("unable to initialize test breach arbiter: %v", err)
}
require.NoError(t, err, "unable to initialize test breach arbiter")
// Send one HTLC to Bob and perform a state transition to lock it in.
htlcAmount := lnwire.NewMSatFromSatoshis(20000)
@ -1009,9 +1003,7 @@ func initBreachedState(t *testing.T) (*BreachArbiter,
// Generate the force close summary at this point in time, this will
// serve as the old state bob will broadcast.
bobClose, err := bob.ForceClose()
if err != nil {
t.Fatalf("unable to force close bob's channel: %v", err)
}
require.NoError(t, err, "unable to force close bob's channel")
// Now send another HTLC and perform a state transition, this ensures
// Alice is ahead of the state Bob will broadcast.
@ -1166,9 +1158,7 @@ func TestBreachHandoffFail(t *testing.T) {
brar, cleanUpArb, err := createTestArbiter(
t, contractBreaches, alice.State().Db.GetParentDB(),
)
if err != nil {
t.Fatalf("unable to initialize test breach arbiter: %v", err)
}
require.NoError(t, err, "unable to initialize test breach arbiter")
defer cleanUpArb()
// Signal a spend of the funding transaction and wait for the close
@ -1623,9 +1613,7 @@ func testBreachSpends(t *testing.T, test breachTest) {
retribution, err := lnwallet.NewBreachRetribution(
alice.State(), height, 1, forceCloseTx,
)
if err != nil {
t.Fatalf("unable to create breach retribution: %v", err)
}
require.NoError(t, err, "unable to create breach retribution")
processACK := make(chan error)
breach := &ContractBreachEvent{
@ -1664,9 +1652,7 @@ func testBreachSpends(t *testing.T, test breachTest) {
RemoteNextRevocation: state.RemoteNextRevocation,
LocalChanConfig: state.LocalChanCfg,
})
if err != nil {
t.Fatalf("unable to close channel: %v", err)
}
require.NoError(t, err, "unable to close channel")
// After exiting, the breach arbiter should have persisted the
// retribution information and the channel should be shown as pending
@ -1839,9 +1825,7 @@ func TestBreachDelayedJusticeConfirmation(t *testing.T) {
retribution, err := lnwallet.NewBreachRetribution(
alice.State(), height, uint32(blockHeight), forceCloseTx,
)
if err != nil {
t.Fatalf("unable to create breach retribution: %v", err)
}
require.NoError(t, err, "unable to create breach retribution")
processACK := make(chan error, 1)
breach := &ContractBreachEvent{
@ -1881,9 +1865,7 @@ func TestBreachDelayedJusticeConfirmation(t *testing.T) {
RemoteNextRevocation: state.RemoteNextRevocation,
LocalChanConfig: state.LocalChanCfg,
})
if err != nil {
t.Fatalf("unable to close channel: %v", err)
}
require.NoError(t, err, "unable to close channel")
// After exiting, the breach arbiter should have persisted the
// retribution information and the channel should be shown as pending
@ -2126,9 +2108,7 @@ func assertPendingClosed(t *testing.T, c *lnwallet.LightningChannel) {
t.Helper()
closedChans, err := c.State().Db.FetchClosedChannels(true)
if err != nil {
t.Fatalf("unable to load pending closed channels: %v", err)
}
require.NoError(t, err, "unable to load pending closed channels")
for _, chanSummary := range closedChans {
if chanSummary.ChanPoint == *c.ChanPoint {
@ -2145,9 +2125,7 @@ func assertNotPendingClosed(t *testing.T, c *lnwallet.LightningChannel) {
t.Helper()
closedChans, err := c.State().Db.FetchClosedChannels(true)
if err != nil {
t.Fatalf("unable to load pending closed channels: %v", err)
}
require.NoError(t, err, "unable to load pending closed channels")
for _, chanSummary := range closedChans {
if chanSummary.ChanPoint == *c.ChanPoint {

View file

@ -20,6 +20,7 @@ import (
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lntest/channels"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/stretchr/testify/require"
)
var (
@ -306,9 +307,7 @@ func TestContractInsertionRetrieval(t *testing.T) {
testLog, cleanUp, err := newTestBoltArbLog(
testChainHash, testChanPoint1,
)
if err != nil {
t.Fatalf("unable to create test log: %v", err)
}
require.NoError(t, err, "unable to create test log")
defer cleanUp()
// The log created, we'll create a series of resolvers, each properly
@ -386,17 +385,13 @@ func TestContractInsertionRetrieval(t *testing.T) {
// Now, we'll insert the resolver into the log, we do not need to apply
// any closures, so we will pass in nil.
err = testLog.InsertUnresolvedContracts(nil, resolvers...)
if err != nil {
t.Fatalf("unable to insert resolvers: %v", err)
}
require.NoError(t, err, "unable to insert resolvers")
// With the resolvers inserted, we'll now attempt to retrieve them from
// the database, so we can compare them to the versions we created
// above.
diskResolvers, err := testLog.FetchUnresolvedContracts()
if err != nil {
t.Fatalf("unable to retrieve resolvers: %v", err)
}
require.NoError(t, err, "unable to retrieve resolvers")
if len(diskResolvers) != len(resolvers) {
t.Fatalf("expected %v got resolvers, instead got %v: %#v",
@ -423,9 +418,7 @@ func TestContractInsertionRetrieval(t *testing.T) {
t.Fatalf("unable to wipe log: %v", err)
}
diskResolvers, err = testLog.FetchUnresolvedContracts()
if err != nil {
t.Fatalf("unable to fetch unresolved contracts: %v", err)
}
require.NoError(t, err, "unable to fetch unresolved contracts")
if len(diskResolvers) != 0 {
t.Fatalf("no resolvers should be found, instead %v were",
len(diskResolvers))
@ -442,9 +435,7 @@ func TestContractResolution(t *testing.T) {
testLog, cleanUp, err := newTestBoltArbLog(
testChainHash, testChanPoint1,
)
if err != nil {
t.Fatalf("unable to create test log: %v", err)
}
require.NoError(t, err, "unable to create test log")
defer cleanUp()
// We'll now create a timeout resolver that we'll be using for the
@ -469,13 +460,9 @@ func TestContractResolution(t *testing.T) {
// we get the same resolver out the other side. We do not need to apply
// any closures.
err = testLog.InsertUnresolvedContracts(nil, timeoutResolver)
if err != nil {
t.Fatalf("unable to insert contract into db: %v", err)
}
require.NoError(t, err, "unable to insert contract into db")
dbContracts, err := testLog.FetchUnresolvedContracts()
if err != nil {
t.Fatalf("unable to fetch contracts from db: %v", err)
}
require.NoError(t, err, "unable to fetch contracts from db")
assertResolversEqual(t, timeoutResolver, dbContracts[0])
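
The insert-then-fetch-then-compare sequence here is a round-trip test: whatever goes into the log must come back out equal. A minimal generic sketch of that pattern; the store type is a stand-in, not the arbitrator log:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// store stands in for the arbitrator log: values must survive a round trip.
type store struct{ m map[string]string }

func (s *store) insert(k, v string) error { s.m[k] = v; return nil }

func (s *store) fetch(k string) (string, error) { return s.m[k], nil }

func TestRoundTrip(t *testing.T) {
	s := &store{m: make(map[string]string)}

	require.NoError(t, s.insert("contract", "timeout resolver"))

	got, err := s.fetch("contract")
	require.NoError(t, err, "unable to fetch contracts from db")
	require.Equal(t, "timeout resolver", got)
}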
// Now, we'll mark the contract as resolved within the database.
@ -485,9 +472,7 @@ func TestContractResolution(t *testing.T) {
// At this point, no contracts should exist within the log.
dbContracts, err = testLog.FetchUnresolvedContracts()
if err != nil {
t.Fatalf("unable to fetch contracts from db: %v", err)
}
require.NoError(t, err, "unable to fetch contracts from db")
if len(dbContracts) != 0 {
t.Fatalf("no contract should be from in the db, instead %v "+
"were", len(dbContracts))
@ -504,9 +489,7 @@ func TestContractSwapping(t *testing.T) {
testLog, cleanUp, err := newTestBoltArbLog(
testChainHash, testChanPoint1,
)
if err != nil {
t.Fatalf("unable to create test log: %v", err)
}
require.NoError(t, err, "unable to create test log")
defer cleanUp()
// We'll create two resolvers, a regular timeout resolver, and the
@ -533,23 +516,17 @@ func TestContractSwapping(t *testing.T) {
// We'll first insert the contest resolver into the log with no
// additional updates.
err = testLog.InsertUnresolvedContracts(nil, contestResolver)
if err != nil {
t.Fatalf("unable to insert contract into db: %v", err)
}
require.NoError(t, err, "unable to insert contract into db")
// With the resolver inserted, we'll now attempt to atomically swap it
// for its underlying timeout resolver.
err = testLog.SwapContract(contestResolver, timeoutResolver)
if err != nil {
t.Fatalf("unable to swap contracts: %v", err)
}
require.NoError(t, err, "unable to swap contracts")
// At this point, there should now only be a single contract in the
// database.
dbContracts, err := testLog.FetchUnresolvedContracts()
if err != nil {
t.Fatalf("unable to fetch contracts from db: %v", err)
}
require.NoError(t, err, "unable to fetch contracts from db")
if len(dbContracts) != 1 {
t.Fatalf("one contract should be from in the db, instead %v "+
"were", len(dbContracts))
@ -569,9 +546,7 @@ func TestContractResolutionsStorage(t *testing.T) {
testLog, cleanUp, err := newTestBoltArbLog(
testChainHash, testChanPoint1,
)
if err != nil {
t.Fatalf("unable to create test log: %v", err)
}
require.NoError(t, err, "unable to create test log")
defer cleanUp()
// With the test log created, we'll now craft a contract resolution that
@ -661,9 +636,7 @@ func TestContractResolutionsStorage(t *testing.T) {
t.Fatalf("unable to insert resolutions into db: %v", err)
}
diskRes, err := testLog.FetchContractResolutions()
if err != nil {
t.Fatalf("unable to read resolution from db: %v", err)
}
require.NoError(t, err, "unable to read resolution from db")
if !reflect.DeepEqual(&res, diskRes) {
t.Fatalf("resolution mismatch: expected %v\n, got %v",
@ -689,16 +662,12 @@ func TestStateMutation(t *testing.T) {
testLog, cleanUp, err := newTestBoltArbLog(
testChainHash, testChanPoint1,
)
if err != nil {
t.Fatalf("unable to create test log: %v", err)
}
require.NoError(t, err, "unable to create test log")
defer cleanUp()
// The default state of an arbitrator should be StateDefault.
arbState, err := testLog.CurrentState(nil)
if err != nil {
t.Fatalf("unable to read arb state: %v", err)
}
require.NoError(t, err, "unable to read arb state")
if arbState != StateDefault {
t.Fatalf("state mismatch: expected %v, got %v", StateDefault,
arbState)
@ -710,9 +679,7 @@ func TestStateMutation(t *testing.T) {
t.Fatalf("unable to write state: %v", err)
}
arbState, err = testLog.CurrentState(nil)
if err != nil {
t.Fatalf("unable to read arb state: %v", err)
}
require.NoError(t, err, "unable to read arb state")
if arbState != StateFullyResolved {
t.Fatalf("state mismatch: expected %v, got %v", StateFullyResolved,
arbState)
@ -721,16 +688,12 @@ func TestStateMutation(t *testing.T) {
// Next, we'll wipe our state and ensure that if we try to query for
// the current state, we get the proper error.
err = testLog.WipeHistory()
if err != nil {
t.Fatalf("unable to wipe history: %v", err)
}
require.NoError(t, err, "unable to wipe history")
// If we try to query for the state again, we should get the default
// state again.
arbState, err = testLog.CurrentState(nil)
if err != nil {
t.Fatalf("unable to query current state: %v", err)
}
require.NoError(t, err, "unable to query current state")
if arbState != StateDefault {
t.Fatalf("state mismatch: expected %v, got %v", StateDefault,
arbState)
@ -747,17 +710,13 @@ func TestScopeIsolation(t *testing.T) {
testLog1, cleanUp1, err := newTestBoltArbLog(
testChainHash, testChanPoint1,
)
if err != nil {
t.Fatalf("unable to create test log: %v", err)
}
require.NoError(t, err, "unable to create test log")
defer cleanUp1()
testLog2, cleanUp2, err := newTestBoltArbLog(
testChainHash, testChanPoint2,
)
if err != nil {
t.Fatalf("unable to create test log: %v", err)
}
require.NoError(t, err, "unable to create test log")
defer cleanUp2()
// We'll now update the current state of both the logs to a unique
@ -772,13 +731,9 @@ func TestScopeIsolation(t *testing.T) {
// Querying each log, the states should be the prior one we set, and be
// disjoint.
log1State, err := testLog1.CurrentState(nil)
if err != nil {
t.Fatalf("unable to read arb state: %v", err)
}
require.NoError(t, err, "unable to read arb state")
log2State, err := testLog2.CurrentState(nil)
if err != nil {
t.Fatalf("unable to read arb state: %v", err)
}
require.NoError(t, err, "unable to read arb state")
if log1State == log2State {
t.Fatalf("log states are the same: %v", log1State)
@ -802,9 +757,7 @@ func TestCommitSetStorage(t *testing.T) {
testLog, cleanUp, err := newTestBoltArbLog(
testChainHash, testChanPoint1,
)
if err != nil {
t.Fatalf("unable to create test log: %v", err)
}
require.NoError(t, err, "unable to create test log")
defer cleanUp()
activeHTLCs := []channeldb.HTLC{

View file

@ -13,6 +13,7 @@ import (
"github.com/lightningnetwork/lnd/clock"
"github.com/lightningnetwork/lnd/lntest/mock"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/stretchr/testify/require"
)
// TestChainArbitratorRepulishCloses tests that the chain arbitrator will
@ -145,14 +146,10 @@ func TestResolveContract(t *testing.T) {
// To start with, we'll create a new temp DB for the duration of this
// test.
tempPath, err := ioutil.TempDir("", "testdb")
if err != nil {
t.Fatalf("unable to make temp dir: %v", err)
}
require.NoError(t, err, "unable to make temp dir")
defer os.RemoveAll(tempPath)
db, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to open db: %v", err)
}
require.NoError(t, err, "unable to open db")
defer db.Close()
// With the DB created, we'll make a new channel, and mark it as
@ -160,9 +157,7 @@ func TestResolveContract(t *testing.T) {
newChannel, _, cleanup, err := lnwallet.CreateTestChannels(
channeldb.SingleFunderTweaklessBit,
)
if err != nil {
t.Fatalf("unable to make new test channel: %v", err)
}
require.NoError(t, err, "unable to make new test channel")
defer cleanup()
channel := newChannel.State()
channel.Db = db.ChannelStateDB()
@ -206,17 +201,13 @@ func TestResolveContract(t *testing.T) {
// While the resolvers are active, we'll now remove the channel from the
// database (mark is as closed).
err = db.ChannelStateDB().AbandonChannel(&channel.FundingOutpoint, 4)
if err != nil {
t.Fatalf("unable to remove channel: %v", err)
}
require.NoError(t, err, "unable to remove channel")
// With the channel removed, we'll now manually call ResolveContract.
// This simulates needing to remove a channel from the chain arb due
// to any possible external consistency issues.
err = chainArb.ResolveContract(channel.FundingOutpoint)
if err != nil {
t.Fatalf("unable to resolve contract: %v", err)
}
require.NoError(t, err, "unable to resolve contract")
// There shouldn't be an active chain watcher or channel arb for this
// channel.
@ -240,7 +231,5 @@ func TestResolveContract(t *testing.T) {
// If we attempt to call this method again, then we should get a nil
// error, as there is no more state to be cleaned up.
err = chainArb.ResolveContract(channel.FundingOutpoint)
if err != nil {
t.Fatalf("second resolve call shouldn't fail: %v", err)
}
require.NoError(t, err, "second resolve call shouldn't fail")
}

View file

@ -14,6 +14,7 @@ import (
"github.com/lightningnetwork/lnd/lntest/mock"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
// TestChainWatcherRemoteUnilateralClose tests that the chain watcher is able
@ -27,9 +28,7 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) {
aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
channeldb.SingleFunderTweaklessBit,
)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
require.NoError(t, err, "unable to create test channels")
defer cleanUp()
// With the channels created, we'll now create a chain watcher instance
@ -45,13 +44,9 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) {
signer: aliceChannel.Signer,
extractStateNumHint: lnwallet.GetStateNumHint,
})
if err != nil {
t.Fatalf("unable to create chain watcher: %v", err)
}
require.NoError(t, err, "unable to create chain watcher")
err = aliceChainWatcher.Start()
if err != nil {
t.Fatalf("unable to start chain watcher: %v", err)
}
require.NoError(t, err, "unable to start chain watcher")
defer aliceChainWatcher.Stop()
// We'll request a new channel event subscription from Alice's chain
@ -118,9 +113,7 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {
aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
channeldb.SingleFunderTweaklessBit,
)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
require.NoError(t, err, "unable to create test channels")
defer cleanUp()
// With the channels created, we'll now create a chain watcher instance
@ -136,9 +129,7 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {
signer: aliceChannel.Signer,
extractStateNumHint: lnwallet.GetStateNumHint,
})
if err != nil {
t.Fatalf("unable to create chain watcher: %v", err)
}
require.NoError(t, err, "unable to create chain watcher")
if err := aliceChainWatcher.Start(); err != nil {
t.Fatalf("unable to start chain watcher: %v", err)
}

View file

@ -462,9 +462,7 @@ func TestChannelArbitratorCooperativeClose(t *testing.T) {
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
if err := chanArbCtx.chanArb.Start(nil); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
@ -523,9 +521,7 @@ func TestChannelArbitratorRemoteForceClose(t *testing.T) {
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
if err := chanArb.Start(nil); err != nil {
@ -578,9 +574,7 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) {
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
if err := chanArb.Start(nil); err != nil {
@ -686,9 +680,7 @@ func TestChannelArbitratorBreachClose(t *testing.T) {
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
chanArb.cfg.PreimageDB = newMockWitnessBeacon()
chanArb.cfg.Registry = &mockRegistry{}
@ -817,9 +809,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
// a real DB will be created. We need this for our test as we want to
// test proper restart recovery and resolver population.
chanArbCtx, err := createTestChannelArbitrator(t, nil)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
chanArb.cfg.PreimageDB = newMockWitnessBeacon()
chanArb.cfg.Registry = &mockRegistry{}
@ -990,9 +980,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
// We'll now re-create the resolver; notice that we use the existing
// arbLog so it carries over the same on-disk state.
chanArbCtxNew, err := chanArbCtx.Restart(nil)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb = chanArbCtxNew.chanArb
defer chanArbCtxNew.CleanUp()
@ -1088,9 +1076,7 @@ func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) {
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
if err := chanArb.Start(nil); err != nil {
@ -1197,9 +1183,7 @@ func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) {
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
if err := chanArb.Start(nil); err != nil {
@ -1305,9 +1289,7 @@ func TestChannelArbitratorPersistence(t *testing.T) {
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
if err := chanArb.Start(nil); err != nil {
@ -1340,9 +1322,7 @@ func TestChannelArbitratorPersistence(t *testing.T) {
// Restart the channel arb, this'll use the same log and prior
// context.
chanArbCtx, err = chanArbCtx.Restart(nil)
if err != nil {
t.Fatalf("unable to restart channel arb: %v", err)
}
require.NoError(t, err, "unable to restart channel arb")
chanArb = chanArbCtx.chanArb
// Again, it should start up in the default state.
@ -1371,9 +1351,7 @@ func TestChannelArbitratorPersistence(t *testing.T) {
// Restart once again to simulate yet another restart.
chanArbCtx, err = chanArbCtx.Restart(nil)
if err != nil {
t.Fatalf("unable to restart channel arb: %v", err)
}
require.NoError(t, err, "unable to restart channel arb")
chanArb = chanArbCtx.chanArb
// Starts out in StateDefault.
@ -1400,9 +1378,7 @@ func TestChannelArbitratorPersistence(t *testing.T) {
// Create a new arbitrator, and now make fetching resolutions succeed.
log.failFetch = nil
chanArbCtx, err = chanArbCtx.Restart(nil)
if err != nil {
t.Fatalf("unable to restart channel arb: %v", err)
}
require.NoError(t, err, "unable to restart channel arb")
defer chanArbCtx.CleanUp()
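
Each Restart above reuses the prior log, so the test checks that arbitrator state is recovered from disk rather than recomputed. A toy version of that persistence-across-restart pattern; the component and its state strings are hypothetical:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// arb is a toy arbitrator whose only state lives behind a pointer,
// standing in for the on-disk log shared across restarts.
type arb struct{ state *string }

func start(persisted *string) *arb {
	if *persisted == "" {
		*persisted = "StateDefault"
	}
	return &arb{state: persisted}
}

func TestStateSurvivesRestart(t *testing.T) {
	disk := "" // the shared "log"

	a := start(&disk)
	*a.state = "StateCommitmentBroadcasted"

	// Simulate a restart: a new instance over the same persisted state.
	b := start(&disk)
	require.Equal(t, "StateCommitmentBroadcasted", *b.state)
}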
// Finally it should advance to StateFullyResolved.
@ -1431,9 +1407,7 @@ func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) {
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
if err := chanArb.Start(nil); err != nil {
@ -1515,9 +1489,7 @@ func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) {
c.chanArb.cfg.ClosingHeight = 100
c.chanArb.cfg.CloseType = channeldb.BreachClose
})
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
defer chanArbCtx.CleanUp()
// We should transition to StateContractClosed.
@ -1699,9 +1671,7 @@ func TestChannelArbitratorEmptyResolutions(t *testing.T) {
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
chanArb.cfg.IsPendingClose = true
@ -1736,9 +1706,7 @@ func TestChannelArbitratorAlreadyForceClosed(t *testing.T) {
state: StateCommitmentBroadcasted,
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
if err := chanArb.Start(nil); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
@ -2009,9 +1977,7 @@ func TestChannelArbitratorPendingExpiredHTLC(t *testing.T) {
resolvers: make(map[ContractResolver]struct{}),
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
// We'll inject a test clock implementation so we can control the uptime.
@ -2504,9 +2470,7 @@ func TestChannelArbitratorAnchors(t *testing.T) {
}
chanArbCtx, err := createTestChannelArbitrator(t, log)
if err != nil {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
require.NoError(t, err, "unable to create ChannelArbitrator")
// Replace our mocked put report function with one which will push
// reports into a channel for us to consume. We update this function

View file

@ -10,6 +10,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/stretchr/testify/require"
)
type incubateTest struct {
@ -53,15 +54,11 @@ func initIncubateTests() {
// any modifying calls are made.
func TestNurseryStoreInit(t *testing.T) {
cdb, cleanUp, err := channeldb.MakeTestDB()
if err != nil {
t.Fatalf("unable to open channel db: %v", err)
}
require.NoError(t, err, "unable to open channel db")
defer cleanUp()
ns, err := NewNurseryStore(&chainHash, cdb)
if err != nil {
t.Fatalf("unable to open nursery store: %v", err)
}
require.NoError(t, err, "unable to open nursery store")
assertNumChannels(t, ns, 0)
assertNumPreschools(t, ns, 0)
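
assertNumChannels and assertNumPreschools follow the usual custom-assertion shape. A generic sketch of it, with the counted collection hypothetical; t.Helper() makes a failure report the caller's line, and require keeps the fail-fast behaviour of the old t.Fatalf calls:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// assertCount is a helper in the style of assertNumChannels.
func assertCount(t *testing.T, got, want int, what string) {
	t.Helper()
	require.Equal(t, want, got, "unexpected number of %s", what)
}

func TestAssertCount(t *testing.T) {
	channels := []string{} // hypothetical freshly initialized store
	assertCount(t, len(channels), 0, "channels")
}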
@ -73,15 +70,11 @@ func TestNurseryStoreInit(t *testing.T) {
// intermediate states.
func TestNurseryStoreIncubate(t *testing.T) {
cdb, cleanUp, err := channeldb.MakeTestDB()
if err != nil {
t.Fatalf("unable to open channel db: %v", err)
}
require.NoError(t, err, "unable to open channel db")
defer cleanUp()
ns, err := NewNurseryStore(&chainHash, cdb)
if err != nil {
t.Fatalf("unable to open nursery store: %v", err)
}
require.NoError(t, err, "unable to open nursery store")
for i, test := range incubateTests {
// At the beginning of each test, we do not expect to the
@ -314,15 +307,11 @@ func TestNurseryStoreIncubate(t *testing.T) {
// purged height is set appropriately.
func TestNurseryStoreGraduate(t *testing.T) {
cdb, cleanUp, err := channeldb.MakeTestDB()
if err != nil {
t.Fatalf("unable to open channel db: %v", err)
}
require.NoError(t, err, "unable to open channel db")
defer cleanUp()
ns, err := NewNurseryStore(&chainHash, cdb)
if err != nil {
t.Fatalf("unable to open nursery store: %v", err)
}
require.NoError(t, err, "unable to open nursery store")
kid := &kidOutputs[3]
@ -333,16 +322,12 @@ func TestNurseryStoreGraduate(t *testing.T) {
// First, add a commitment output to the nursery store, which is
// initially inserted in the preschool bucket.
err = ns.Incubate([]kidOutput{*kid}, nil)
if err != nil {
t.Fatalf("unable to incubate commitment output: %v", err)
}
require.NoError(t, err, "unable to incubate commitment output")
// Then, move the commitment output to the kindergarten bucket, such
// that it resides in the height index at its maturity height.
err = ns.PreschoolToKinder(kid, 0)
if err != nil {
t.Fatalf("unable to move pscl output to kndr: %v", err)
}
require.NoError(t, err, "unable to move pscl output to kndr")
// Now, iteratively purge all heights below the target maturity height,
// checking that each class is now empty, and that the last purged
@ -394,9 +379,7 @@ func assertNumChanOutputs(t *testing.T, ns NurseryStorer,
// matches the expected number.
func assertNumPreschools(t *testing.T, ns NurseryStorer, expected int) {
psclOutputs, err := ns.FetchPreschools()
if err != nil {
t.Fatalf("unable to retrieve preschool outputs: %v", err)
}
require.NoError(t, err, "unable to retrieve preschool outputs")
if len(psclOutputs) != expected {
t.Fatalf("expected number of pscl outputs to be %d, got %v",
@ -534,9 +517,7 @@ func assertChannelMaturity(t *testing.T, ns NurseryStorer,
chanPoint *wire.OutPoint, expectedMaturity bool) {
isMature, err := ns.IsMatureChannel(chanPoint)
if err != nil {
t.Fatalf("unable to fetch channel maturity: %v", err)
}
require.NoError(t, err, "unable to fetch channel maturity")
if isMature != expectedMaturity {
t.Fatalf("expected channel maturity: %v, actual: %v",

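For readers unfamiliar with the fixture these tests share, here is a minimal, editorial sketch of the same setup. It assumes NewNurseryStore is exported from the contractcourt package at this commit, and it only checks emptiness because FetchPreschools returns a slice of an unexported type.

package example

import (
	"testing"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/contractcourt"
	"github.com/stretchr/testify/require"
)

func TestFreshNurseryStoreIsEmpty(t *testing.T) {
	// MakeTestDB opens a throwaway channeldb instance, as in the
	// tests above.
	cdb, cleanUp, err := channeldb.MakeTestDB()
	require.NoError(t, err, "unable to open channel db")
	defer cleanUp()

	// Wrap the database in a nursery store keyed by a zeroed chain
	// hash; the package path is an assumption based on this commit.
	ns, err := contractcourt.NewNurseryStore(&chainhash.Hash{}, cdb)
	require.NoError(t, err, "unable to open nursery store")

	// A fresh store should have no preschool outputs.
	psclOutputs, err := ns.FetchPreschools()
	require.NoError(t, err, "unable to retrieve preschool outputs")
	require.Empty(t, psclOutputs)
}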

@ -24,6 +24,7 @@ import (
"github.com/lightningnetwork/lnd/lntest/mock"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/sweep"
"github.com/stretchr/testify/require"
)
var (
@ -419,9 +420,7 @@ func createNurseryTestContext(t *testing.T,
// still considerable logic in the store.
cdb, cleanup, err := channeldb.MakeTestDB()
if err != nil {
t.Fatalf("unable to open channeldb: %v", err)
}
require.NoError(t, err, "unable to open channeldb")
store, err := NewNurseryStore(&chainhash.Hash{}, cdb)
if err != nil {

File diff suppressed because it is too large


@ -13,15 +13,14 @@ import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
func createTestMessageStore(t *testing.T) (*MessageStore, func()) {
t.Helper()
tempDir, err := ioutil.TempDir("", "channeldb")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
db, err := channeldb.Open(tempDir)
if err != nil {
os.RemoveAll(tempDir)
@ -44,9 +43,7 @@ func createTestMessageStore(t *testing.T) (*MessageStore, func()) {
func randPubKey(t *testing.T) *btcec.PublicKey {
priv, err := btcec.NewPrivateKey()
if err != nil {
t.Fatalf("unable to create private key: %v", err)
}
require.NoError(t, err, "unable to create private key")
return priv.PubKey()
}
@ -242,24 +239,18 @@ func TestMessageStoreUnsupportedMessage(t *testing.T) {
messageStore := tx.ReadWriteBucket(messageStoreBucket)
return messageStore.Put(msgKey, rawMsg.Bytes())
}, func() {})
if err != nil {
t.Fatalf("unable to add unsupported message to store: %v", err)
}
require.NoError(t, err, "unable to add unsupported message to store")
// Finally, we'll check that the store can properly filter out messages
// that are currently unknown to it. We'll make sure this is done for
// both Messages and MessagesForPeer.
totalMsgs, err := msgStore.Messages()
if err != nil {
t.Fatalf("unable to retrieve messages: %v", err)
}
require.NoError(t, err, "unable to retrieve messages")
if len(totalMsgs) != 0 {
t.Fatalf("expected to filter out unsupported message")
}
peerMsgs, err := msgStore.MessagesForPeer(peer)
if err != nil {
t.Fatalf("unable to retrieve peer messages: %v", err)
}
require.NoError(t, err, "unable to retrieve peer messages")
if len(peerMsgs) != 0 {
t.Fatalf("expected to filter out unsupported message")
}


@ -489,9 +489,7 @@ func TestGossipSyncerApplyGossipFilter(t *testing.T) {
// We'll now attempt to apply the gossip filter for the remote peer.
err := syncer.ApplyGossipFilter(remoteHorizon)
if err != nil {
t.Fatalf("unable to apply filter: %v", err)
}
require.NoError(t, err, "unable to apply filter")
// There should be no messages in the message queue as we didn't send
// the syncer and messages within the horizon.
@ -539,9 +537,7 @@ func TestGossipSyncerApplyGossipFilter(t *testing.T) {
}
}()
err = syncer.ApplyGossipFilter(remoteHorizon)
if err != nil {
t.Fatalf("unable to apply filter: %v", err)
}
require.NoError(t, err, "unable to apply filter")
// We should get back the exact same message.
select {
@ -587,9 +583,7 @@ func TestGossipSyncerQueryChannelRangeWrongChainHash(t *testing.T) {
NumBlocks: math.MaxUint32,
}
err := syncer.replyChanRangeQuery(query)
if err != nil {
t.Fatalf("unable to process short chan ID's: %v", err)
}
require.NoError(t, err, "unable to process short chan ID's")
select {
case <-time.After(time.Second * 15):
@ -638,9 +632,7 @@ func TestGossipSyncerReplyShortChanIDsWrongChainHash(t *testing.T) {
err := syncer.replyShortChanIDs(&lnwire.QueryShortChanIDs{
ChainHash: *chaincfg.SimNetParams.GenesisHash,
})
if err != nil {
t.Fatalf("unable to process short chan ID's: %v", err)
}
require.NoError(t, err, "unable to process short chan ID's")
select {
case <-time.After(time.Second * 15):
@ -729,9 +721,7 @@ func TestGossipSyncerReplyShortChanIDs(t *testing.T) {
err := syncer.replyShortChanIDs(&lnwire.QueryShortChanIDs{
ShortChanIDs: queryChanIDs,
})
if err != nil {
t.Fatalf("unable to query for chan IDs: %v", err)
}
require.NoError(t, err, "unable to query for chan IDs")
for i := 0; i < len(queryReply)+1; i++ {
select {
@ -1157,9 +1147,7 @@ func TestGossipSyncerGenChanRangeQuery(t *testing.T) {
// should return a start height that's back chanRangeQueryBuffer
// blocks.
rangeQuery, err := syncer.genChanRangeQuery(false)
if err != nil {
t.Fatalf("unable to resp: %v", err)
}
require.NoError(t, err, "unable to resp")
firstHeight := uint32(startingHeight - chanRangeQueryBuffer)
if rangeQuery.FirstBlockHeight != firstHeight {
@ -1175,9 +1163,7 @@ func TestGossipSyncerGenChanRangeQuery(t *testing.T) {
// Generating a historical range query should result in a start height
// of 0.
rangeQuery, err = syncer.genChanRangeQuery(true)
if err != nil {
t.Fatalf("unable to resp: %v", err)
}
require.NoError(t, err, "unable to resp")
if rangeQuery.FirstBlockHeight != 0 {
t.Fatalf("incorrect chan range query: expected %v, %v", 0,
rangeQuery.FirstBlockHeight)
@ -1221,9 +1207,7 @@ func testGossipSyncerProcessChanRangeReply(t *testing.T, legacy bool) {
startingState := syncer.state
query, err := syncer.genChanRangeQuery(true)
if err != nil {
t.Fatalf("unable to generate channel range query: %v", err)
}
require.NoError(t, err, "unable to generate channel range query")
// When interpreting block ranges, the first reply should start from
// our requested first block, and the last should end at our requested
@ -1431,9 +1415,7 @@ func TestGossipSyncerSynchronizeChanIDs(t *testing.T) {
// If we issue another query, the syncer should tell us that it's done.
done, err := syncer.synchronizeChanIDs()
if err != nil {
t.Fatalf("unable to sync chan IDs: %v", err)
}
require.NoError(t, err, "unable to sync chan IDs")
if done {
t.Fatalf("syncer should be finished!")
}


@ -5,6 +5,7 @@ import (
"testing"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
type managerTest struct {
@ -73,9 +74,7 @@ func TestManager(t *testing.T) {
func testManager(t *testing.T, test managerTest) {
m, err := newManager(test.cfg, testSetDesc)
if err != nil {
t.Fatalf("unable to create feature manager: %v", err)
}
require.NoError(t, err, "unable to create feature manager")
sets := []Set{
SetInit,


@ -349,9 +349,7 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey,
cdb, netParams, chainNotifier, wc, signer, keyRing, bio,
estimator,
)
if err != nil {
t.Fatalf("unable to create test ln wallet: %v", err)
}
require.NoError(t, err, "unable to create test ln wallet")
var chanIDSeed [32]byte
@ -459,9 +457,7 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey,
}
f, err := NewFundingManager(fundingCfg)
if err != nil {
t.Fatalf("failed creating fundingManager: %v", err)
}
require.NoError(t, err, "failed creating fundingManager")
if err = f.Start(); err != nil {
t.Fatalf("failed starting fundingManager: %v", err)
}
@ -557,9 +553,7 @@ func recreateAliceFundingManager(t *testing.T, alice *testNode) {
ReservationTimeout: oldCfg.ReservationTimeout,
OpenChannelPredicate: chainedAcceptor,
})
if err != nil {
t.Fatalf("failed recreating aliceFundingManager: %v", err)
}
require.NoError(t, err, "failed recreating aliceFundingManager")
alice.fundingMgr = f
alice.msgChan = aliceMsgChan
@ -578,28 +572,20 @@ func setupFundingManagers(t *testing.T,
options ...cfgOption) (*testNode, *testNode) {
aliceTestDir, err := ioutil.TempDir("", "alicelnwallet")
if err != nil {
t.Fatalf("unable to create temp directory: %v", err)
}
require.NoError(t, err, "unable to create temp directory")
alice, err := createTestFundingManager(
t, alicePrivKey, aliceAddr, aliceTestDir, options...,
)
if err != nil {
t.Fatalf("failed creating fundingManager: %v", err)
}
require.NoError(t, err, "failed creating fundingManager")
bobTestDir, err := ioutil.TempDir("", "boblnwallet")
if err != nil {
t.Fatalf("unable to create temp directory: %v", err)
}
require.NoError(t, err, "unable to create temp directory")
bob, err := createTestFundingManager(
t, bobPrivKey, bobAddr, bobTestDir, options...,
)
if err != nil {
t.Fatalf("failed creating fundingManager: %v", err)
}
require.NoError(t, err, "failed creating fundingManager")
// With the funding manager's created, we'll now attempt to mimic a
// connection pipe between them. In order to intercept the messages
@ -1968,9 +1954,7 @@ func TestFundingManagerFundingTimeout(t *testing.T) {
// Bob will at this point be waiting for the funding transaction to be
// confirmed, so the channel should be considered pending.
pendingChannels, err := bob.fundingMgr.cfg.Wallet.Cfg.Database.FetchPendingChannels()
if err != nil {
t.Fatalf("unable to fetch pending channels: %v", err)
}
require.NoError(t, err, "unable to fetch pending channels")
if len(pendingChannels) != 1 {
t.Fatalf("Expected Bob to have 1 pending channel, had %v",
len(pendingChannels))
@ -2014,9 +1998,7 @@ func TestFundingManagerFundingNotTimeoutInitiator(t *testing.T) {
// Alice will at this point be waiting for the funding transaction to be
// confirmed, so the channel should be considered pending.
pendingChannels, err := alice.fundingMgr.cfg.Wallet.Cfg.Database.FetchPendingChannels()
if err != nil {
t.Fatalf("unable to fetch pending channels: %v", err)
}
require.NoError(t, err, "unable to fetch pending channels")
if len(pendingChannels) != 1 {
t.Fatalf("Expected Alice to have 1 pending channel, had %v",
len(pendingChannels))
@ -2766,9 +2748,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) {
// Check that the custom channel parameters were properly set in the
// channel reservation.
resCtx, err := alice.fundingMgr.getReservationCtx(bobPubKey, chanID)
if err != nil {
t.Fatalf("unable to find ctx: %v", err)
}
require.NoError(t, err, "unable to find ctx")
// Alice's CSV delay should be 4 since Bob sent the default value, and
// Bob's should be 67 since Alice sent the custom value.
@ -2792,9 +2772,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) {
// Also make sure the parameters are properly set on Bob's end.
resCtx, err = bob.fundingMgr.getReservationCtx(alicePubKey, chanID)
if err != nil {
t.Fatalf("unable to find ctx: %v", err)
}
require.NoError(t, err, "unable to find ctx")
if err := assertDelay(resCtx, csvDelay, 4); err != nil {
t.Fatal(err)


@ -16,6 +16,7 @@ import (
"github.com/lightningnetwork/lnd/htlcswitch/hop"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
var (
@ -126,9 +127,7 @@ func newCircuitMap(t *testing.T, resMsg bool) (*htlcswitch.CircuitMapConfig,
}
circuitMap, err := htlcswitch.NewCircuitMap(circuitMapCfg)
if err != nil {
t.Fatalf("unable to create persistent circuit map: %v", err)
}
require.NoError(t, err, "unable to create persistent circuit map")
return circuitMapCfg, circuitMap
}
@ -489,9 +488,7 @@ func TestCircuitMapPersistence(t *testing.T) {
// Test removing circuits and the subsequent lookups.
err = circuitMap.DeleteCircuits(circuit1.Incoming)
if err != nil {
t.Fatalf("Remove returned unexpected error: %v", err)
}
require.NoError(t, err, "Remove returned unexpected error")
// There should be exactly one remaining circuit with hash1, and it
// should be circuit4.
@ -514,9 +511,7 @@ func TestCircuitMapPersistence(t *testing.T) {
// Remove last remaining circuit with payment hash hash1.
err = circuitMap.DeleteCircuits(circuit4.Incoming)
if err != nil {
t.Fatalf("Remove returned unexpected error: %v", err)
}
require.NoError(t, err, "Remove returned unexpected error")
assertNumCircuitsWithHash(t, circuitMap, hash1, 0)
assertNumCircuitsWithHash(t, circuitMap, hash2, 1)
@ -528,9 +523,7 @@ func TestCircuitMapPersistence(t *testing.T) {
// Remove last remaining circuit with payment hash hash2.
err = circuitMap.DeleteCircuits(circuit2.Incoming)
if err != nil {
t.Fatalf("Remove returned unexpected error: %v", err)
}
require.NoError(t, err, "Remove returned unexpected error")
// There should now only be one remaining circuit, with hash3.
assertNumCircuitsWithHash(t, circuitMap, hash2, 0)
@ -639,9 +632,7 @@ func makeCircuitDB(t *testing.T, path string) *channeldb.DB {
}
db, err := channeldb.Open(path)
if err != nil {
t.Fatalf("unable to open channel db: %v", err)
}
require.NoError(t, err, "unable to open channel db")
return db
}
@ -666,9 +657,7 @@ func restartCircuitMap(t *testing.T, cfg *htlcswitch.CircuitMapConfig) (
CheckResolutionMsg: cfg.CheckResolutionMsg,
}
cm2, err := htlcswitch.NewCircuitMap(cfg2)
if err != nil {
t.Fatalf("unable to recreate persistent circuit map: %v", err)
}
require.NoError(t, err, "unable to recreate persistent circuit map")
return cfg2, cm2
}
@ -699,9 +688,7 @@ func TestCircuitMapCommitCircuits(t *testing.T) {
// First we will try to add a new circuit to the circuit map; this
// should succeed.
actions, err := circuitMap.CommitCircuits(circuit)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
if len(actions.Drops) > 0 {
t.Fatalf("new circuit should not have been dropped")
}
@ -723,9 +710,7 @@ func TestCircuitMapCommitCircuits(t *testing.T) {
// in the circuit being dropped. This can happen if the incoming link
// flaps.
actions, err = circuitMap.CommitCircuits(circuit)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
if len(actions.Adds) > 0 {
t.Fatalf("duplicate circuit should not have been added to circuit map")
}
@ -744,9 +729,7 @@ func TestCircuitMapCommitCircuits(t *testing.T) {
_, circuitMap = restartCircuitMap(t, cfg)
actions, err = circuitMap.CommitCircuits(circuit)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
if len(actions.Adds) > 0 {
t.Fatalf("duplicate circuit with incomplete forwarding " +
"decision should not have been added to circuit map")
@ -795,9 +778,7 @@ func TestCircuitMapOpenCircuits(t *testing.T) {
// First we will try to add a new circuit to the circuit map; this
// should succeed.
_, err = circuitMap.CommitCircuits(circuit)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
keystone := htlcswitch.Keystone{
InKey: circuit.Incoming,
@ -809,9 +790,7 @@ func TestCircuitMapOpenCircuits(t *testing.T) {
// Open the circuit for the first time.
err = circuitMap.OpenCircuits(keystone)
if err != nil {
t.Fatalf("failed to open circuits: %v", err)
}
require.NoError(t, err, "failed to open circuits")
// Check that we can retrieve the open circuit before the circuit map
// is restarted.
@ -841,9 +820,7 @@ func TestCircuitMapOpenCircuits(t *testing.T) {
// flaps OR the switch is entirely restarted and the outgoing link has
// not received a response.
actions, err := circuitMap.CommitCircuits(circuit)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
if len(actions.Adds) > 0 {
t.Fatalf("duplicate circuit should not have been added to circuit map")
}
@ -882,9 +859,7 @@ func TestCircuitMapOpenCircuits(t *testing.T) {
// if the incoming link flaps OR the switch is entirely restarted and
// the outgoing link has not received a response.
actions, err = circuitMap.CommitCircuits(circuit)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
if len(actions.Adds) > 0 {
t.Fatalf("duplicate circuit should not have been added to circuit map")
}
@ -1012,9 +987,7 @@ func TestCircuitMapTrimOpenCircuits(t *testing.T) {
// First we will try to add a new circuit to the circuit map; this
// should succeed.
_, err = circuitMap.CommitCircuits(circuits...)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
// Now create a list of the keystones that we will use to preemptively
// open the circuits. We set the index as the outgoing HtlcID to i
@ -1032,9 +1005,7 @@ func TestCircuitMapTrimOpenCircuits(t *testing.T) {
// Open the circuits for the first time.
err = circuitMap.OpenCircuits(keystones...)
if err != nil {
t.Fatalf("failed to open circuits: %v", err)
}
require.NoError(t, err, "failed to open circuits")
// Check that all circuits are marked open.
assertCircuitsOpenedPreRestart(t, circuitMap, circuits, keystones)
@ -1152,9 +1123,7 @@ func TestCircuitMapCloseOpenCircuits(t *testing.T) {
// First we will try to add a new circuit to the circuit map; this
// should succeed.
_, err = circuitMap.CommitCircuits(circuit)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
keystone := htlcswitch.Keystone{
InKey: circuit.Incoming,
@ -1166,9 +1135,7 @@ func TestCircuitMapCloseOpenCircuits(t *testing.T) {
// Open the circuit for the first time.
err = circuitMap.OpenCircuits(keystone)
if err != nil {
t.Fatalf("failed to open circuits: %v", err)
}
require.NoError(t, err, "failed to open circuits")
// Check that we can retrieve the open circuit before the circuit map
// is restarted.
@ -1243,9 +1210,7 @@ func TestCircuitMapCloseUnopenedCircuit(t *testing.T) {
// First we will try to add a new circuit to the circuit map; this
// should succeed.
_, err = circuitMap.CommitCircuits(circuit)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
// Close the open circuit for the first time, which should succeed.
_, err = circuitMap.FailCircuit(circuit.Incoming)
@ -1300,9 +1265,7 @@ func TestCircuitMapDeleteUnopenedCircuit(t *testing.T) {
// First we will try to add a new circuit to the circuit map; this
// should succeed.
_, err = circuitMap.CommitCircuits(circuit)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
// Close the open circuit for the first time, which should succeed.
_, err = circuitMap.FailCircuit(circuit.Incoming)
@ -1359,9 +1322,7 @@ func TestCircuitMapDeleteOpenCircuit(t *testing.T) {
// First we will try to add a new circuit to the circuit map; this
// should succeed.
_, err = circuitMap.CommitCircuits(circuit)
if err != nil {
t.Fatalf("failed to commit circuits: %v", err)
}
require.NoError(t, err, "failed to commit circuits")
keystone := htlcswitch.Keystone{
InKey: circuit.Incoming,
@ -1373,9 +1334,7 @@ func TestCircuitMapDeleteOpenCircuit(t *testing.T) {
// Open the circuit for the first time.
err = circuitMap.OpenCircuits(keystone)
if err != nil {
t.Fatalf("failed to open circuits: %v", err)
}
require.NoError(t, err, "failed to open circuits")
// Close the open circuit for the first time, which should succeed.
_, err = circuitMap.FailCircuit(circuit.Incoming)


@ -12,6 +12,7 @@ import (
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lntest/mock"
"github.com/stretchr/testify/require"
)
const (
@ -22,9 +23,7 @@ const (
// decayed log instance.
func tempDecayedLogPath(t *testing.T) string {
dir, err := ioutil.TempDir("", "decayedlog")
if err != nil {
t.Fatalf("unable to create temporary decayed log dir: %v", err)
}
require.NoError(t, err, "unable to create temporary decayed log dir")
return dir
}
@ -99,16 +98,12 @@ func TestDecayedLogGarbageCollector(t *testing.T) {
dbPath := tempDecayedLogPath(t)
d, notifier, hashedSecret, _, err := startup(dbPath, true)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
require.NoError(t, err, "Unable to start up DecayedLog")
defer shutdown(dbPath, d)
// Store <hashedSecret, cltv> in the sharedHashBucket.
err = d.Put(hashedSecret, cltv)
if err != nil {
t.Fatalf("Unable to store in channeldb: %v", err)
}
require.NoError(t, err, "Unable to store in channeldb")
// Wait for database write (GC is in a goroutine)
time.Sleep(500 * time.Millisecond)
@ -123,9 +118,7 @@ func TestDecayedLogGarbageCollector(t *testing.T) {
// Assert that hashedSecret is still in the sharedHashBucket
val, err := d.Get(hashedSecret)
if err != nil {
t.Fatalf("Get failed - received an error upon Get: %v", err)
}
require.NoError(t, err, "Get failed - received an error upon Get")
if val != cltv {
t.Fatalf("GC incorrectly deleted CLTV")
@ -160,9 +153,7 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
dbPath := tempDecayedLogPath(t)
d, _, hashedSecret, stop, err := startup(dbPath, true)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
require.NoError(t, err, "Unable to start up DecayedLog")
defer shutdown(dbPath, d)
// Store <hashedSecret, cltv> in the sharedHashBucket
@ -180,9 +171,7 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
stop()
d2, notifier2, _, _, err := startup(dbPath, true)
if err != nil {
t.Fatalf("Unable to restart DecayedLog: %v", err)
}
require.NoError(t, err, "Unable to restart DecayedLog")
defer shutdown(dbPath, d2)
// Check that the hash prefix still exists in the new db instance.
@ -216,22 +205,16 @@ func TestDecayedLogInsertionAndDeletion(t *testing.T) {
dbPath := tempDecayedLogPath(t)
d, _, hashedSecret, _, err := startup(dbPath, false)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
require.NoError(t, err, "Unable to start up DecayedLog")
defer shutdown(dbPath, d)
// Store <hashedSecret, cltv> in the sharedHashBucket.
err = d.Put(hashedSecret, cltv)
if err != nil {
t.Fatalf("Unable to store in channeldb: %v", err)
}
require.NoError(t, err, "Unable to store in channeldb")
// Delete hashedSecret from the sharedHashBucket.
err = d.Delete(hashedSecret)
if err != nil {
t.Fatalf("Unable to delete from channeldb: %v", err)
}
require.NoError(t, err, "Unable to delete from channeldb")
// Assert that hashedSecret is not in the sharedHashBucket
_, err = d.Get(hashedSecret)
@ -254,31 +237,23 @@ func TestDecayedLogStartAndStop(t *testing.T) {
dbPath := tempDecayedLogPath(t)
d, _, hashedSecret, stop, err := startup(dbPath, false)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
require.NoError(t, err, "Unable to start up DecayedLog")
defer shutdown(dbPath, d)
// Store <hashedSecret, cltv> in the sharedHashBucket.
err = d.Put(hashedSecret, cltv)
if err != nil {
t.Fatalf("Unable to store in channeldb: %v", err)
}
require.NoError(t, err, "Unable to store in channeldb")
// Shutdown the DecayedLog's channeldb
stop()
d2, _, hashedSecret2, stop, err := startup(dbPath, false)
if err != nil {
t.Fatalf("Unable to restart DecayedLog: %v", err)
}
require.NoError(t, err, "Unable to restart DecayedLog")
defer shutdown(dbPath, d2)
// Retrieve the stored cltv value given the hashedSecret key.
value, err := d2.Get(hashedSecret)
if err != nil {
t.Fatalf("Unable to retrieve from channeldb: %v", err)
}
require.NoError(t, err, "Unable to retrieve from channeldb")
// Check that the original cltv value matches the retrieved cltv
// value.
@ -288,17 +263,13 @@ func TestDecayedLogStartAndStop(t *testing.T) {
// Delete hashedSecret from sharedHashBucket
err = d2.Delete(hashedSecret2)
if err != nil {
t.Fatalf("Unable to delete from channeldb: %v", err)
}
require.NoError(t, err, "Unable to delete from channeldb")
// Shutdown the DecayedLog's channeldb
stop()
d3, _, hashedSecret3, _, err := startup(dbPath, false)
if err != nil {
t.Fatalf("Unable to restart DecayedLog: %v", err)
}
require.NoError(t, err, "Unable to restart DecayedLog")
defer shutdown(dbPath, d3)
// Assert that hashedSecret is not in the sharedHashBucket
@ -320,22 +291,16 @@ func TestDecayedLogStorageAndRetrieval(t *testing.T) {
dbPath := tempDecayedLogPath(t)
d, _, hashedSecret, _, err := startup(dbPath, false)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
require.NoError(t, err, "Unable to start up DecayedLog")
defer shutdown(dbPath, d)
// Store <hashedSecret, cltv> in the sharedHashBucket
err = d.Put(hashedSecret, cltv)
if err != nil {
t.Fatalf("Unable to store in channeldb: %v", err)
}
require.NoError(t, err, "Unable to store in channeldb")
// Retrieve the stored cltv value given the hashedSecret key.
value, err := d.Get(hashedSecret)
if err != nil {
t.Fatalf("Unable to retrieve from channeldb: %v", err)
}
require.NoError(t, err, "Unable to retrieve from channeldb")
// If the original cltv value does not match the value retrieved,
// then the test failed.


@ -10,6 +10,7 @@ import (
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/record"
"github.com/lightningnetwork/lnd/tlv"
"github.com/stretchr/testify/require"
)
// TestSphinxHopIteratorForwardingInstructions tests that we're able to
@ -44,9 +45,7 @@ func TestSphinxHopIteratorForwardingInstructions(t *testing.T) {
record.NewNextHopIDRecord(&nextAddrInt),
}
tlvStream, err := tlv.NewStream(tlvRecords...)
if err != nil {
t.Fatalf("unable to create stream: %v", err)
}
require.NoError(t, err, "unable to create stream")
if err := tlvStream.Encode(&b); err != nil {
t.Fatalf("unable to encode stream: %v", err)
}

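The hunk above exercises lnd's TLV encoding of hop forwarding instructions. As a hedged, self-contained sketch of the same flow: the amount and hop ID below are arbitrary test values, and the two record constructors are ones I believe exist in lnd's record package (the diff itself only shows NewNextHopIDRecord).

package example

import (
	"bytes"
	"testing"

	"github.com/lightningnetwork/lnd/record"
	"github.com/lightningnetwork/lnd/tlv"
	"github.com/stretchr/testify/require"
)

func TestEncodeForwardingRecords(t *testing.T) {
	var (
		amt      uint64 = 1000
		nextChan uint64 = 42
		b        bytes.Buffer
	)

	// Build a TLV stream carrying the forwarding instructions, just
	// as the hop iterator test does.
	tlvStream, err := tlv.NewStream(
		record.NewAmtToFwdRecord(&amt),
		record.NewNextHopIDRecord(&nextChan),
	)
	require.NoError(t, err, "unable to create stream")

	// Encoding writes the records into the buffer.
	require.NoError(t, tlvStream.Encode(&b), "unable to encode stream")
	require.NotEmpty(t, b.Bytes())
}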
File diff suppressed because it is too large


@ -122,13 +122,9 @@ func TestMailBoxCouriers(t *testing.T) {
// With the packets drained and partially acked, we reset the mailbox,
// simulating a link shutting down and then coming back up.
err := ctx.mailbox.ResetMessages()
if err != nil {
t.Fatalf("unable to reset messages: %v", err)
}
require.NoError(t, err, "unable to reset messages")
err = ctx.mailbox.ResetPackets()
if err != nil {
t.Fatalf("unable to reset packets: %v", err)
}
require.NoError(t, err, "unable to reset packets")
// Now, we'll use the same alternating strategy to read from our
// mailbox. All wire messages are dropped on startup, but any unacked
@ -348,9 +344,7 @@ func TestMailBoxFailAdd(t *testing.T) {
// the link flapping and coming back up before the second batch's
// expiries have elapsed. We should see no failures sent back.
err := ctx.mailbox.ResetPackets()
if err != nil {
t.Fatalf("unable to reset packets: %v", err)
}
require.NoError(t, err, "unable to reset packets")
ctx.checkFails(nil)
// Redeliver the second batch to the link and hold them there.
@ -369,9 +363,7 @@ func TestMailBoxFailAdd(t *testing.T) {
// Finally, reset the link which should cause the second batch to be
// cancelled immediately.
err = ctx.mailbox.ResetPackets()
if err != nil {
t.Fatalf("unable to reset packets: %v", err)
}
require.NoError(t, err, "unable to reset packets")
ctx.checkFails(secondBatch)
}


@ -155,9 +155,7 @@ func TestNetworkResultStore(t *testing.T) {
// Let the third one subscribe now. The result should be received
// immediately.
sub, err := store.subscribeResult(2)
if err != nil {
t.Fatalf("unable to subscribe: %v", err)
}
require.NoError(t, err, "unable to subscribe")
select {
case <-sub:
case <-time.After(1 * time.Second):
@ -173,14 +171,10 @@ func TestNetworkResultStore(t *testing.T) {
// Add the result and try again.
err = store.storeResult(3, results[3])
if err != nil {
t.Fatalf("unable to store result: %v", err)
}
require.NoError(t, err, "unable to store result")
_, err = store.getResult(3)
if err != nil {
t.Fatalf("unable to get result: %v", err)
}
require.NoError(t, err, "unable to get result")
// Since we don't delete results from the store (yet), make sure we
// will get subscriptions for all of them.


@ -44,14 +44,10 @@ func TestSwitchAddDuplicateLink(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -76,9 +72,7 @@ func TestSwitchAddDuplicateLink(t *testing.T) {
// Update the short chan id of the channel, so that the link goes live.
aliceChannelLink.setLiveShortChanID(aliceChanID)
err = s.UpdateShortChanID(chanID1)
if err != nil {
t.Fatalf("unable to update alice short_chan_id: %v", err)
}
require.NoError(t, err, "unable to update alice short_chan_id")
// Alice should have a live link, adding again should fail.
if err := s.AddLink(aliceChannelLink); err == nil {
@ -104,14 +98,10 @@ func TestSwitchHasActiveLink(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -138,9 +128,7 @@ func TestSwitchHasActiveLink(t *testing.T) {
// Update the short chan id of the channel, so that the link goes live.
aliceChannelLink.setLiveShortChanID(aliceChanID)
err = s.UpdateShortChanID(chanID1)
if err != nil {
t.Fatalf("unable to update alice short_chan_id: %v", err)
}
require.NoError(t, err, "unable to update alice short_chan_id")
// UpdateShortChanID will cause the mock link to become eligible to
// forward. However, we can simulate the event where the short chan id
@ -174,21 +162,15 @@ func TestSwitchSendPending(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -215,9 +197,7 @@ func TestSwitchSendPending(t *testing.T) {
// Create request which should be forwarded from Bob channel
// link to Alice channel link.
preimage, err := genPreimage()
if err != nil {
t.Fatalf("unable to generate preimage: %v", err)
}
require.NoError(t, err, "unable to generate preimage")
rhash := sha256.Sum256(preimage[:])
packet := &htlcPacket{
incomingChanID: bobChanID,
@ -269,9 +249,7 @@ func TestSwitchSendPending(t *testing.T) {
// move the link to the live state.
aliceChannelLink.setLiveShortChanID(aliceChanID)
err = s.UpdateShortChanID(chanID1)
if err != nil {
t.Fatalf("unable to update alice short_chan_id: %v", err)
}
require.NoError(t, err, "unable to update alice short_chan_id")
// Increment the packet's HTLC index, so that it does not collide with
// the prior attempt.
@ -298,20 +276,14 @@ func TestSwitchForward(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -335,9 +307,7 @@ func TestSwitchForward(t *testing.T) {
// Create request which should be forwarded from Alice channel link to
// bob channel link.
preimage, err := genPreimage()
if err != nil {
t.Fatalf("unable to generate preimage: %v", err)
}
require.NoError(t, err, "unable to generate preimage")
rhash := sha256.Sum256(preimage[:])
packet := &htlcPacket{
incomingChanID: aliceChannelLink.ShortChanID(),
@ -411,30 +381,20 @@ func TestSwitchForwardFailAfterFullAdd(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
tempPath, err := ioutil.TempDir("", "circuitdb")
if err != nil {
t.Fatalf("unable to temporary path: %v", err)
}
require.NoError(t, err, "unable to temporary path")
cdb, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to open channeldb: %v", err)
}
require.NoError(t, err, "unable to open channeldb")
s, err := initSwitchWithDB(testStartingHeight, cdb)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -522,14 +482,10 @@ func TestSwitchForwardFailAfterFullAdd(t *testing.T) {
}
cdb2, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to reopen channeldb: %v", err)
}
require.NoError(t, err, "unable to reopen channeldb")
s2, err := initSwitchWithDB(testStartingHeight, cdb2)
if err != nil {
t.Fatalf("unable reinit switch: %v", err)
}
require.NoError(t, err, "unable reinit switch")
if err := s2.Start(); err != nil {
t.Fatalf("unable to restart switch: %v", err)
}
@ -610,30 +566,20 @@ func TestSwitchForwardSettleAfterFullAdd(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
tempPath, err := ioutil.TempDir("", "circuitdb")
if err != nil {
t.Fatalf("unable to temporary path: %v", err)
}
require.NoError(t, err, "unable to temporary path")
cdb, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to open channeldb: %v", err)
}
require.NoError(t, err, "unable to open channeldb")
s, err := initSwitchWithDB(testStartingHeight, cdb)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -721,14 +667,10 @@ func TestSwitchForwardSettleAfterFullAdd(t *testing.T) {
}
cdb2, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to reopen channeldb: %v", err)
}
require.NoError(t, err, "unable to reopen channeldb")
s2, err := initSwitchWithDB(testStartingHeight, cdb2)
if err != nil {
t.Fatalf("unable reinit switch: %v", err)
}
require.NoError(t, err, "unable reinit switch")
if err := s2.Start(); err != nil {
t.Fatalf("unable to restart switch: %v", err)
}
@ -812,30 +754,20 @@ func TestSwitchForwardDropAfterFullAdd(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
tempPath, err := ioutil.TempDir("", "circuitdb")
if err != nil {
t.Fatalf("unable to temporary path: %v", err)
}
require.NoError(t, err, "unable to temporary path")
cdb, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to open channeldb: %v", err)
}
require.NoError(t, err, "unable to open channeldb")
s, err := initSwitchWithDB(testStartingHeight, cdb)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -915,14 +847,10 @@ func TestSwitchForwardDropAfterFullAdd(t *testing.T) {
}
cdb2, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to reopen channeldb: %v", err)
}
require.NoError(t, err, "unable to reopen channeldb")
s2, err := initSwitchWithDB(testStartingHeight, cdb2)
if err != nil {
t.Fatalf("unable reinit switch: %v", err)
}
require.NoError(t, err, "unable reinit switch")
if err := s2.Start(); err != nil {
t.Fatalf("unable to restart switch: %v", err)
}
@ -977,30 +905,20 @@ func TestSwitchForwardFailAfterHalfAdd(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
tempPath, err := ioutil.TempDir("", "circuitdb")
if err != nil {
t.Fatalf("unable to temporary path: %v", err)
}
require.NoError(t, err, "unable to temporary path")
cdb, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to open channeldb: %v", err)
}
require.NoError(t, err, "unable to open channeldb")
s, err := initSwitchWithDB(testStartingHeight, cdb)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -1075,14 +993,10 @@ func TestSwitchForwardFailAfterHalfAdd(t *testing.T) {
}
cdb2, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to reopen channeldb: %v", err)
}
require.NoError(t, err, "unable to reopen channeldb")
s2, err := initSwitchWithDB(testStartingHeight, cdb2)
if err != nil {
t.Fatalf("unable reinit switch: %v", err)
}
require.NoError(t, err, "unable reinit switch")
if err := s2.Start(); err != nil {
t.Fatalf("unable to restart switch: %v", err)
}
@ -1143,30 +1057,20 @@ func TestSwitchForwardCircuitPersistence(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
tempPath, err := ioutil.TempDir("", "circuitdb")
if err != nil {
t.Fatalf("unable to temporary path: %v", err)
}
require.NoError(t, err, "unable to temporary path")
cdb, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to open channeldb: %v", err)
}
require.NoError(t, err, "unable to open channeldb")
s, err := initSwitchWithDB(testStartingHeight, cdb)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -1240,14 +1144,10 @@ func TestSwitchForwardCircuitPersistence(t *testing.T) {
}
cdb2, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to reopen channeldb: %v", err)
}
require.NoError(t, err, "unable to reopen channeldb")
s2, err := initSwitchWithDB(testStartingHeight, cdb2)
if err != nil {
t.Fatalf("unable reinit switch: %v", err)
}
require.NoError(t, err, "unable reinit switch")
if err := s2.Start(); err != nil {
t.Fatalf("unable to restart switch: %v", err)
}
@ -1333,14 +1233,10 @@ func TestSwitchForwardCircuitPersistence(t *testing.T) {
}
cdb3, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to reopen channeldb: %v", err)
}
require.NoError(t, err, "unable to reopen channeldb")
s3, err := initSwitchWithDB(testStartingHeight, cdb3)
if err != nil {
t.Fatalf("unable reinit switch: %v", err)
}
require.NoError(t, err, "unable reinit switch")
if err := s3.Start(); err != nil {
t.Fatalf("unable to restart switch: %v", err)
}
@ -1617,20 +1513,14 @@ func testSkipIneligibleLinksMultiHopForward(t *testing.T,
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -1747,14 +1637,10 @@ func testSkipLinkLocalForward(t *testing.T, eligible bool,
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -1773,9 +1659,7 @@ func testSkipLinkLocalForward(t *testing.T, eligible bool,
}
preimage, err := genPreimage()
if err != nil {
t.Fatalf("unable to generate preimage: %v", err)
}
require.NoError(t, err, "unable to generate preimage")
rhash := sha256.Sum256(preimage[:])
addMsg := &lnwire.UpdateAddHTLC{
PaymentHash: rhash,
@ -1803,20 +1687,14 @@ func TestSwitchCancel(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -1840,9 +1718,7 @@ func TestSwitchCancel(t *testing.T) {
// Create request which should be forwarded from alice channel link
// to bob channel link.
preimage, err := genPreimage()
if err != nil {
t.Fatalf("unable to generate preimage: %v", err)
}
require.NoError(t, err, "unable to generate preimage")
rhash := sha256.Sum256(preimage[:])
request := &htlcPacket{
incomingChanID: aliceChannelLink.ShortChanID(),
@ -1920,20 +1796,14 @@ func TestSwitchAddSamePayment(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -1955,9 +1825,7 @@ func TestSwitchAddSamePayment(t *testing.T) {
// Create request which should be forwarded from alice channel link
// to bob channel link.
preimage, err := genPreimage()
if err != nil {
t.Fatalf("unable to generate preimage: %v", err)
}
require.NoError(t, err, "unable to generate preimage")
rhash := sha256.Sum256(preimage[:])
request := &htlcPacket{
incomingChanID: aliceChannelLink.ShortChanID(),
@ -2083,14 +1951,10 @@ func TestSwitchSendPayment(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -2108,9 +1972,7 @@ func TestSwitchSendPayment(t *testing.T) {
// Create request which should be forwarded from alice channel link
// to bob channel link.
preimage, err := genPreimage()
if err != nil {
t.Fatalf("unable to generate preimage: %v", err)
}
require.NoError(t, err, "unable to generate preimage")
rhash := sha256.Sum256(preimage[:])
update := &lnwire.UpdateAddHTLC{
PaymentHash: rhash,
@ -2183,9 +2045,7 @@ func TestSwitchSendPayment(t *testing.T) {
obfuscator := NewMockObfuscator()
failure := lnwire.NewFailIncorrectDetails(update.Amount, 100)
reason, err := obfuscator.EncryptFirstHop(failure)
if err != nil {
t.Fatalf("unable obfuscate failure: %v", err)
}
require.NoError(t, err, "unable obfuscate failure")
if s.IsForwardedHTLC(aliceChannelLink.ShortChanID(), update.ID) {
t.Fatal("htlc should be identified as not forwarded")
@ -2224,9 +2084,7 @@ func TestLocalPaymentNoForwardingEvents(t *testing.T) {
channels, cleanUp, _, err := createClusterChannels(
btcutil.SatoshiPerBitcoin*3,
btcutil.SatoshiPerBitcoin*5)
if err != nil {
t.Fatalf("unable to create channel: %v", err)
}
require.NoError(t, err, "unable to create channel")
defer cleanUp()
n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
@ -2250,9 +2108,7 @@ func TestLocalPaymentNoForwardingEvents(t *testing.T) {
n.aliceServer, receiver, firstHop, hops, amount, htlcAmt,
totalTimelock,
).Wait(30 * time.Second)
if err != nil {
t.Fatalf("unable to make the payment: %v", err)
}
require.NoError(t, err, "unable to make the payment")
// At this point, we'll forcibly stop the three hop network. Doing
// this will cause any pending forwarding events to be flushed by the
@ -2286,9 +2142,7 @@ func TestMultiHopPaymentForwardingEvents(t *testing.T) {
channels, cleanUp, _, err := createClusterChannels(
btcutil.SatoshiPerBitcoin*3,
btcutil.SatoshiPerBitcoin*5)
if err != nil {
t.Fatalf("unable to create channel: %v", err)
}
require.NoError(t, err, "unable to create channel")
defer cleanUp()
n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
@ -2443,9 +2297,7 @@ func TestUpdateFailMalformedHTLCErrorConversion(t *testing.T) {
channels, cleanUp, _, err := createClusterChannels(
btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5,
)
if err != nil {
t.Fatalf("unable to create channel: %v", err)
}
require.NoError(t, err, "unable to create channel")
defer cleanUp()
n := newThreeHopNetwork(
@ -2518,9 +2370,7 @@ func TestSwitchGetPaymentResult(t *testing.T) {
preimg[0] = 3
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -2549,9 +2399,7 @@ func TestSwitchGetPaymentResult(t *testing.T) {
resultChan, err := s.GetPaymentResult(
paymentID, lntypes.Hash{}, newMockDeobfuscator(),
)
if err != nil {
t.Fatalf("unable to get payment result: %v", err)
}
require.NoError(t, err, "unable to get payment result")
// Add the result to the store.
n := &networkResult{
@ -2563,9 +2411,7 @@ func TestSwitchGetPaymentResult(t *testing.T) {
}
err = s.networkResults.storeResult(paymentID, n)
if err != nil {
t.Fatalf("unable to store result: %v", err)
}
require.NoError(t, err, "unable to store result")
// The result should be available.
select {
@ -2594,9 +2440,7 @@ func TestSwitchGetPaymentResult(t *testing.T) {
resultChan, err = s.GetPaymentResult(
paymentID, lntypes.Hash{}, newMockDeobfuscator(),
)
if err != nil {
t.Fatalf("unable to get payment result: %v", err)
}
require.NoError(t, err, "unable to get payment result")
select {
case res, ok := <-resultChan:
@ -2626,14 +2470,10 @@ func TestInvalidFailure(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
s, err := initSwitchWithDB(testStartingHeight, nil)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -2651,9 +2491,7 @@ func TestInvalidFailure(t *testing.T) {
// Create a request which should be forwarded to the mock channel link.
preimage, err := genPreimage()
if err != nil {
t.Fatalf("unable to generate preimage: %v", err)
}
require.NoError(t, err, "unable to generate preimage")
rhash := sha256.Sum256(preimage[:])
update := &lnwire.UpdateAddHTLC{
PaymentHash: rhash,
@ -2666,9 +2504,7 @@ func TestInvalidFailure(t *testing.T) {
err = s.SendHTLC(
aliceChannelLink.ShortChanID(), paymentID, update,
)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
require.NoError(t, err, "unable to send payment")
// Catch the packet and complete the circuit so that the switch is ready
// for a response.
@ -2860,9 +2696,7 @@ func testHtcNotifier(t *testing.T, testOpts []serverOption, iterations int,
channels, cleanUp, _, err := createClusterChannels(
btcutil.SatoshiPerBitcoin*3,
btcutil.SatoshiPerBitcoin*5)
if err != nil {
t.Fatalf("unable to create channel: %v", err)
}
require.NoError(t, err, "unable to create channel")
defer cleanUp()
// Mock time so that all events are reported with a static timestamp.
@ -3009,9 +2843,7 @@ func (n *threeHopNetwork) sendThreeHopPayment(t *testing.T) (*lnwire.UpdateAddHT
}
err = n.carolServer.registry.AddInvoice(*invoice, htlc.PaymentHash)
if err != nil {
t.Fatalf("unable to add invoice in carol registry: %v", err)
}
require.NoError(t, err, "unable to add invoice in carol registry")
if err := n.aliceServer.htlcSwitch.SendHTLC(
n.firstBobChannelLink.ShortChanID(), pid, htlc,
@ -3219,30 +3051,20 @@ func TestSwitchHoldForward(t *testing.T) {
alicePeer, err := newMockServer(
t, "alice", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobPeer, err := newMockServer(
t, "bob", testStartingHeight, nil, testDefaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
tempPath, err := ioutil.TempDir("", "circuitdb")
if err != nil {
t.Fatalf("unable to temporary path: %v", err)
}
require.NoError(t, err, "unable to temporary path")
cdb, err := channeldb.Open(tempPath)
if err != nil {
t.Fatalf("unable to open channeldb: %v", err)
}
require.NoError(t, err, "unable to open channeldb")
s, err := initSwitchWithDB(testStartingHeight, cdb)
if err != nil {
t.Fatalf("unable to init switch: %v", err)
}
require.NoError(t, err, "unable to init switch")
if err := s.Start(); err != nil {
t.Fatalf("unable to start switch: %v", err)
}
@ -3320,9 +3142,7 @@ func TestSwitchHoldForward(t *testing.T) {
packet.incomingTimeout = testStartingHeight + cltvRejectDelta - 1
err = switchForwardInterceptor.ForwardPackets(linkQuit, false, packet)
if err != nil {
t.Fatalf("can't forward htlc packet: %v", err)
}
require.NoError(t, err, "can't forward htlc packet")
assertOutgoingLinkReceive(t, bobChannelLink, false)
assertOutgoingLinkReceiveIntercepted(t, aliceChannelLink)
assertNumCircuits(t, s, 0, 0)
@ -3340,9 +3160,7 @@ func TestSwitchHoldForward(t *testing.T) {
}
err = switchForwardInterceptor.ForwardPackets(linkQuit, false, packet)
if err != nil {
t.Fatalf("can't forward htlc packet: %v", err)
}
require.NoError(t, err, "can't forward htlc packet")
receivedPkt := assertOutgoingLinkReceive(t, bobChannelLink, true)
assertNumCircuits(t, s, 1, 1)

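Many of these switch tests begin with genPreimage followed by sha256.Sum256 to build a payment hash. genPreimage's body isn't shown in this diff; the sketch below inlines its presumable equivalent (32 random bytes) so the pattern stands on its own.

package example

import (
	"crypto/rand"
	"crypto/sha256"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestPreimageToPaymentHash(t *testing.T) {
	// Draw a random 32-byte preimage, standing in for the genPreimage
	// helper used by the tests above.
	var preimage [32]byte
	_, err := rand.Read(preimage[:])
	require.NoError(t, err, "unable to generate preimage")

	// The payment hash carried in UpdateAddHTLC is the SHA-256 of the
	// preimage.
	rhash := sha256.Sum256(preimage[:])
	require.Len(t, rhash[:], sha256.Size)
}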

@ -39,6 +39,7 @@ import (
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/shachain"
"github.com/lightningnetwork/lnd/ticker"
"github.com/stretchr/testify/require"
)
var (
@ -966,21 +967,15 @@ func newThreeHopNetwork(t testing.TB, aliceChannel, firstBobChannel,
aliceServer, err := newMockServer(
t, "alice", startingHeight, aliceDb, hopNetwork.defaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobServer, err := newMockServer(
t, "bob", startingHeight, bobDb, hopNetwork.defaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
carolServer, err := newMockServer(
t, "carol", startingHeight, carolDb, hopNetwork.defaultDelta,
)
if err != nil {
t.Fatalf("unable to create carol server: %v", err)
}
require.NoError(t, err, "unable to create carol server")
// Apply all additional functional options to the servers before
// creating any links.
@ -1231,15 +1226,11 @@ func newTwoHopNetwork(t testing.TB,
aliceServer, err := newMockServer(
t, "alice", startingHeight, aliceDb, hopNetwork.defaultDelta,
)
if err != nil {
t.Fatalf("unable to create alice server: %v", err)
}
require.NoError(t, err, "unable to create alice server")
bobServer, err := newMockServer(
t, "bob", startingHeight, bobDb, hopNetwork.defaultDelta,
)
if err != nil {
t.Fatalf("unable to create bob server: %v", err)
}
require.NoError(t, err, "unable to create bob server")
// Create a mock decoder instead of the sphinx one in order to mock the
// route which the htlc should follow.


@ -28,9 +28,7 @@ func assertEngineExecution(t *testing.T, testNum int, valid bool,
// Get a new VM to execute.
vm, err := newEngine()
if err != nil {
t.Fatalf("unable to create engine: %v", err)
}
require.NoError(t, err, "unable to create engine")
// Execute the VM, only go on to the step-by-step execution if
// it doesn't validate as expected.
@ -42,9 +40,7 @@ func assertEngineExecution(t *testing.T, testNum int, valid bool,
// Now that the execution didn't match what we expected, fetch a new VM
// to step through.
vm, err = newEngine()
if err != nil {
t.Fatalf("unable to create engine: %v", err)
}
require.NoError(t, err, "unable to create engine")
// This buffer will trace execution of the Script, dumping out
// to stdout.
@ -178,9 +174,7 @@ func TestHTLCSenderSpendValidation(t *testing.T) {
// doesn't need to exist, as we'll only be validating spending from the
// transaction that references this.
txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
if err != nil {
t.Fatalf("unable to create txid: %v", err)
}
require.NoError(t, err, "unable to create txid")
fundingOut := &wire.OutPoint{
Hash: *txid,
Index: 50,
@ -580,9 +574,7 @@ func TestHTLCReceiverSpendValidation(t *testing.T) {
// doesn't need to exist, as we'll only be validating spending from the
// transaction that references this.
txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
if err != nil {
t.Fatalf("unable to create txid: %v", err)
}
require.NoError(t, err, "unable to create txid")
fundingOut := &wire.OutPoint{
Hash: *txid,
Index: 50,
@ -1009,9 +1001,7 @@ func TestSecondLevelHtlcSpends(t *testing.T) {
// Next, craft a fake HTLC outpoint that we'll use to generate the
// sweeping transaction.
txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
if err != nil {
t.Fatalf("unable to create txid: %v", err)
}
require.NoError(t, err, "unable to create txid")
htlcOutPoint := &wire.OutPoint{
Hash: *txid,
Index: 0,
@ -1039,13 +1029,9 @@ func TestSecondLevelHtlcSpends(t *testing.T) {
// sweep the output after a particular delay.
htlcWitnessScript, err := SecondLevelHtlcScript(revocationKey,
delayKey, claimDelay)
if err != nil {
t.Fatalf("unable to create htlc script: %v", err)
}
require.NoError(t, err, "unable to create htlc script")
htlcPkScript, err := WitnessScriptHash(htlcWitnessScript)
if err != nil {
t.Fatalf("unable to create htlc output: %v", err)
}
require.NoError(t, err, "unable to create htlc output")
htlcOutput := &wire.TxOut{
PkScript: htlcPkScript,
@ -1644,21 +1630,15 @@ func TestCommitSpendToRemoteConfirmed(t *testing.T) {
aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(testWalletPrivKey)
txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
if err != nil {
t.Fatalf("unable to create txid: %v", err)
}
require.NoError(t, err, "unable to create txid")
commitOut := &wire.OutPoint{
Hash: *txid,
Index: 0,
}
commitScript, err := CommitScriptToRemoteConfirmed(aliceKeyPub)
if err != nil {
t.Fatalf("unable to create htlc script: %v", err)
}
require.NoError(t, err, "unable to create htlc script")
commitPkScript, err := WitnessScriptHash(commitScript)
if err != nil {
t.Fatalf("unable to create htlc output: %v", err)
}
require.NoError(t, err, "unable to create htlc output")
commitOutput := &wire.TxOut{
PkScript: commitPkScript,
@ -1902,9 +1882,7 @@ func TestSpendAnchor(t *testing.T) {
// Create a fake anchor outpoint that we'll use to generate the
// sweeping transaction.
txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
if err != nil {
t.Fatalf("unable to create txid: %v", err)
}
require.NoError(t, err, "unable to create txid")
anchorOutPoint := &wire.OutPoint{
Hash: *txid,
Index: 0,
@ -1922,13 +1900,9 @@ func TestSpendAnchor(t *testing.T) {
// Generate the anchor script that can be spent by Alice immediately,
// or by anyone after 16 blocks.
anchorScript, err := CommitScriptAnchor(aliceKeyPub)
if err != nil {
t.Fatalf("unable to create htlc script: %v", err)
}
require.NoError(t, err, "unable to create htlc script")
anchorPkScript, err := WitnessScriptHash(anchorScript)
if err != nil {
t.Fatalf("unable to create htlc output: %v", err)
}
require.NoError(t, err, "unable to create htlc output")
anchorOutput := &wire.TxOut{
PkScript: anchorPkScript,
@ -2011,21 +1985,13 @@ func TestSpecificationKeyDerivation(t *testing.T) {
)
baseSecret, err := privkeyFromHex(baseSecretHex)
if err != nil {
t.Fatalf("Failed to parse serialized privkey: %v", err)
}
require.NoError(t, err, "Failed to parse serialized privkey")
perCommitmentSecret, err := privkeyFromHex(perCommitmentSecretHex)
if err != nil {
t.Fatalf("Failed to parse serialized privkey: %v", err)
}
require.NoError(t, err, "Failed to parse serialized privkey")
basePoint, err := pubkeyFromHex(basePointHex)
if err != nil {
t.Fatalf("Failed to parse serialized pubkey: %v", err)
}
require.NoError(t, err, "Failed to parse serialized pubkey")
perCommitmentPoint, err := pubkeyFromHex(perCommitmentPointHex)
if err != nil {
t.Fatalf("Failed to parse serialized pubkey: %v", err)
}
require.NoError(t, err, "Failed to parse serialized pubkey")
// name: derivation of key from basepoint and per_commitment_point
const expectedLocalKeyHex = "0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5"

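The rewritten messages above drop the trailing ": %v" because require.NoError takes a variadic msgAndArgs tail: a single string is used verbatim, while additional arguments treat the first as a Printf-style format. A short sketch, with parseSeed as a made-up stand-in:

package example

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// parseSeed is a hypothetical parser used only for illustration.
func parseSeed(b []byte) (int, error) {
	if len(b) == 0 {
		return 0, fmt.Errorf("empty seed")
	}
	return len(b), nil
}

func TestRequireMessages(t *testing.T) {
	seed := []byte{0x01, 0x02}

	// A single trailing string is used verbatim as the failure message.
	n, err := parseSeed(seed)
	require.NoError(t, err, "failed to parse serialized seed")

	// With more than one trailing argument, the first is treated as a
	// Printf-style format, so dynamic context survives the migration.
	_, err = parseSeed(seed[:n])
	require.NoError(t, err, "failed to parse seed %x", seed)
}
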
View file

@ -56,42 +56,26 @@ func TestTxWeightEstimator(t *testing.T) {
p2pkhAddr, err := btcutil.NewAddressPubKeyHash(
make([]byte, 20), netParams)
if err != nil {
t.Fatalf("Failed to generate address: %v", err)
}
require.NoError(t, err, "Failed to generate address")
p2pkhScript, err := txscript.PayToAddrScript(p2pkhAddr)
if err != nil {
t.Fatalf("Failed to generate scriptPubKey: %v", err)
}
require.NoError(t, err, "Failed to generate scriptPubKey")
p2wkhAddr, err := btcutil.NewAddressWitnessPubKeyHash(
make([]byte, 20), netParams)
if err != nil {
t.Fatalf("Failed to generate address: %v", err)
}
require.NoError(t, err, "Failed to generate address")
p2wkhScript, err := txscript.PayToAddrScript(p2wkhAddr)
if err != nil {
t.Fatalf("Failed to generate scriptPubKey: %v", err)
}
require.NoError(t, err, "Failed to generate scriptPubKey")
p2wshAddr, err := btcutil.NewAddressWitnessScriptHash(
make([]byte, 32), netParams)
if err != nil {
t.Fatalf("Failed to generate address: %v", err)
}
require.NoError(t, err, "Failed to generate address")
p2wshScript, err := txscript.PayToAddrScript(p2wshAddr)
if err != nil {
t.Fatalf("Failed to generate scriptPubKey: %v", err)
}
require.NoError(t, err, "Failed to generate scriptPubKey")
p2shAddr, err := btcutil.NewAddressScriptHash([]byte{0}, netParams)
if err != nil {
t.Fatalf("Failed to generate address: %v", err)
}
require.NoError(t, err, "Failed to generate address")
p2shScript, err := txscript.PayToAddrScript(p2shAddr)
if err != nil {
t.Fatalf("Failed to generate scriptPubKey: %v", err)
}
require.NoError(t, err, "Failed to generate scriptPubKey")
testCases := []struct {
numP2PKHInputs int

View file

@ -9,6 +9,7 @@ import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/clock"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
// invoiceExpiryWatcherTest holds a test fixture and implements checks
@ -72,9 +73,7 @@ func newInvoiceExpiryWatcherTest(t *testing.T, now time.Time,
return nil
})
if err != nil {
t.Fatalf("cannot start InvoiceExpiryWatcher: %v", err)
}
require.NoError(t, err, "cannot start InvoiceExpiryWatcher")
return test
}

View file

@ -132,9 +132,7 @@ func TestSettleInvoice(t *testing.T) {
testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight,
getCircuitKey(0), hodlChan, testPayload,
)
if err != nil {
t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err)
}
require.NoError(t, err, "unexpected NotifyExitHopHtlc error")
require.NotNil(t, resolution)
settleResolution = checkSettleResolution(
t, resolution, testInvoicePreimage,
@ -148,9 +146,7 @@ func TestSettleInvoice(t *testing.T) {
testInvoicePaymentHash, amtPaid+600, testHtlcExpiry, testCurrentHeight,
getCircuitKey(1), hodlChan, testPayload,
)
if err != nil {
t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err)
}
require.NoError(t, err, "unexpected NotifyExitHopHtlc error")
require.NotNil(t, resolution)
settleResolution = checkSettleResolution(
t, resolution, testInvoicePreimage,
@ -163,9 +159,7 @@ func TestSettleInvoice(t *testing.T) {
testInvoicePaymentHash, amtPaid-600, testHtlcExpiry, testCurrentHeight,
getCircuitKey(2), hodlChan, testPayload,
)
if err != nil {
t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err)
}
require.NoError(t, err, "unexpected NotifyExitHopHtlc error")
require.NotNil(t, resolution)
checkFailResolution(t, resolution, ResultAmountTooLow)

View file

@ -272,15 +272,11 @@ func newTestInvoice(t *testing.T, preimage lntypes.Preimage,
zpay32.Expiry(expiry),
zpay32.PaymentAddr(payAddr),
)
if err != nil {
t.Fatalf("Error while creating new invoice: %v", err)
}
require.NoError(t, err, "Error while creating new invoice")
paymentRequest, err := rawInvoice.Encode(testMessageSigner)
if err != nil {
t.Fatalf("Error while encoding payment request: %v", err)
}
require.NoError(t, err, "Error while encoding payment request")
return &channeldb.Invoice{
Terms: channeldb.ContractTerm{

View file

@ -11,9 +11,7 @@ func BenchmarkDerivePrivKey(t *testing.B) {
cleanUp, wallet, err := createTestBtcWallet(
CoinTypeBitcoin,
)
if err != nil {
t.Fatalf("unable to create wallet: %v", err)
}
require.NoError(t, err, "unable to create wallet")
keyRing := NewBtcWalletKeyRing(wallet, CoinTypeBitcoin)

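The benchmark above passes a *testing.B where the other files pass a *testing.T. Both work because require.NoError accepts the package's own TestingT interface (Errorf plus FailNow) rather than a concrete type. A sketch under that assumption, with newKeyRing as a hypothetical constructor:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// newKeyRing is a hypothetical fallible constructor.
func newKeyRing() (struct{}, error) { return struct{}{}, nil }

// require works inside benchmarks too, since *testing.B satisfies
// require.TestingT just like *testing.T does.
func BenchmarkKeyRing(b *testing.B) {
	keyRing, err := newKeyRing()
	require.NoError(b, err, "unable to create key ring")

	for i := 0; i < b.N; i++ {
		_ = keyRing
	}
}
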
View file

@ -202,9 +202,7 @@ func testLNAddress(t *testing.T, test lnAddressCase) {
lnAddr, err := ParseLNAddressString(
test.lnAddress, defaultTestPort, net.ResolveTCPAddr,
)
if err != nil {
t.Fatalf("unable to parse ln address: %v", err)
}
require.NoError(t, err, "unable to parse ln address")
// Assert that the public key matches the expected public key.
pkBytes := lnAddr.IdentityKey.SerializeCompressed()

View file

@ -99,9 +99,7 @@ func TestStaticFeeEstimator(t *testing.T) {
defer feeEstimator.Stop()
feeRate, err := feeEstimator.EstimateFeePerKW(6)
if err != nil {
t.Fatalf("unable to get fee rate: %v", err)
}
require.NoError(t, err, "unable to get fee rate")
if feeRate != feePerKw {
t.Fatalf("expected fee rate %v, got %v", feePerKw, feeRate)
@ -130,16 +128,12 @@ func TestSparseConfFeeSource(t *testing.T) {
}
testJSON := map[string]map[uint32]uint32{"fee_by_block_target": testFees}
jsonResp, err := json.Marshal(testJSON)
if err != nil {
t.Fatalf("unable to marshal JSON API response: %v", err)
}
require.NoError(t, err, "unable to marshal JSON API response")
reader := bytes.NewReader(jsonResp)
// Finally, ensure the expected map is returned without error.
fees, err := feeSource.ParseResponse(reader)
if err != nil {
t.Fatalf("unable to parse API response: %v", err)
}
require.NoError(t, err, "unable to parse API response")
if !reflect.DeepEqual(fees, testFees) {
t.Fatalf("expected %v, got %v", testFees, fees)
}
@ -148,9 +142,7 @@ func TestSparseConfFeeSource(t *testing.T) {
badFees := map[string]uint32{"hi": 12345, "hello": 42, "satoshi": 54321}
badJSON := map[string]map[string]uint32{"fee_by_block_target": badFees}
jsonResp, err = json.Marshal(badJSON)
if err != nil {
t.Fatalf("unable to marshal JSON API response: %v", err)
}
require.NoError(t, err, "unable to marshal JSON API response")
reader = bytes.NewReader(jsonResp)
// Finally, ensure the improperly formatted fees error.

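The final comment above exercises the opposite direction: input that is supposed to fail to parse. The matching helper is require.Error, which fails the test when the returned error is nil. A minimal sketch with a hypothetical parseFees:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// parseFees is a hypothetical parser that rejects malformed input.
func parseFees(s string) error {
	if s == "" {
		return errors.New("malformed fee response")
	}
	return nil
}

func TestFeeParsing(t *testing.T) {
	// The happy path must parse cleanly.
	require.NoError(t, parseFees(`{"fee_by_block_target":{}}`),
		"unable to parse response")

	// The malformed path must fail: require.Error fails the test when
	// the returned error is nil.
	require.Error(t, parseFees(""), "expected malformed response to error")
}
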
View file

@ -5,6 +5,7 @@ import (
"testing"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
// randDeliveryAddress generates a random delivery address for testing.
@ -13,9 +14,7 @@ func randDeliveryAddress(t *testing.T) lnwire.DeliveryAddress {
da := lnwire.DeliveryAddress(make([]byte, 34))
_, err := rand.Read(da)
if err != nil {
t.Fatalf("cannot generate random address: %v", err)
}
require.NoError(t, err, "cannot generate random address")
return da
}

View file

@ -38,9 +38,7 @@ func TestPsbtIntent(t *testing.T) {
// the funding intent.
a := NewPsbtAssembler(chanCapacity, nil, &params, true)
intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity})
if err != nil {
t.Fatalf("error provisioning channel: %v", err)
}
require.NoError(t, err, "error provisioning channel")
psbtIntent, ok := intent.(*PsbtIntent)
if !ok {
t.Fatalf("intent was not a PsbtIntent")
@ -69,20 +67,14 @@ func TestPsbtIntent(t *testing.T) {
localPubkey.SerializeCompressed(),
remotePubkey.SerializeCompressed(), int64(chanCapacity),
)
if err != nil {
t.Fatalf("error calculating script: %v", err)
}
require.NoError(t, err, "error calculating script")
witnessScriptHash := sha256.Sum256(script)
addr, err := btcutil.NewAddressWitnessScriptHash(
witnessScriptHash[:], &params,
)
if err != nil {
t.Fatalf("unable to encode address: %v", err)
}
require.NoError(t, err, "unable to encode address")
fundingAddr, amt, pendingPsbt, err := psbtIntent.FundingParams()
if err != nil {
t.Fatalf("unable to get funding params: %v", err)
}
require.NoError(t, err, "unable to get funding params")
if addr.EncodeAddress() != fundingAddr.EncodeAddress() {
t.Fatalf("unexpected address. got %s wanted %s", fundingAddr,
addr)
@ -120,9 +112,7 @@ func TestPsbtIntent(t *testing.T) {
// Verify the dummy PSBT with the intent.
err = psbtIntent.Verify(pendingPsbt, false)
if err != nil {
t.Fatalf("error verifying pending PSBT: %v", err)
}
require.NoError(t, err, "error verifying pending PSBT")
if psbtIntent.State != PsbtVerified {
t.Fatalf("unexpected state. got %d wanted %d", psbtIntent.State,
PsbtVerified)
@ -154,16 +144,12 @@ func TestPsbtIntent(t *testing.T) {
}
}()
err = psbtIntent.Finalize(pendingPsbt)
if err != nil {
t.Fatalf("error finalizing pending PSBT: %v", err)
}
require.NoError(t, err, "error finalizing pending PSBT")
wg.Wait()
// We should have a nil error in our channel now.
err = <-errChan
if err != nil {
t.Fatalf("unexpected error after finalize: %v", err)
}
require.NoError(t, err, "unexpected error after finalize")
if psbtIntent.State != PsbtFinalized {
t.Fatalf("unexpected state. got %d wanted %d", psbtIntent.State,
PsbtFinalized)
@ -171,9 +157,7 @@ func TestPsbtIntent(t *testing.T) {
// Make sure the funding transaction can be compiled.
_, err = psbtIntent.CompileFundingTx()
if err != nil {
t.Fatalf("error compiling funding TX from PSBT: %v", err)
}
require.NoError(t, err, "error compiling funding TX from PSBT")
if psbtIntent.State != PsbtFundingTxCompiled {
t.Fatalf("unexpected state. got %d wanted %d", psbtIntent.State,
PsbtFundingTxCompiled)
@ -204,24 +188,18 @@ func TestPsbtIntentBasePsbt(t *testing.T) {
localPubkey.SerializeCompressed(),
remotePubkey.SerializeCompressed(), int64(chanCapacity),
)
if err != nil {
t.Fatalf("error calculating script: %v", err)
}
require.NoError(t, err, "error calculating script")
witnessScriptHash := sha256.Sum256(script)
addr, err := btcutil.NewAddressWitnessScriptHash(
witnessScriptHash[:], &params,
)
if err != nil {
t.Fatalf("unable to encode address: %v", err)
}
require.NoError(t, err, "unable to encode address")
// Now as the next step, create a new assembler/intent pair with a base
// PSBT to see that we can add an additional output to it.
a := NewPsbtAssembler(chanCapacity, pendingPsbt, &params, true)
intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity})
if err != nil {
t.Fatalf("error provisioning channel: %v", err)
}
require.NoError(t, err, "error provisioning channel")
psbtIntent, ok := intent.(*PsbtIntent)
if !ok {
t.Fatalf("intent was not a PsbtIntent")
@ -230,9 +208,7 @@ func TestPsbtIntentBasePsbt(t *testing.T) {
&keychain.KeyDescriptor{PubKey: localPubkey}, remotePubkey,
)
newAddr, amt, twoOutPsbt, err := psbtIntent.FundingParams()
if err != nil {
t.Fatalf("unable to get funding params: %v", err)
}
require.NoError(t, err, "unable to get funding params")
if addr.EncodeAddress() != newAddr.EncodeAddress() {
t.Fatalf("unexpected address. got %s wanted %s", newAddr,
addr)
@ -468,9 +444,7 @@ func TestPsbtVerify(t *testing.T) {
// the funding intent.
a := NewPsbtAssembler(chanCapacity, nil, &params, true)
intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity})
if err != nil {
t.Fatalf("error provisioning channel: %v", err)
}
require.NoError(t, err, "error provisioning channel")
psbtIntent := intent.(*PsbtIntent)
// Bind our test keys to get the funding parameters.
@ -636,9 +610,7 @@ func TestPsbtFinalize(t *testing.T) {
// the funding intent.
a := NewPsbtAssembler(chanCapacity, nil, &params, true)
intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity})
if err != nil {
t.Fatalf("error provisioning channel: %v", err)
}
require.NoError(t, err, "error provisioning channel")
psbtIntent := intent.(*PsbtIntent)
// Bind our test keys to get the funding parameters.
@ -792,12 +764,8 @@ func TestVerifyAllInputsSegWit(t *testing.T) {
func clonePsbt(t *testing.T, p *psbt.Packet) *psbt.Packet {
var buf bytes.Buffer
err := p.Serialize(&buf)
if err != nil {
t.Fatalf("error serializing PSBT: %v", err)
}
require.NoError(t, err, "error serializing PSBT")
newPacket, err := psbt.NewFromRawBytes(&buf, false)
if err != nil {
t.Fatalf("error unserializing PSBT: %v", err)
}
require.NoError(t, err, "error unserializing PSBT")
return newPacket
}

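One caveat around the Finalize hunk above: require.NoError fails via t.FailNow, which the testing package only permits on the goroutine running the test, so the errChan hand-off the test already uses is the safe shape. A condensed sketch, with work as a stand-in for the goroutine's job:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// work is a hypothetical fallible call performed on a helper goroutine.
func work() error { return nil }

func TestGoroutineSafeCheck(t *testing.T) {
	errChan := make(chan error, 1)
	go func() {
		// Don't call require here: it fails via t.FailNow, which
		// must run on the goroutine executing the test. Ship the
		// error out instead.
		errChan <- work()
	}()

	// Receive and check on the test goroutine, where require is safe.
	err := <-errChan
	require.NoError(t, err, "unexpected error from worker")
}
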
File diff suppressed because it is too large

View file

@ -12,6 +12,7 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
var (
@ -155,9 +156,7 @@ func TestValidate(t *testing.T) {
chanSize := int64(1000000)
channelCtx, err := newChannelTestCtx(chanSize)
if err != nil {
t.Fatalf("unable to make channel context: %v", err)
}
require.NoError(t, err, "unable to make channel context")
testCases := []struct {
// expectedErr is the error we expect, this should be nil if

View file

@ -132,9 +132,7 @@ func assertProperBalance(t *testing.T, lw *lnwallet.LightningWallet,
numConfirms int32, amount float64) {
balance, err := lw.ConfirmedBalance(numConfirms, lnwallet.DefaultAccountName)
if err != nil {
t.Fatalf("unable to query for balance: %v", err)
}
require.NoError(t, err, "unable to query for balance")
if balance.ToBTC() != amount {
t.Fatalf("wallet credits not properly loaded, should have 40BTC, "+
"instead have %v", balance)
@ -161,9 +159,7 @@ func mineAndAssertTxInBlock(t *testing.T, miner *rpctest.Harness,
// We'll mine a block to confirm it.
blockHashes, err := miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate new block: %v", err)
}
require.NoError(t, err, "unable to generate new block")
// Finally, we'll check it was actually mined in this block.
block, err := miner.Client.GetBlock(blockHashes[0])
@ -188,13 +184,9 @@ func newPkScript(t *testing.T, w *lnwallet.LightningWallet,
t.Helper()
addr, err := w.NewAddress(addrType, false, lnwallet.DefaultAccountName)
if err != nil {
t.Fatalf("unable to create new address: %v", err)
}
require.NoError(t, err, "unable to create new address")
pkScript, err := txscript.PayToAddrScript(addr)
if err != nil {
t.Fatalf("unable to create output script: %v", err)
}
require.NoError(t, err, "unable to create output script")
return pkScript
}
@ -210,9 +202,7 @@ func sendCoins(t *testing.T, miner *rpctest.Harness,
tx, err := sender.SendOutputs(
[]*wire.TxOut{output}, feeRate, minConf, labels.External,
)
if err != nil {
t.Fatalf("unable to send transaction: %v", err)
}
require.NoError(t, err, "unable to send transaction")
if mineBlock {
mineAndAssertTxInBlock(t, miner, tx.TxHash())
@ -239,9 +229,7 @@ func assertTxInWallet(t *testing.T, w *lnwallet.LightningWallet,
// finding the expected transaction with its expected confirmation
// status.
txs, err := w.ListTransactionDetails(0, btcwallet.UnconfirmedHeight, "")
if err != nil {
t.Fatalf("unable to retrieve transactions: %v", err)
}
require.NoError(t, err, "unable to retrieve transactions")
for _, tx := range txs {
if tx.Hash != txHash {
continue
@ -445,9 +433,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
alice, bob *lnwallet.LightningWallet, t *testing.T) {
fundingAmount, err := btcutil.NewAmount(5)
if err != nil {
t.Fatalf("unable to create amt: %v", err)
}
require.NoError(t, err, "unable to create amt")
// In this scenario, we'll test a dual funder reservation, with each
// side putting in 5 BTC, for 10 BTC in total.
@ -455,9 +441,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
// Alice initiates a channel funded with 5 BTC for each side, so 10 BTC
// total. She also generates 2 BTC in change.
feePerKw, err := alice.Cfg.FeeEstimator.EstimateFeePerKW(1)
if err != nil {
t.Fatalf("unable to query fee estimator: %v", err)
}
require.NoError(t, err, "unable to query fee estimator")
aliceReq := &lnwallet.InitFundingReserveMsg{
ChainHash: chainHash,
NodeID: bobPub,
@ -470,9 +454,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
Flags: lnwire.FFAnnounceChannel,
}
aliceChanReservation, err := alice.InitChannelReservation(aliceReq)
if err != nil {
t.Fatalf("unable to initialize funding reservation: %v", err)
}
require.NoError(t, err, "unable to initialize funding reservation")
aliceChanReservation.SetNumConfsRequired(numReqConfs)
channelConstraints := &channeldb.ChannelConstraints{
DustLimit: alice.Cfg.DefaultConstraints.DustLimit,
@ -485,9 +467,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
err = aliceChanReservation.CommitConstraints(
channelConstraints, defaultMaxLocalCsvDelay, false,
)
if err != nil {
t.Fatalf("unable to verify constraints: %v", err)
}
require.NoError(t, err, "unable to verify constraints")
// The channel reservation should now be populated with a multi-sig key
// from our HD chain, a change output with 3 BTC, and 2 outputs
@ -515,23 +495,17 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
Flags: lnwire.FFAnnounceChannel,
}
bobChanReservation, err := bob.InitChannelReservation(bobReq)
if err != nil {
t.Fatalf("bob unable to init channel reservation: %v", err)
}
require.NoError(t, err, "bob unable to init channel reservation")
err = bobChanReservation.CommitConstraints(
channelConstraints, defaultMaxLocalCsvDelay, true,
)
if err != nil {
t.Fatalf("unable to verify constraints: %v", err)
}
require.NoError(t, err, "unable to verify constraints")
bobChanReservation.SetNumConfsRequired(numReqConfs)
assertContributionInitPopulated(t, bobChanReservation.OurContribution())
err = bobChanReservation.ProcessContribution(aliceContribution)
if err != nil {
t.Fatalf("bob unable to process alice's contribution: %v", err)
}
require.NoError(t, err, "bob unable to process alice's contribution")
assertContributionInitPopulated(t, bobChanReservation.TheirContribution())
bobContribution := bobChanReservation.OurContribution()
@ -541,9 +515,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
// material required to craft the funding transaction and commitment
// transactions.
err = aliceChanReservation.ProcessContribution(bobContribution)
if err != nil {
t.Fatalf("alice unable to process bob's contribution: %v", err)
}
require.NoError(t, err, "alice unable to process bob's contribution")
assertContributionInitPopulated(t, aliceChanReservation.TheirContribution())
// At this point, all Alice's signatures should be fully populated.
@ -578,9 +550,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
_, err = bobChanReservation.CompleteReservation(
aliceFundingSigs, aliceCommitSig,
)
if err != nil {
t.Fatalf("unable to consume bob's sigs: %v", err)
}
require.NoError(t, err, "unable to consume bob's sigs")
// At this point, the funding tx should have been populated.
fundingTx := aliceChanReservation.FinalFundingTx()
@ -592,9 +562,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
// DB.
fundingSha := fundingTx.TxHash()
aliceChannels, err := alice.Cfg.Database.FetchOpenChannels(bobPub)
if err != nil {
t.Fatalf("unable to retrieve channel from DB: %v", err)
}
require.NoError(t, err, "unable to retrieve channel from DB")
if !bytes.Equal(aliceChannels[0].FundingOutpoint.Hash[:], fundingSha[:]) {
t.Fatalf("channel state not properly saved")
}
@ -602,9 +570,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
t.Fatalf("channel not detected as dual funder")
}
bobChannels, err := bob.Cfg.Database.FetchOpenChannels(alicePub)
if err != nil {
t.Fatalf("unable to retrieve channel from DB: %v", err)
}
require.NoError(t, err, "unable to retrieve channel from DB")
if !bytes.Equal(bobChannels[0].FundingOutpoint.Hash[:], fundingSha[:]) {
t.Fatalf("channel state not properly saved")
}
@ -614,24 +580,16 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
// Let Alice publish the funding transaction.
err = alice.PublishTransaction(fundingTx, "")
if err != nil {
t.Fatalf("unable to publish funding tx: %v", err)
}
require.NoError(t, err, "unable to publish funding tx")
// Mine a single block, the funding transaction should be included
// within this block.
err = waitForMempoolTx(miner, &fundingSha)
if err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
require.NoError(t, err, "tx not relayed to miner")
blockHashes, err := miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
block, err := miner.Client.GetBlock(blockHashes[0])
if err != nil {
t.Fatalf("unable to find block: %v", err)
}
require.NoError(t, err, "unable to find block")
if len(block.Transactions) != 2 {
t.Fatalf("funding transaction wasn't mined: %v", err)
}
@ -645,13 +603,9 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
// Wait for wallets to catch up to prevent issues in subsequent tests.
err = waitForWalletSync(miner, alice)
if err != nil {
t.Fatalf("unable to sync alice: %v", err)
}
require.NoError(t, err, "unable to sync alice")
err = waitForWalletSync(miner, bob)
if err != nil {
t.Fatalf("unable to sync bob: %v", err)
}
require.NoError(t, err, "unable to sync bob")
}
func testFundingTransactionLockedOutputs(miner *rpctest.Harness,
@ -659,13 +613,9 @@ func testFundingTransactionLockedOutputs(miner *rpctest.Harness,
// Create a single channel asking for 16 BTC total.
fundingAmount, err := btcutil.NewAmount(8)
if err != nil {
t.Fatalf("unable to create amt: %v", err)
}
require.NoError(t, err, "unable to create amt")
feePerKw, err := alice.Cfg.FeeEstimator.EstimateFeePerKW(1)
if err != nil {
t.Fatalf("unable to query fee estimator: %v", err)
}
require.NoError(t, err, "unable to query fee estimator")
req := &lnwallet.InitFundingReserveMsg{
ChainHash: chainHash,
NodeID: bobPub,
@ -686,9 +636,7 @@ func testFundingTransactionLockedOutputs(miner *rpctest.Harness,
// requesting 900 BTC. We only have around 64BTC worth of outpoints
// that aren't locked, so this should fail.
amt, err := btcutil.NewAmount(900)
if err != nil {
t.Fatalf("unable to create amt: %v", err)
}
require.NoError(t, err, "unable to create amt")
failedReq := &lnwallet.InitFundingReserveMsg{
ChainHash: chainHash,
NodeID: bobPub,
@ -717,15 +665,11 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness,
alice, _ *lnwallet.LightningWallet, t *testing.T) {
feePerKw, err := alice.Cfg.FeeEstimator.EstimateFeePerKW(1)
if err != nil {
t.Fatalf("unable to query fee estimator: %v", err)
}
require.NoError(t, err, "unable to query fee estimator")
// Create a reservation for 44 BTC.
fundingAmount, err := btcutil.NewAmount(44)
if err != nil {
t.Fatalf("unable to create amt: %v", err)
}
require.NoError(t, err, "unable to create amt")
req := &lnwallet.InitFundingReserveMsg{
ChainHash: chainHash,
NodeID: bobPub,
@ -739,9 +683,7 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness,
PendingChanID: [32]byte{2, 3, 4, 5},
}
chanReservation, err := alice.InitChannelReservation(req)
if err != nil {
t.Fatalf("unable to initialize funding reservation: %v", err)
}
require.NoError(t, err, "unable to initialize funding reservation")
// Attempt to create another channel with 44 BTC, this should fail.
req.PendingChanID = [32]byte{3, 4, 5, 6}
@ -784,9 +726,7 @@ func testCancelNonExistentReservation(miner *rpctest.Harness,
alice, _ *lnwallet.LightningWallet, t *testing.T) {
feePerKw, err := alice.Cfg.FeeEstimator.EstimateFeePerKW(1)
if err != nil {
t.Fatalf("unable to query fee estimator: %v", err)
}
require.NoError(t, err, "unable to query fee estimator")
// Create our own reservation, give it some ID.
res, err := lnwallet.NewChannelReservation(
@ -794,9 +734,7 @@ func testCancelNonExistentReservation(miner *rpctest.Harness,
lnwire.FFAnnounceChannel, lnwallet.CommitmentTypeTweakless,
nil, [32]byte{}, 0,
)
if err != nil {
t.Fatalf("unable to create res: %v", err)
}
require.NoError(t, err, "unable to create res")
// Attempt to cancel this reservation. This should fail, we know
// nothing of it.
@ -813,9 +751,7 @@ func testReservationInitiatorBalanceBelowDustCancel(miner *rpctest.Harness,
// and result in a failure to create the reservation.
const numBTC = 4
fundingAmount, err := btcutil.NewAmount(numBTC)
if err != nil {
t.Fatalf("unable to create amt: %v", err)
}
require.NoError(t, err, "unable to create amt")
feePerKw := chainfee.SatPerKWeight(
numBTC * numBTC * btcutil.SatoshiPerBitcoin,
@ -897,14 +833,10 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
// funded solely by us. We'll also initially push 1 BTC of the channel
// towards Bob's side.
fundingAmt, err := btcutil.NewAmount(4)
if err != nil {
t.Fatalf("unable to create amt: %v", err)
}
require.NoError(t, err, "unable to create amt")
pushAmt := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin)
feePerKw, err := alice.Cfg.FeeEstimator.EstimateFeePerKW(1)
if err != nil {
t.Fatalf("unable to query fee estimator: %v", err)
}
require.NoError(t, err, "unable to query fee estimator")
aliceReq := &lnwallet.InitFundingReserveMsg{
ChainHash: chainHash,
PendingChanID: pendingChanID,
@ -920,9 +852,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
ChanFunder: aliceChanFunder,
}
aliceChanReservation, err := alice.InitChannelReservation(aliceReq)
if err != nil {
t.Fatalf("unable to init channel reservation: %v", err)
}
require.NoError(t, err, "unable to init channel reservation")
aliceChanReservation.SetNumConfsRequired(numReqConfs)
channelConstraints := &channeldb.ChannelConstraints{
DustLimit: alice.Cfg.DefaultConstraints.DustLimit,
@ -935,9 +865,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
err = aliceChanReservation.CommitConstraints(
channelConstraints, defaultMaxLocalCsvDelay, false,
)
if err != nil {
t.Fatalf("unable to verify constraints: %v", err)
}
require.NoError(t, err, "unable to verify constraints")
// Verify all contribution fields have been set properly, but only if
// Alice is the funder herself.
@ -972,15 +900,11 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
CommitType: commitType,
}
bobChanReservation, err := bob.InitChannelReservation(bobReq)
if err != nil {
t.Fatalf("unable to create bob reservation: %v", err)
}
require.NoError(t, err, "unable to create bob reservation")
err = bobChanReservation.CommitConstraints(
channelConstraints, defaultMaxLocalCsvDelay, true,
)
if err != nil {
t.Fatalf("unable to verify constraints: %v", err)
}
require.NoError(t, err, "unable to verify constraints")
bobChanReservation.SetNumConfsRequired(numReqConfs)
// We'll ensure that Bob's contribution also gets generated properly.
@ -990,9 +914,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
// With his contribution generated, he can now process Alice's
// contribution.
err = bobChanReservation.ProcessSingleContribution(aliceContribution)
if err != nil {
t.Fatalf("bob unable to process alice's contribution: %v", err)
}
require.NoError(t, err, "bob unable to process alice's contribution")
assertContributionInitPopulated(t, bobChanReservation.TheirContribution())
// Bob will next send over his contribution to Alice, we simulate this
@ -1042,9 +964,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
_, err = bobChanReservation.CompleteReservationSingle(
fundingPoint, aliceCommitSig,
)
if err != nil {
t.Fatalf("bob unable to consume single reservation: %v", err)
}
require.NoError(t, err, "bob unable to consume single reservation")
// Finally, we'll conclude the reservation process by sending over
// Bob's commitment signature, which is the final thing Alice needs to
@ -1056,9 +976,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
_, err = aliceChanReservation.CompleteReservation(
nil, bobCommitSig,
)
if err != nil {
t.Fatalf("alice unable to complete reservation: %v", err)
}
require.NoError(t, err, "alice unable to complete reservation")
// If the caller provided an alternative way to obtain the funding tx,
// then we'll use that. Otherwise, we'll obtain it directly from Alice.
@ -1073,9 +991,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
// DB for both Alice and Bob.
fundingSha := fundingTx.TxHash()
aliceChannels, err := alice.Cfg.Database.FetchOpenChannels(bobPub)
if err != nil {
t.Fatalf("unable to retrieve channel from DB: %v", err)
}
require.NoError(t, err, "unable to retrieve channel from DB")
if len(aliceChannels) != 1 {
t.Fatalf("alice didn't save channel state: %v", err)
}
@ -1093,9 +1009,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
}
bobChannels, err := bob.Cfg.Database.FetchOpenChannels(alicePub)
if err != nil {
t.Fatalf("unable to retrieve channel from DB: %v", err)
}
require.NoError(t, err, "unable to retrieve channel from DB")
if len(bobChannels) != 1 {
t.Fatalf("bob didn't save channel state: %v", err)
}
@ -1114,24 +1028,16 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
// Let Alice publish the funding transaction.
err = alice.PublishTransaction(fundingTx, "")
if err != nil {
t.Fatalf("unable to publish funding tx: %v", err)
}
require.NoError(t, err, "unable to publish funding tx")
// Mine a single block, the funding transaction should be included
// within this block.
err = waitForMempoolTx(miner, &fundingSha)
if err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
require.NoError(t, err, "tx not relayed to miner")
blockHashes, err := miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
block, err := miner.Client.GetBlock(blockHashes[0])
if err != nil {
t.Fatalf("unable to find block: %v", err)
}
require.NoError(t, err, "unable to find block")
if len(block.Transactions) != 2 {
t.Fatalf("funding transaction wasn't mined: %d",
len(block.Transactions))
@ -1198,16 +1104,12 @@ func testListTransactionDetails(miner *rpctest.Harness,
// Get the miner's current best block height before we mine blocks.
_, startHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("cannot get best block: %v", err)
}
require.NoError(t, err, "cannot get best block")
// Generate 10 blocks to mine all the transactions created above.
const numBlocksMined = 10
blocks, err := miner.Client.Generate(numBlocksMined)
if err != nil {
t.Fatalf("unable to mine blocks: %v", err)
}
require.NoError(t, err, "unable to mine blocks")
// Our new best block height should be our start height + the number of
// blocks we just mined.
@ -1219,15 +1121,11 @@ func testListTransactionDetails(miner *rpctest.Harness,
// not include unconfirmed transactions, since all of our transactions
// should be confirmed.
err = waitForWalletSync(miner, alice)
if err != nil {
t.Fatalf("Couldn't sync Alice's wallet: %v", err)
}
require.NoError(t, err, "Couldn't sync Alice's wallet")
txDetails, err := alice.ListTransactionDetails(
startHeight, chainTip, "",
)
if err != nil {
t.Fatalf("unable to fetch tx details: %v", err)
}
require.NoError(t, err, "unable to fetch tx details")
// This is a mapping from:
// blockHash -> transactionHash -> transactionOutputs
@ -1311,33 +1209,23 @@ func testListTransactionDetails(miner *rpctest.Harness,
// Next create a transaction paying to an output which isn't under the
// wallet's control.
minerAddr, err := miner.NewAddress()
if err != nil {
t.Fatalf("unable to generate address: %v", err)
}
require.NoError(t, err, "unable to generate address")
outputScript, err := txscript.PayToAddrScript(minerAddr)
if err != nil {
t.Fatalf("unable to make output script: %v", err)
}
require.NoError(t, err, "unable to make output script")
burnOutput := wire.NewTxOut(outputAmt, outputScript)
burnTX, err := alice.SendOutputs(
[]*wire.TxOut{burnOutput}, 2500, 1, labels.External,
)
if err != nil {
t.Fatalf("unable to create burn tx: %v", err)
}
require.NoError(t, err, "unable to create burn tx")
burnTXID := burnTX.TxHash()
err = waitForMempoolTx(miner, &burnTXID)
if err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
require.NoError(t, err, "tx not relayed to miner")
// Before we mine the next block, we'll ensure that the above
// transaction shows up in the set of unconfirmed transactions returned
// by ListTransactionDetails.
err = waitForWalletSync(miner, alice)
if err != nil {
t.Fatalf("Couldn't sync Alice's wallet: %v", err)
}
require.NoError(t, err, "Couldn't sync Alice's wallet")
// Query our wallet for transactions from the chain tip, including
// unconfirmed transactions. The transaction above should be included
@ -1346,9 +1234,7 @@ func testListTransactionDetails(miner *rpctest.Harness,
txDetails, err = alice.ListTransactionDetails(
chainTip, btcwallet.UnconfirmedHeight, "",
)
if err != nil {
t.Fatalf("unable to fetch tx details: %v", err)
}
require.NoError(t, err, "unable to fetch tx details")
var mempoolTxFound bool
for _, txDetail := range txDetails {
if !bytes.Equal(txDetail.Hash[:], burnTXID[:]) {
@ -1387,9 +1273,7 @@ func testListTransactionDetails(miner *rpctest.Harness,
// Generate one block for our transaction to confirm in.
var numBlocks int32 = 1
burnBlock, err := miner.Client.Generate(uint32(numBlocks))
if err != nil {
t.Fatalf("unable to mine block: %v", err)
}
require.NoError(t, err, "unable to mine block")
// Progress our chain tip by the number of blocks we have just mined.
chainTip += numBlocks
@ -1399,13 +1283,9 @@ func testListTransactionDetails(miner *rpctest.Harness,
// are inclusive, so we use chainTip for both parameters to get only
// transactions from the last block.
err = waitForWalletSync(miner, alice)
if err != nil {
t.Fatalf("Couldn't sync Alice's wallet: %v", err)
}
require.NoError(t, err, "Couldn't sync Alice's wallet")
txDetails, err = alice.ListTransactionDetails(chainTip, chainTip, "")
if err != nil {
t.Fatalf("unable to fetch tx details: %v", err)
}
require.NoError(t, err, "unable to fetch tx details")
var burnTxFound bool
for _, txDetail := range txDetails {
if !bytes.Equal(txDetail.Hash[:], burnTXID[:]) {
@ -1438,21 +1318,15 @@ func testListTransactionDetails(miner *rpctest.Harness,
// Generate a block which has no wallet transactions in it.
chainTip += numBlocks
_, err = miner.Client.Generate(uint32(numBlocks))
if err != nil {
t.Fatalf("unable to mine block: %v", err)
}
require.NoError(t, err, "unable to mine block")
err = waitForWalletSync(miner, alice)
if err != nil {
t.Fatalf("Couldn't sync Alice's wallet: %v", err)
}
require.NoError(t, err, "Couldn't sync Alice's wallet")
// Query for transactions only in the latest block. We do not expect
// any transactions to be returned.
txDetails, err = alice.ListTransactionDetails(chainTip, chainTip, "")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
require.NoError(t, err, "unexpected error")
if len(txDetails) != 0 {
t.Fatalf("expected 0 transactions, got: %v", len(txDetails))
}
@ -1591,29 +1465,21 @@ func testTransactionSubscriptions(miner *rpctest.Harness,
b := txscript.NewScriptBuilder()
b.AddOp(txscript.OP_RETURN)
outputScript, err := b.Script()
if err != nil {
t.Fatalf("unable to make output script: %v", err)
}
require.NoError(t, err, "unable to make output script")
burnOutput := wire.NewTxOut(outputAmt, outputScript)
tx, err := alice.SendOutputs(
[]*wire.TxOut{burnOutput}, 2500, 1, labels.External,
)
if err != nil {
t.Fatalf("unable to create burn tx: %v", err)
}
require.NoError(t, err, "unable to create burn tx")
txid := tx.TxHash()
err = waitForMempoolTx(miner, &txid)
if err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
require.NoError(t, err, "tx not relayed to miner")
// Before we mine the next block, we'll ensure that the above
// transaction shows up in the set of unconfirmed transactions returned
// by ListTransactionDetails.
err = waitForWalletSync(miner, alice)
if err != nil {
t.Fatalf("Couldn't sync Alice's wallet: %v", err)
}
require.NoError(t, err, "Couldn't sync Alice's wallet")
// As we just sent the transaction and it landed in the mempool, we
// should get a notification for a new unconfirmed transaction
@ -1781,9 +1647,7 @@ func newTx(t *testing.T, r *rpctest.Harness, pubKey *btcec.PublicKey,
t.Helper()
keyScript, err := scriptFromKey(pubKey)
if err != nil {
t.Fatalf("unable to generate script: %v", err)
}
require.NoError(t, err, "unable to generate script")
// Instruct the wallet to fund the output with a newly created
// transaction.
@ -1794,9 +1658,7 @@ func newTx(t *testing.T, r *rpctest.Harness, pubKey *btcec.PublicKey,
tx, err := alice.SendOutputs(
[]*wire.TxOut{newOutput}, 2500, 1, labels.External,
)
if err != nil {
t.Fatalf("unable to create output: %v", err)
}
require.NoError(t, err, "unable to create output")
// Query for the transaction generated above so we can locate the
// index of our output.
@ -1824,9 +1686,7 @@ func testPublishTransaction(r *rpctest.Harness,
// Generate a pubkey, and pay-to-addr script.
keyDesc, err := alice.DeriveNextKey(keychain.KeyFamilyMultiSig)
if err != nil {
t.Fatalf("unable to obtain public key: %v", err)
}
require.NoError(t, err, "unable to obtain public key")
// We will first check that publishing a transaction already in the
// mempool does NOT return an error. Create the tx.
@ -1834,22 +1694,16 @@ func testPublishTransaction(r *rpctest.Harness,
// Publish the transaction.
err = alice.PublishTransaction(tx1, labels.External)
if err != nil {
t.Fatalf("unable to publish: %v", err)
}
require.NoError(t, err, "unable to publish")
txid1 := tx1.TxHash()
err = waitForMempoolTx(r, &txid1)
if err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
require.NoError(t, err, "tx not relayed to miner")
// Publish the exact same transaction again. This should not return an
// error, even though the transaction is already in the mempool.
err = alice.PublishTransaction(tx1, labels.External)
if err != nil {
t.Fatalf("unable to publish: %v", err)
}
require.NoError(t, err, "unable to publish")
// Mine the transaction.
if _, err := r.Client.Generate(1); err != nil {
@ -1867,9 +1721,7 @@ func testPublishTransaction(r *rpctest.Harness,
// Publish this tx.
err = alice.PublishTransaction(tx2, labels.External)
if err != nil {
t.Fatalf("unable to publish: %v", err)
}
require.NoError(t, err, "unable to publish")
// Mine the transaction.
if err := mineAndAssert(r, tx2); err != nil {
@ -1879,9 +1731,7 @@ func testPublishTransaction(r *rpctest.Harness,
// Publish the transaction again. It is already mined, and we don't
// expect this to return an error.
err = alice.PublishTransaction(tx2, labels.External)
if err != nil {
t.Fatalf("unable to publish: %v", err)
}
require.NoError(t, err, "unable to publish")
// We'll do the next mempool check on both RBF and non-RBF enabled
// transactions.
@ -2035,9 +1885,7 @@ func testSignOutputUsingTweaks(r *rpctest.Harness,
pubKey, err := alice.DeriveNextKey(
keychain.KeyFamilyMultiSig,
)
if err != nil {
t.Fatalf("unable to obtain public key: %v", err)
}
require.NoError(t, err, "unable to obtain public key")
// As we'd like to test both single tweak, and double tweak spends,
// we'll generate a commitment pre-image, then derive a revocation key
@ -2179,32 +2027,22 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet,
// create any new non-coinbase transactions. We'll then check if it's
// the same after the empty reorg.
_, err := r.Client.Generate(5)
if err != nil {
t.Fatalf("unable to generate blocks on passed node: %v", err)
}
require.NoError(t, err, "unable to generate blocks on passed node")
// Give wallet time to catch up.
err = waitForWalletSync(r, w)
if err != nil {
t.Fatalf("unable to sync wallet: %v", err)
}
require.NoError(t, err, "unable to sync wallet")
// Send some money from the miner to the wallet
err = loadTestCredits(r, w, 20, 4)
if err != nil {
t.Fatalf("unable to send money to lnwallet: %v", err)
}
require.NoError(t, err, "unable to send money to lnwallet")
// Send some money from the wallet back to the miner.
// Grab a fresh address from the miner to house this output.
minerAddr, err := r.NewAddress()
if err != nil {
t.Fatalf("unable to generate address for miner: %v", err)
}
require.NoError(t, err, "unable to generate address for miner")
script, err := txscript.PayToAddrScript(minerAddr)
if err != nil {
t.Fatalf("unable to create pay to addr script: %v", err)
}
require.NoError(t, err, "unable to create pay to addr script")
output := &wire.TxOut{
Value: 1e8,
PkScript: script,
@ -2212,46 +2050,30 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet,
tx, err := w.SendOutputs(
[]*wire.TxOut{output}, 2500, 1, labels.External,
)
if err != nil {
t.Fatalf("unable to send outputs: %v", err)
}
require.NoError(t, err, "unable to send outputs")
txid := tx.TxHash()
err = waitForMempoolTx(r, &txid)
if err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
require.NoError(t, err, "tx not relayed to miner")
_, err = r.Client.Generate(50)
if err != nil {
t.Fatalf("unable to generate blocks on passed node: %v", err)
}
require.NoError(t, err, "unable to generate blocks on passed node")
// Give wallet time to catch up.
err = waitForWalletSync(r, w)
if err != nil {
t.Fatalf("unable to sync wallet: %v", err)
}
require.NoError(t, err, "unable to sync wallet")
// Get the original balance.
origBalance, err := w.ConfirmedBalance(1, lnwallet.DefaultAccountName)
if err != nil {
t.Fatalf("unable to query for balance: %v", err)
}
require.NoError(t, err, "unable to query for balance")
// Now we cause a reorganization as follows.
// Step 1: create a new miner and start it.
r2, err := rpctest.New(r.ActiveNet, nil, []string{"--txindex"}, "")
if err != nil {
t.Fatalf("unable to create mining node: %v", err)
}
require.NoError(t, err, "unable to create mining node")
err = r2.SetUp(false, 0)
if err != nil {
t.Fatalf("unable to set up mining node: %v", err)
}
require.NoError(t, err, "unable to set up mining node")
defer r2.TearDown()
newBalance, err := w.ConfirmedBalance(1, lnwallet.DefaultAccountName)
if err != nil {
t.Fatalf("unable to query for balance: %v", err)
}
require.NoError(t, err, "unable to query for balance")
if origBalance != newBalance {
t.Fatalf("wallet balance incorrect, should have %v, "+
"instead have %v", origBalance, newBalance)
@ -2260,13 +2082,9 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet,
// Step 2: connect the miner to the passed miner and wait for
// synchronization.
err = r2.Client.AddNode(r.P2PAddress(), rpcclient.ANAdd)
if err != nil {
t.Fatalf("unable to connect mining nodes together: %v", err)
}
require.NoError(t, err, "unable to connect mining nodes together")
err = rpctest.JoinNodes([]*rpctest.Harness{r2, r}, rpctest.Blocks)
if err != nil {
t.Fatalf("unable to synchronize mining nodes: %v", err)
}
require.NoError(t, err, "unable to synchronize mining nodes")
// Step 3: Do a set of reorgs by disconnecting the two miners, mining
// one block on the passed miner and two on the created miner,
@ -2341,9 +2159,7 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet,
// Now we check that the wallet balance stays the same.
newBalance, err = w.ConfirmedBalance(1, lnwallet.DefaultAccountName)
if err != nil {
t.Fatalf("unable to query for balance: %v", err)
}
require.NoError(t, err, "unable to query for balance")
if origBalance != newBalance {
t.Fatalf("wallet balance incorrect, should have %v, "+
"instead have %v", origBalance, newBalance)
@ -2364,9 +2180,7 @@ func testChangeOutputSpendConfirmation(r *rpctest.Harness,
// we'll craft the following transaction so that Alice doesn't have any
// UTXOs left.
aliceBalance, err := alice.ConfirmedBalance(0, lnwallet.DefaultAccountName)
if err != nil {
t.Fatalf("unable to retrieve alice's balance: %v", err)
}
require.NoError(t, err, "unable to retrieve alice's balance")
bobPkScript := newPkScript(t, bob, lnwallet.WitnessPubKey)
// We'll use a transaction fee of 14380 satoshis, which will allow us to
@ -2389,9 +2203,7 @@ func testChangeOutputSpendConfirmation(r *rpctest.Harness,
// With the transaction sent and confirmed, Alice's balance should now
// be 0.
aliceBalance, err = alice.ConfirmedBalance(0, lnwallet.DefaultAccountName)
if err != nil {
t.Fatalf("unable to retrieve alice's balance: %v", err)
}
require.NoError(t, err, "unable to retrieve alice's balance")
if aliceBalance != 0 {
t.Fatalf("expected alice's balance to be 0 BTC, found %v",
aliceBalance)
@ -2445,9 +2257,7 @@ func testSpendUnconfirmed(miner *rpctest.Harness,
// First we will empty out bob's wallet, sending the entire balance
// to alice.
bobBalance, err := bob.ConfirmedBalance(0, lnwallet.DefaultAccountName)
if err != nil {
t.Fatalf("unable to retrieve bob's balance: %v", err)
}
require.NoError(t, err, "unable to retrieve bob's balance")
txFee := btcutil.Amount(28760)
output := &wire.TxOut{
Value: int64(bobBalance - txFee),
@ -2504,9 +2314,7 @@ func testSpendUnconfirmed(miner *rpctest.Harness,
// Mine the unconfirmed transactions.
err = waitForMempoolTx(miner, &txHash)
if err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
require.NoError(t, err, "tx not relayed to miner")
if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
@ -2601,9 +2409,7 @@ func testCreateSimpleTx(r *rpctest.Harness, w *lnwallet.LightningWallet,
// Send some money from the miner to the wallet
err := loadTestCredits(r, w, 20, 4)
if err != nil {
t.Fatalf("unable to send money to lnwallet: %v", err)
}
require.NoError(t, err, "unable to send money to lnwallet")
// The test cases we will run through for all backends.
testCases := []struct {
@ -3083,13 +2889,9 @@ func testSingleFunderExternalFundingTx(miner *rpctest.Harness,
// First, we'll obtain multi-sig keys from both Alice and Bob which
// simulates them exchanging keys on a higher level.
aliceFundingKey, err := alice.DeriveNextKey(keychain.KeyFamilyMultiSig)
if err != nil {
t.Fatalf("unable to obtain alice funding key: %v", err)
}
require.NoError(t, err, "unable to obtain alice funding key")
bobFundingKey, err := bob.DeriveNextKey(keychain.KeyFamilyMultiSig)
if err != nil {
t.Fatalf("unable to obtain bob funding key: %v", err)
}
require.NoError(t, err, "unable to obtain bob funding key")
// We'll now set up for them to open a 4 BTC channel, with 1 BTC pushed
// to Bob's side.
@ -3120,9 +2922,7 @@ func testSingleFunderExternalFundingTx(miner *rpctest.Harness,
)
},
})
if err != nil {
t.Fatalf("unable to perform coin selection: %v", err)
}
require.NoError(t, err, "unable to perform coin selection")
// With our intent created, we'll instruct it to finalize the funding
// transaction, and also hand us the outpoint so we can simulate
@ -3168,9 +2968,7 @@ func testSingleFunderExternalFundingTx(miner *rpctest.Harness,
)
},
})
if err != nil {
t.Fatalf("unable to create shim intent for bob: %v", err)
}
require.NoError(t, err, "unable to create shim intent for bob")
// At this point, we have everything we need to carry out our test, so
// we'll begin the funding flow between Alice and Bob.
@ -3180,9 +2978,7 @@ func testSingleFunderExternalFundingTx(miner *rpctest.Harness,
// from Alice.
pendingChanID := testHdSeed
err = bob.RegisterFundingIntent(pendingChanID, bobShimIntent)
if err != nil {
t.Fatalf("unable to register intent: %v", err)
}
require.NoError(t, err, "unable to register intent")
// Now we can carry out the single funding flow as normal, we'll
// specify our external funder and funding transaction, as well as the
@ -3219,9 +3015,7 @@ func TestLightningWallet(t *testing.T, targetBackEnd string) {
miningNode, err := rpctest.New(
netParams, nil, []string{"--txindex"}, "",
)
if err != nil {
t.Fatalf("unable to create mining node: %v", err)
}
require.NoError(t, err, "unable to create mining node")
defer miningNode.TearDown()
if err := miningNode.SetUp(true, 25); err != nil {
t.Fatalf("unable to set up mining node: %v", err)
@ -3237,27 +3031,19 @@ func TestLightningWallet(t *testing.T, targetBackEnd string) {
rpcConfig := miningNode.RPCConfig()
tempDir, err := ioutil.TempDir("", "channeldb")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
db, err := channeldb.Open(tempDir)
if err != nil {
t.Fatalf("unable to create db: %v", err)
}
require.NoError(t, err, "unable to create db")
testCfg := chainntnfs.CacheConfig{
QueryDisable: false,
}
hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db.Backend)
if err != nil {
t.Fatalf("unable to create height hint cache: %v", err)
}
require.NoError(t, err, "unable to create height hint cache")
blockCache := blockcache.NewBlockCache(10000)
chainNotifier, err := btcdnotify.New(
&rpcConfig, netParams, hintCache, hintCache, blockCache,
)
if err != nil {
t.Fatalf("unable to create notifier: %v", err)
}
require.NoError(t, err, "unable to create notifier")
if err := chainNotifier.Start(); err != nil {
t.Fatalf("unable to start notifier: %v", err)
}
@ -3300,15 +3086,11 @@ func runTests(t *testing.T, walletDriver *lnwallet.WalletDriver,
)
tempTestDirAlice, err := ioutil.TempDir("", "lnwallet")
if err != nil {
t.Fatalf("unable to create temp directory: %v", err)
}
require.NoError(t, err, "unable to create temp directory")
defer os.RemoveAll(tempTestDirAlice)
tempTestDirBob, err := ioutil.TempDir("", "lnwallet")
if err != nil {
t.Fatalf("unable to create temp directory: %v", err)
}
require.NoError(t, err, "unable to create temp directory")
defer os.RemoveAll(tempTestDirBob)
blockCache := blockcache.NewBlockCache(10000)
@ -3622,18 +3404,14 @@ func runTests(t *testing.T, walletDriver *lnwallet.WalletDriver,
chainNotifier, aliceWalletController, aliceKeyRing,
aliceSigner, bio,
)
if err != nil {
t.Fatalf("unable to create test ln wallet: %v", err)
}
require.NoError(t, err, "unable to create test ln wallet")
defer alice.Shutdown()
bob, err := createTestWallet(
tempTestDirBob, miningNode, netParams,
chainNotifier, bobWalletController, bobKeyRing, bobSigner, bio,
)
if err != nil {
t.Fatalf("unable to create test ln wallet: %v", err)
}
require.NoError(t, err, "unable to create test ln wallet")
defer bob.Shutdown()
// Both wallets should now have 80BTC available for

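Throughout this file the rewrite consistently reaches for require rather than its sibling assert. The distinction matters in wallet tests whose later steps depend on earlier ones: assert records a failure and keeps going, require stops immediately, matching the t.Fatalf calls it replaces. A compact sketch with a hypothetical syncWallet step:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// syncWallet is a hypothetical setup step that later steps depend on.
func syncWallet() error { return nil }

func TestFailFast(t *testing.T) {
	err := syncWallet()

	// assert.NoError would mark the test failed but let it keep
	// running, so later steps would operate on a broken wallet.
	assert.NoError(t, err)

	// require.NoError stops the test on failure via t.FailNow, which
	// is why this migration uses require throughout.
	require.NoError(t, err)
}
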
View file

@ -495,9 +495,7 @@ func testSpendValidation(t *testing.T, tweakless bool) {
// doesn't need to exist, as we'll only be validating spending from the
// transaction that references this.
txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
if err != nil {
t.Fatalf("unable to create txid: %v", err)
}
require.NoError(t, err, "unable to create txid")
fundingOut := &wire.OutPoint{
Hash: *txid,
Index: 50,
@ -585,9 +583,7 @@ func testSpendValidation(t *testing.T, tweakless bool) {
// We're testing an uncooperative close, output sweep, so construct a
// transaction which sweeps the funds to a random address.
targetOutput, err := input.CommitScriptUnencumbered(aliceKeyPub)
if err != nil {
t.Fatalf("unable to create target output: %v", err)
}
require.NoError(t, err, "unable to create target output")
sweepTx := wire.NewMsgTx(2)
sweepTx.AddTxIn(wire.NewTxIn(&wire.OutPoint{
Hash: commitmentTx.TxHash(),
@ -602,9 +598,7 @@ func testSpendValidation(t *testing.T, tweakless bool) {
delayScript, err := input.CommitScriptToSelf(
csvTimeout, aliceDelayKey, revokePubKey,
)
if err != nil {
t.Fatalf("unable to generate alice delay script: %v", err)
}
require.NoError(t, err, "unable to generate alice delay script")
sweepTx.TxIn[0].Sequence = input.LockTimeToSequence(false, csvTimeout)
signDesc := &input.SignDescriptor{
WitnessScript: delayScript,
@ -622,18 +616,14 @@ func testSpendValidation(t *testing.T, tweakless bool) {
aliceWitnessSpend, err := input.CommitSpendTimeout(
aliceSelfOutputSigner, signDesc, sweepTx,
)
if err != nil {
t.Fatalf("unable to generate delay commit spend witness: %v", err)
}
require.NoError(t, err, "unable to generate delay commit spend witness")
sweepTx.TxIn[0].Witness = aliceWitnessSpend
vm, err := txscript.NewEngine(
delayOutput.PkScript, sweepTx, 0, txscript.StandardVerifyFlags,
nil, nil, int64(channelBalance),
txscript.NewCannedPrevOutputFetcher(nil, 0),
)
if err != nil {
t.Fatalf("unable to create engine: %v", err)
}
require.NoError(t, err, "unable to create engine")
if err := vm.Execute(); err != nil {
t.Fatalf("spend from delay output is invalid: %v", err)
}
@ -658,18 +648,14 @@ func testSpendValidation(t *testing.T, tweakless bool) {
}
bobWitnessSpend, err := input.CommitSpendRevoke(localSigner, signDesc,
sweepTx)
if err != nil {
t.Fatalf("unable to generate revocation witness: %v", err)
}
require.NoError(t, err, "unable to generate revocation witness")
sweepTx.TxIn[0].Witness = bobWitnessSpend
vm, err = txscript.NewEngine(
delayOutput.PkScript, sweepTx, 0, txscript.StandardVerifyFlags,
nil, nil, int64(channelBalance),
txscript.NewCannedPrevOutputFetcher(nil, 0),
)
if err != nil {
t.Fatalf("unable to create engine: %v", err)
}
require.NoError(t, err, "unable to create engine")
if err := vm.Execute(); err != nil {
t.Fatalf("revocation spend is invalid: %v", err)
}
@ -687,9 +673,7 @@ func testSpendValidation(t *testing.T, tweakless bool) {
// Finally, we test bob sweeping his output as normal in the case that
// Alice broadcasts this commitment transaction.
bobScriptP2WKH, err := input.CommitScriptUnencumbered(bobPayKey)
if err != nil {
t.Fatalf("unable to create bob p2wkh script: %v", err)
}
require.NoError(t, err, "unable to create bob p2wkh script")
signDesc = &input.SignDescriptor{
KeyDesc: keychain.KeyDescriptor{
PubKey: bobKeyPub,
@ -709,9 +693,7 @@ func testSpendValidation(t *testing.T, tweakless bool) {
bobRegularSpend, err := input.CommitSpendNoDelay(
localSigner, signDesc, sweepTx, tweakless,
)
if err != nil {
t.Fatalf("unable to create bob regular spend: %v", err)
}
require.NoError(t, err, "unable to create bob regular spend")
sweepTx.TxIn[0].Witness = bobRegularSpend
vm, err = txscript.NewEngine(
regularOutput.PkScript,
@ -719,9 +701,7 @@ func testSpendValidation(t *testing.T, tweakless bool) {
nil, int64(channelBalance),
txscript.NewCannedPrevOutputFetcher(bobScriptP2WKH, 0),
)
if err != nil {
t.Fatalf("unable to create engine: %v", err)
}
require.NoError(t, err, "unable to create engine")
if err := vm.Execute(); err != nil {
t.Fatalf("bob p2wkh spend is invalid: %v", err)
}

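For readers skimming the engine plumbing above: txscript.NewEngine validates one input of a spending transaction against the pkScript of the output it consumes. A minimal, self-contained sketch of that flow, spending a trivial anyone-can-spend (OP_TRUE) output instead of the commitment scripts exercised by the real test, and assuming the eight-argument btcd API visible in the hunks above:

package example

import (
	"testing"

	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/stretchr/testify/require"
)

func TestTrivialSpendValidation(t *testing.T) {
	// An anyone-can-spend output: the script is a single OP_TRUE.
	pkScript := []byte{txscript.OP_TRUE}
	const value = int64(1000)

	// Craft a transaction spending that output. The previous
	// outpoint can stay all zeroes, since the engine only consults
	// the prev-output fetcher for the script and amount.
	sweepTx := wire.NewMsgTx(2)
	sweepTx.AddTxIn(wire.NewTxIn(&wire.OutPoint{}, nil, nil))
	sweepTx.AddTxOut(wire.NewTxOut(value, pkScript))

	vm, err := txscript.NewEngine(
		pkScript, sweepTx, 0, txscript.StandardVerifyFlags,
		nil, nil, value,
		txscript.NewCannedPrevOutputFetcher(pkScript, value),
	)
	require.NoError(t, err, "unable to create engine")
	require.NoError(t, vm.Execute(), "spend is invalid")
}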

@ -8,6 +8,7 @@ import (
"testing/quick"
"github.com/lightningnetwork/lnd/tlv"
"github.com/stretchr/testify/require"
)
// TestExtraOpaqueDataEncodeDecode tests that we're able to encode/decode
@ -128,9 +129,7 @@ func TestExtraOpaqueDataPackUnpackRecords(t *testing.T) {
&recordProducer{tlv.MakePrimitiveRecord(type2, &hop2)},
}
typeMap, err := extraBytes.ExtractRecords(newRecords...)
if err != nil {
t.Fatalf("unable to extract record: %v", err)
}
require.NoError(t, err, "unable to extract record")
// We should find that the new backing values have been populated with
// the proper value.

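The ExtraOpaqueData test above is ultimately exercising lnd's tlv package. As a minimal sketch of those primitives on their own, using only the tlv API already imported above (the type number 2 is arbitrary):

package example

import (
	"bytes"
	"testing"

	"github.com/lightningnetwork/lnd/tlv"
	"github.com/stretchr/testify/require"
)

func TestTLVRoundTrip(t *testing.T) {
	// Encode a single uint64 record under an arbitrary TLV type.
	value := uint64(42)
	encodeStream, err := tlv.NewStream(
		tlv.MakePrimitiveRecord(tlv.Type(2), &value),
	)
	require.NoError(t, err, "unable to create stream")

	var b bytes.Buffer
	require.NoError(t, encodeStream.Encode(&b), "unable to encode stream")

	// Decode into a fresh backing value and check it round-trips.
	var decoded uint64
	decodeStream, err := tlv.NewStream(
		tlv.MakePrimitiveRecord(tlv.Type(2), &decoded),
	)
	require.NoError(t, err, "unable to create stream")
	require.NoError(t, decodeStream.Decode(&b), "unable to decode stream")
	require.Equal(t, value, decoded)
}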

@ -6,6 +6,7 @@ import (
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/stretchr/testify/require"
)
func TestNetAddressDisplay(t *testing.T) {
@ -13,14 +14,10 @@ func TestNetAddressDisplay(t *testing.T) {
pubKeyStr := "036a0c5ea35df8a528b98edf6f290b28676d51d0fe202b073fe677612a39c0aa09"
pubHex, err := hex.DecodeString(pubKeyStr)
if err != nil {
t.Fatalf("unable to decode str: %v", err)
}
require.NoError(t, err, "unable to decode str")
pubKey, err := btcec.ParsePubKey(pubHex)
if err != nil {
t.Fatalf("unable to parse pubkey: %v", err)
}
require.NoError(t, err, "unable to parse pubkey")
addr, _ := net.ResolveTCPAddr("tcp", "10.0.0.2:9000")
netAddr := NetAddress{


@ -9,6 +9,7 @@ import (
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/stretchr/testify/require"
)
var (
@ -101,9 +102,7 @@ func TestChannelUpdateCompatabilityParsing(t *testing.T) {
err := parseChannelUpdateCompatabilityMode(
bufio.NewReader(&b), &newChanUpdate, 0,
)
if err != nil {
t.Fatalf("unable to parse channel update: %v", err)
}
require.NoError(t, err, "unable to parse channel update")
// At this point, we'll ensure that we get the exact same failure out
// on the other side.
@ -130,9 +129,7 @@ func TestChannelUpdateCompatabilityParsing(t *testing.T) {
err = parseChannelUpdateCompatabilityMode(
bufio.NewReader(&b), &newChanUpdate2, 0,
)
if err != nil {
t.Fatalf("unable to parse channel update: %v", err)
}
require.NoError(t, err, "unable to parse channel update")
if !reflect.DeepEqual(newChanUpdate2, newChanUpdate) {
t.Fatalf("mismatched channel updates: %v", err)
@ -158,9 +155,7 @@ func TestWriteOnionErrorChanUpdate(t *testing.T) {
// onion error message.
var errorBuf bytes.Buffer
err := writeOnionErrorChanUpdate(&errorBuf, &update, 0)
if err != nil {
t.Fatalf("unable to encode onion error: %v", err)
}
require.NoError(t, err, "unable to encode onion error")
// Finally, read the length encoded and ensure that it matches the raw
// length.
@ -188,9 +183,7 @@ func TestFailIncorrectDetailsOptionalAmount(t *testing.T) {
}
onionError2, err := DecodeFailure(bytes.NewReader(b.Bytes()), 0)
if err != nil {
t.Fatalf("unable to decode error: %v", err)
}
require.NoError(t, err, "unable to decode error")
invalidDetailsErr, ok := onionError2.(*FailIncorrectDetails)
if !ok {
@ -241,9 +234,7 @@ func TestFailIncorrectDetailsOptionalHeight(t *testing.T) {
}
onionError2, err := DecodeFailure(bytes.NewReader(b.Bytes()), 0)
if err != nil {
t.Fatalf("unable to decode error: %v", err)
}
require.NoError(t, err, "unable to decode error")
invalidDetailsErr, ok := onionError2.(*FailIncorrectDetails)
if !ok {

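To make the failure round trip above concrete: lnwire.EncodeFailure is the counterpart to the DecodeFailure call in these hunks. A compact sketch of encoding and decoding a single onion failure, with illustrative amount and height values:

package example

import (
	"bytes"
	"testing"

	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/stretchr/testify/require"
)

func TestFailureRoundTrip(t *testing.T) {
	onionError := lnwire.NewFailIncorrectDetails(99, 100)

	// Serialize the failure, then decode it back from the raw bytes.
	var b bytes.Buffer
	require.NoError(
		t, lnwire.EncodeFailure(&b, onionError, 0),
		"unable to encode error",
	)

	decoded, err := lnwire.DecodeFailure(bytes.NewReader(b.Bytes()), 0)
	require.NoError(t, err, "unable to decode error")

	invalidDetailsErr, ok := decoded.(*lnwire.FailIncorrectDetails)
	require.True(t, ok, "expected FailIncorrectDetails")
	require.Equal(t, lnwire.MilliSatoshi(99), invalidDetailsErr.Amount())
}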

@ -23,9 +23,7 @@ func createDummyMacaroon(t *testing.T) *macaroon.Macaroon {
dummyMacaroon, err := macaroon.New(
testRootKey, testID, testLocation, testVersion,
)
if err != nil {
t.Fatalf("Error creating initial macaroon: %v", err)
}
require.NoError(t, err, "Error creating initial macaroon")
return dummyMacaroon
}
@ -41,9 +39,7 @@ func TestAddConstraints(t *testing.T) {
newMac, err := macaroons.AddConstraints(
initialMac, macaroons.TimeoutConstraint(1),
)
if err != nil {
t.Fatalf("Error adding constraint: %v", err)
}
require.NoError(t, err, "Error adding constraint")
if &newMac == &initialMac {
t.Fatalf("Initial macaroon has been changed, something " +
"went wrong!")
@ -66,9 +62,7 @@ func TestTimeoutConstraint(t *testing.T) {
// function to.
testMacaroon := createDummyMacaroon(t)
err := constraintFunc(testMacaroon)
if err != nil {
t.Fatalf("Error applying timeout constraint: %v", err)
}
require.NoError(t, err, "Error applying timeout constraint")
// Finally, check that the created caveat has an
// acceptable value.
@ -92,9 +86,7 @@ func TestIpLockConstraint(t *testing.T) {
// function to.
testMacaroon := createDummyMacaroon(t)
err := constraintFunc(testMacaroon)
if err != nil {
t.Fatalf("Error applying timeout constraint: %v", err)
}
require.NoError(t, err, "Error applying timeout constraint")
// Finally, check that the created caveat has an
// acceptable value.

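For context on createDummyMacaroon and the constraint tests above: baking a macaroon and attaching a first-party caveat only takes the upstream macaroon.v2 package. A sketch with illustrative key, ID, and caveat values (the exact caveat string lnd's checkers parse is not reproduced here; the one below is merely shaped like a time-before caveat):

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	macaroon "gopkg.in/macaroon.v2"
)

func TestBakeAndCaveat(t *testing.T) {
	rootKey := []byte("0123456789abcdef")
	mac, err := macaroon.New(
		rootKey, []byte("0"), "lnd", macaroon.LatestVersion,
	)
	require.NoError(t, err, "Error creating initial macaroon")

	// First-party caveats are the mechanism constraints such as
	// TimeoutConstraint use under the hood.
	caveat := "time-before " +
		time.Now().Add(time.Minute).Format(time.RFC3339)
	require.NoError(
		t, mac.AddFirstPartyCaveat([]byte(caveat)),
		"Error adding constraint",
	)
	require.Len(t, mac.Caveats(), 1)
}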

@ -35,16 +35,12 @@ var (
// and read the store on its own.
func setupTestRootKeyStorage(t *testing.T) (string, kvdb.Backend) {
tempDir, err := ioutil.TempDir("", "macaroonstore-")
if err != nil {
t.Fatalf("Error creating temp dir: %v", err)
}
require.NoError(t, err, "Error creating temp dir")
db, err := kvdb.Create(
kvdb.BoltBackendName, path.Join(tempDir, "macaroons.db"), true,
kvdb.DefaultDBTimeout,
)
if err != nil {
t.Fatalf("Error opening store DB: %v", err)
}
require.NoError(t, err, "Error opening store DB")
store, err := macaroons.NewRootKeyStorage(db)
if err != nil {
db.Close()
@ -52,9 +48,7 @@ func setupTestRootKeyStorage(t *testing.T) (string, kvdb.Backend) {
}
defer store.Close()
err = store.CreateUnlock(&defaultPw)
if err != nil {
t.Fatalf("error creating unlock: %v", err)
}
require.NoError(t, err, "error creating unlock")
return tempDir, db
}
@ -70,14 +64,10 @@ func TestNewService(t *testing.T) {
service, err := macaroons.NewService(
db, "lnd", false, macaroons.IPLockChecker,
)
if err != nil {
t.Fatalf("Error creating new service: %v", err)
}
require.NoError(t, err, "Error creating new service")
defer service.Close()
err = service.CreateUnlock(&defaultPw)
if err != nil {
t.Fatalf("Error unlocking root key storage: %v", err)
}
require.NoError(t, err, "Error unlocking root key storage")
// Third, check if the created service can bake macaroons.
_, err = service.NewMacaroon(context.TODO(), nil, testOperation)
@ -88,9 +78,7 @@ func TestNewService(t *testing.T) {
macaroon, err := service.NewMacaroon(
context.TODO(), macaroons.DefaultRootKeyID, testOperation,
)
if err != nil {
t.Fatalf("Error creating macaroon from service: %v", err)
}
require.NoError(t, err, "Error creating macaroon from service")
if macaroon.Namespace().String() != "std:" {
t.Fatalf("The created macaroon has an invalid namespace: %s",
macaroon.Namespace().String())
@ -121,28 +109,20 @@ func TestValidateMacaroon(t *testing.T) {
service, err := macaroons.NewService(
db, "lnd", false, macaroons.IPLockChecker,
)
if err != nil {
t.Fatalf("Error creating new service: %v", err)
}
require.NoError(t, err, "Error creating new service")
defer service.Close()
err = service.CreateUnlock(&defaultPw)
if err != nil {
t.Fatalf("Error unlocking root key storage: %v", err)
}
require.NoError(t, err, "Error unlocking root key storage")
// Then, create a new macaroon that we can serialize.
macaroon, err := service.NewMacaroon(
context.TODO(), macaroons.DefaultRootKeyID, testOperation,
testOperationURI,
)
if err != nil {
t.Fatalf("Error creating macaroon from service: %v", err)
}
require.NoError(t, err, "Error creating macaroon from service")
macaroonBinary, err := macaroon.M().MarshalBinary()
if err != nil {
t.Fatalf("Error serializing macaroon: %v", err)
}
require.NoError(t, err, "Error serializing macaroon")
// Because the macaroons are always passed in a context, we need to
// mock one that has just the serialized macaroon as a value.
@ -155,18 +135,14 @@ func TestValidateMacaroon(t *testing.T) {
err = service.ValidateMacaroon(
mockContext, []bakery.Op{testOperation}, "FooMethod",
)
if err != nil {
t.Fatalf("Error validating the macaroon: %v", err)
}
require.NoError(t, err, "Error validating the macaroon")
// If the macaroon has the method specific URI permission, the list of
// required entity/action pairs is irrelevant.
err = service.ValidateMacaroon(
mockContext, []bakery.Op{{Entity: "irrelevant"}}, "SomeMethod",
)
if err != nil {
t.Fatalf("Error validating the macaroon: %v", err)
}
require.NoError(t, err, "Error validating the macaroon")
}
// TestListMacaroonIDs checks that ListMacaroonIDs returns the expected result.

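One further simplification left on the table in setupTestRootKeyStorage: testing.T.TempDir (Go 1.15+) removes the directory automatically when the test ends, so neither the path nor a cleanup step has to be handed back to the caller. A sketch of the equivalent setup, reusing the identifiers from the hunks above (defaultPw is redefined only to keep the snippet self-contained):

package macaroons_test

import (
	"path"
	"testing"

	"github.com/lightningnetwork/lnd/kvdb"
	"github.com/lightningnetwork/lnd/macaroons"
	"github.com/stretchr/testify/require"
)

var defaultPw = []byte("hello") // stand-in for the test file's password

func setupTestRootKeyStorage(t *testing.T) kvdb.Backend {
	// t.TempDir is cleaned up automatically when the test finishes.
	tempDir := t.TempDir()

	db, err := kvdb.Create(
		kvdb.BoltBackendName, path.Join(tempDir, "macaroons.db"),
		true, kvdb.DefaultDBTimeout,
	)
	require.NoError(t, err, "Error opening store DB")

	store, err := macaroons.NewRootKeyStorage(db)
	require.NoError(t, err, "Error creating root key storage")
	defer store.Close()

	err = store.CreateUnlock(&defaultPw)
	require.NoError(t, err, "error creating unlock")

	return db
}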

@ -17,6 +17,7 @@ import (
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/netann"
"github.com/stretchr/testify/require"
)
var (
@ -34,9 +35,7 @@ func randOutpoint(t *testing.T) wire.OutPoint {
var buf [36]byte
_, err := io.ReadFull(rand.Reader, buf[:])
if err != nil {
t.Fatalf("unable to generate random outpoint: %v", err)
}
require.NoError(t, err, "unable to generate random outpoint")
op := wire.OutPoint{}
copy(op.Hash[:], buf[:32])
@ -86,9 +85,7 @@ func createEdgePolicies(t *testing.T, channel *channeldb.OpenChannel,
// Generate and set pubkey2 for THEIR pubkey.
privKey2, err := btcec.NewPrivateKey()
if err != nil {
t.Fatalf("unable to generate key pair: %v", err)
}
require.NoError(t, err, "unable to generate key pair")
copy(pubkey2[:], privKey2.PubKey().SerializeCompressed())
// Set pubkey1 to the lower of the two pubkeys.
@ -316,9 +313,7 @@ func newManagerCfg(t *testing.T, numChannels int,
t.Helper()
privKey, err := btcec.NewPrivateKey()
if err != nil {
t.Fatalf("unable to generate key pair: %v", err)
}
require.NoError(t, err, "unable to generate key pair")
privKeySigner := keychain.NewPrivKeyMessageSigner(privKey, testKeyLoc)
graph := newMockGraph(
@ -362,14 +357,10 @@ func newHarness(t *testing.T, numChannels int,
cfg, graph, htlcSwitch := newManagerCfg(t, numChannels, startEnabled)
mgr, err := netann.NewChanStatusManager(cfg)
if err != nil {
t.Fatalf("unable to create chan status manager: %v", err)
}
require.NoError(t, err, "unable to create chan status manager")
err = mgr.Start()
if err != nil {
t.Fatalf("unable to start chan status manager: %v", err)
}
require.NoError(t, err, "unable to start chan status manager")
h := testHarness{
t: t,


@ -10,6 +10,7 @@ import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCreateChanAnnouncement(t *testing.T) {
@ -59,9 +60,7 @@ func TestCreateChanAnnouncement(t *testing.T) {
chanAnn, _, _, err := CreateChanAnnouncement(
chanProof, chanInfo, nil, nil,
)
if err != nil {
t.Fatalf("unable to create channel announcement: %v", err)
}
require.NoError(t, err, "unable to create channel announcement")
assert.Equal(t, chanAnn, expChanAnn)
}


@ -48,9 +48,7 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) {
alicePeer, bobChan, cleanUp, err := createTestPeer(
notifier, broadcastTxChan, noUpdate, mockSwitch,
)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
require.NoError(t, err, "unable to create test channels")
defer cleanUp()
chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())
@ -102,14 +100,10 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) {
bobSig, _, _, err := bobChan.CreateCloseProposal(
aliceFee, dummyDeliveryScript, respDeliveryScript,
)
if err != nil {
t.Fatalf("error creating close proposal: %v", err)
}
require.NoError(t, err, "error creating close proposal")
parsedSig, err := lnwire.NewSigFromSignature(bobSig)
if err != nil {
t.Fatalf("error parsing signature: %v", err)
}
require.NoError(t, err, "error parsing signature")
closingSigned := lnwire.NewClosingSigned(chanID, aliceFee, parsedSig)
alicePeer.chanCloseMsgs <- &closeMsg{
cid: chanID,
@ -156,9 +150,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) {
alicePeer, bobChan, cleanUp, err := createTestPeer(
notifier, broadcastTxChan, noUpdate, mockSwitch,
)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
require.NoError(t, err, "unable to create test channels")
defer cleanUp()
chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())
@ -218,13 +210,9 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) {
bobSig, _, _, err := bobChan.CreateCloseProposal(
bobFee, dummyDeliveryScript, aliceDeliveryScript,
)
if err != nil {
t.Fatalf("unable to create close proposal: %v", err)
}
require.NoError(t, err, "unable to create close proposal")
parsedSig, err := lnwire.NewSigFromSignature(bobSig)
if err != nil {
t.Fatalf("unable to parse signature: %v", err)
}
require.NoError(t, err, "unable to parse signature")
closingSigned := lnwire.NewClosingSigned(shutdownMsg.ChannelID,
bobFee, parsedSig)
@ -283,9 +271,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
alicePeer, bobChan, cleanUp, err := createTestPeer(
notifier, broadcastTxChan, noUpdate, mockSwitch,
)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
require.NoError(t, err, "unable to create test channels")
defer cleanUp()
chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())
@ -337,14 +323,10 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
bobSig, _, _, err := bobChan.CreateCloseProposal(
increasedFee, dummyDeliveryScript, aliceDeliveryScript,
)
if err != nil {
t.Fatalf("error creating close proposal: %v", err)
}
require.NoError(t, err, "error creating close proposal")
parsedSig, err := lnwire.NewSigFromSignature(bobSig)
if err != nil {
t.Fatalf("error parsing signature: %v", err)
}
require.NoError(t, err, "error parsing signature")
closingSigned := lnwire.NewClosingSigned(chanID, increasedFee, parsedSig)
alicePeer.chanCloseMsgs <- &closeMsg{
cid: chanID,
@ -381,14 +363,10 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
bobSig, _, _, err = bobChan.CreateCloseProposal(
increasedFee, dummyDeliveryScript, aliceDeliveryScript,
)
if err != nil {
t.Fatalf("error creating close proposal: %v", err)
}
require.NoError(t, err, "error creating close proposal")
parsedSig, err = lnwire.NewSigFromSignature(bobSig)
if err != nil {
t.Fatalf("error parsing signature: %v", err)
}
require.NoError(t, err, "error parsing signature")
closingSigned = lnwire.NewClosingSigned(chanID, increasedFee, parsedSig)
alicePeer.chanCloseMsgs <- &closeMsg{
cid: chanID,
@ -427,14 +405,10 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
bobSig, _, _, err = bobChan.CreateCloseProposal(
aliceFee, dummyDeliveryScript, aliceDeliveryScript,
)
if err != nil {
t.Fatalf("error creating close proposal: %v", err)
}
require.NoError(t, err, "error creating close proposal")
parsedSig, err = lnwire.NewSigFromSignature(bobSig)
if err != nil {
t.Fatalf("error parsing signature: %v", err)
}
require.NoError(t, err, "error parsing signature")
closingSigned = lnwire.NewClosingSigned(chanID, aliceFee, parsedSig)
alicePeer.chanCloseMsgs <- &closeMsg{
cid: chanID,
@ -482,9 +456,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
alicePeer, bobChan, cleanUp, err := createTestPeer(
notifier, broadcastTxChan, noUpdate, mockSwitch,
)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
require.NoError(t, err, "unable to create test channels")
defer cleanUp()
chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())
@ -549,14 +521,10 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
bobSig, _, _, err := bobChan.CreateCloseProposal(
increasedFee, dummyDeliveryScript, aliceDeliveryScript,
)
if err != nil {
t.Fatalf("error creating close proposal: %v", err)
}
require.NoError(t, err, "error creating close proposal")
parsedSig, err := lnwire.NewSigFromSignature(bobSig)
if err != nil {
t.Fatalf("unable to parse signature: %v", err)
}
require.NoError(t, err, "unable to parse signature")
closingSigned := lnwire.NewClosingSigned(chanID, increasedFee, parsedSig)
alicePeer.chanCloseMsgs <- &closeMsg{
@ -596,14 +564,10 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
bobSig, _, _, err = bobChan.CreateCloseProposal(
increasedFee, dummyDeliveryScript, aliceDeliveryScript,
)
if err != nil {
t.Fatalf("error creating close proposal: %v", err)
}
require.NoError(t, err, "error creating close proposal")
parsedSig, err = lnwire.NewSigFromSignature(bobSig)
if err != nil {
t.Fatalf("error parsing signature: %v", err)
}
require.NoError(t, err, "error parsing signature")
closingSigned = lnwire.NewClosingSigned(chanID, increasedFee, parsedSig)
alicePeer.chanCloseMsgs <- &closeMsg{
@ -640,14 +604,10 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
bobSig, _, _, err = bobChan.CreateCloseProposal(
aliceFee, dummyDeliveryScript, aliceDeliveryScript,
)
if err != nil {
t.Fatalf("error creating close proposal: %v", err)
}
require.NoError(t, err, "error creating close proposal")
parsedSig, err = lnwire.NewSigFromSignature(bobSig)
if err != nil {
t.Fatalf("error parsing signature: %v", err)
}
require.NoError(t, err, "error parsing signature")
closingSigned = lnwire.NewClosingSigned(chanID, aliceFee, parsedSig)
alicePeer.chanCloseMsgs <- &closeMsg{
cid: chanID,
@ -1024,14 +984,10 @@ func genScript(t *testing.T, address string) lnwire.DeliveryAddress {
address,
&chaincfg.TestNet3Params,
)
if err != nil {
t.Fatalf("invalid delivery address: %v", err)
}
require.NoError(t, err, "invalid delivery address")
script, err := txscript.PayToAddrScript(deliveryAddr)
if err != nil {
t.Fatalf("cannot create script: %v", err)
}
require.NoError(t, err, "cannot create script")
return script
}

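The proposal/parse sequence above repeats almost verbatim eight times across these negotiation tests. If the refactor went one step further, it could be collapsed into a helper along these hypothetical lines, assuming the same imports as the file above:

// closingSignedFromProposal is a hypothetical helper capturing the
// repeated pattern above: create a close proposal for the given fee,
// convert the raw signature to wire format, and wrap both in a
// ClosingSigned message.
func closingSignedFromProposal(t *testing.T, ch *lnwallet.LightningChannel,
	fee btcutil.Amount, localScript,
	remoteScript lnwire.DeliveryAddress,
	chanID lnwire.ChannelID) *lnwire.ClosingSigned {

	t.Helper()

	sig, _, _, err := ch.CreateCloseProposal(fee, localScript, remoteScript)
	require.NoError(t, err, "error creating close proposal")

	parsedSig, err := lnwire.NewSigFromSignature(sig)
	require.NoError(t, err, "error parsing signature")

	return lnwire.NewClosingSigned(chanID, fee, parsedSig)
}

Each call site would then reduce to closingSigned := closingSignedFromProposal(t, bobChan, fee, dummyDeliveryScript, aliceDeliveryScript, chanID).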

@ -11,6 +11,7 @@ import (
"github.com/lightningnetwork/lnd/buffer"
"github.com/lightningnetwork/lnd/pool"
"github.com/stretchr/testify/require"
)
type workerPoolTest struct {
@ -256,9 +257,7 @@ func startGeneric(t *testing.T, p interface{}) {
t.Fatalf("unknown worker pool type: %T", p)
}
if err != nil {
t.Fatalf("unable to start worker pool: %v", err)
}
require.NoError(t, err, "unable to start worker pool")
}
func stopGeneric(t *testing.T, p interface{}) {
@ -276,9 +275,7 @@ func stopGeneric(t *testing.T, p interface{}) {
t.Fatalf("unknown worker pool type: %T", p)
}
if err != nil {
t.Fatalf("unable to stop worker pool: %v", err)
}
require.NoError(t, err, "unable to stop worker pool")
}
func submitGeneric(p interface{}, sem <-chan struct{}) error {


@ -30,6 +30,7 @@ import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
var (
@ -200,34 +201,22 @@ func testFilterBlockNotifications(node *rpctest.Harness,
// To start the test, we'll create two fresh outputs paying to the
// private key that we generated above.
txid1, err := getTestTXID(node)
if err != nil {
t.Fatalf("unable to get test txid: %v", err)
}
require.NoError(t, err, "unable to get test txid")
err = waitForMempoolTx(node, txid1)
if err != nil {
t.Fatalf("unable to get test txid in mempool: %v", err)
}
require.NoError(t, err, "unable to get test txid in mempool")
txid2, err := getTestTXID(node)
if err != nil {
t.Fatalf("unable to get test txid: %v", err)
}
require.NoError(t, err, "unable to get test txid")
err = waitForMempoolTx(node, txid2)
if err != nil {
t.Fatalf("unable to get test txid in mempool: %v", err)
}
require.NoError(t, err, "unable to get test txid in mempool")
blockChan := chainView.FilteredBlocks()
// Next we'll mine a block confirming the output generated above.
newBlockHashes, err := node.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
_, currentHeight, err := node.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// We should get an update, however it shouldn't yet contain any
// filtered transaction as the filter hasn't been updated.
@ -243,34 +232,22 @@ func testFilterBlockNotifications(node *rpctest.Harness,
// so we can add them to the filter, and also craft transaction
// spending the outputs we created.
tx1, err := node.Client.GetRawTransaction(txid1)
if err != nil {
t.Fatalf("unable to fetch transaction: %v", err)
}
require.NoError(t, err, "unable to fetch transaction")
tx2, err := node.Client.GetRawTransaction(txid2)
if err != nil {
t.Fatalf("unable to fetch transaction: %v", err)
}
require.NoError(t, err, "unable to fetch transaction")
targetScript, err := txscript.PayToAddrScript(testAddr)
if err != nil {
t.Fatalf("unable to create target output: %v", err)
}
require.NoError(t, err, "unable to create target output")
// Next, we'll locate the two outputs generated above that pay to us
// so we can properly add them to the filter.
outPoint1, _, err := locateOutput(tx1.MsgTx(), targetScript)
if err != nil {
t.Fatalf("unable to find output: %v", err)
}
require.NoError(t, err, "unable to find output")
outPoint2, _, err := locateOutput(tx2.MsgTx(), targetScript)
if err != nil {
t.Fatalf("unable to find output: %v", err)
}
require.NoError(t, err, "unable to find output")
_, currentHeight, err = node.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Now we'll add both outpoints to the current filter.
filter := []channeldb.EdgePoint{
@ -278,35 +255,23 @@ func testFilterBlockNotifications(node *rpctest.Harness,
{FundingPkScript: targetScript, OutPoint: *outPoint2},
}
err = chainView.UpdateFilter(filter, uint32(currentHeight))
if err != nil {
t.Fatalf("unable to update filter: %v", err)
}
require.NoError(t, err, "unable to update filter")
// With the filter updated, we'll now create two transactions spending
// the outputs we created.
spendingTx1, err := craftSpendTransaction(*outPoint1, targetScript)
if err != nil {
t.Fatalf("unable to create spending tx: %v", err)
}
require.NoError(t, err, "unable to create spending tx")
spendingTx2, err := craftSpendTransaction(*outPoint2, targetScript)
if err != nil {
t.Fatalf("unable to create spending tx: %v", err)
}
require.NoError(t, err, "unable to create spending tx")
// Now we'll broadcast the first spending transaction and also mine a
// block which should include it.
spendTxid1, err := node.Client.SendRawTransaction(spendingTx1, true)
if err != nil {
t.Fatalf("unable to broadcast transaction: %v", err)
}
require.NoError(t, err, "unable to broadcast transaction")
err = waitForMempoolTx(node, spendTxid1)
if err != nil {
t.Fatalf("unable to get spending txid in mempool: %v", err)
}
require.NoError(t, err, "unable to get spending txid in mempool")
newBlockHashes, err = node.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
// We should receive a notification over the channel. The notification
// should correspond to the current block height and have that single
@ -322,17 +287,11 @@ func testFilterBlockNotifications(node *rpctest.Harness,
// Next, mine the second transaction which spends the second output.
// This should also generate a notification.
spendTxid2, err := node.Client.SendRawTransaction(spendingTx2, true)
if err != nil {
t.Fatalf("unable to broadcast transaction: %v", err)
}
require.NoError(t, err, "unable to broadcast transaction")
err = waitForMempoolTx(node, spendTxid2)
if err != nil {
t.Fatalf("unable to get spending txid in mempool: %v", err)
}
require.NoError(t, err, "unable to get spending txid in mempool")
newBlockHashes, err = node.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
select {
case filteredBlock := <-blockChan:
@ -354,22 +313,16 @@ func testUpdateFilterBackTrack(node *rpctest.Harness,
t.Fatalf("unable to get test txid")
}
err = waitForMempoolTx(node, txid)
if err != nil {
t.Fatalf("unable to get test txid in mempool: %v", err)
}
require.NoError(t, err, "unable to get test txid in mempool")
// Next we'll mine a block confirming the output generated above.
initBlockHashes, err := node.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
blockChan := chainView.FilteredBlocks()
_, currentHeight, err := node.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Consume the notification sent which contains an empty filtered
// block.
@ -384,29 +337,17 @@ func testUpdateFilterBackTrack(node *rpctest.Harness,
// Next, create a transaction which spends the output created above,
// mining the spend into a block.
tx, err := node.Client.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to fetch transaction: %v", err)
}
require.NoError(t, err, "unable to fetch transaction")
outPoint, _, err := locateOutput(tx.MsgTx(), testScript)
if err != nil {
t.Fatalf("unable to find output: %v", err)
}
require.NoError(t, err, "unable to find output")
spendingTx, err := craftSpendTransaction(*outPoint, testScript)
if err != nil {
t.Fatalf("unable to create spending tx: %v", err)
}
require.NoError(t, err, "unable to create spending tx")
spendTxid, err := node.Client.SendRawTransaction(spendingTx, true)
if err != nil {
t.Fatalf("unable to broadcast transaction: %v", err)
}
require.NoError(t, err, "unable to broadcast transaction")
err = waitForMempoolTx(node, spendTxid)
if err != nil {
t.Fatalf("unable to get spending txid in mempool: %v", err)
}
require.NoError(t, err, "unable to get spending txid in mempool")
newBlockHashes, err := node.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
// We should have received another empty filtered block notification.
select {
@ -423,9 +364,7 @@ func testUpdateFilterBackTrack(node *rpctest.Harness,
{FundingPkScript: testScript, OutPoint: *outPoint},
}
err = chainView.UpdateFilter(filter, uint32(currentHeight))
if err != nil {
t.Fatalf("unable to update filter: %v", err)
}
require.NoError(t, err, "unable to update filter")
// We should now receive a fresh filtered block notification that
// includes the transaction spend we included above.
@ -451,30 +390,22 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView,
t.Fatalf("unable to get test txid")
}
err = waitForMempoolTx(node, txid1)
if err != nil {
t.Fatalf("unable to get test txid in mempool: %v", err)
}
require.NoError(t, err, "unable to get test txid in mempool")
txid2, err := getTestTXID(node)
if err != nil {
t.Fatalf("unable to get test txid")
}
err = waitForMempoolTx(node, txid2)
if err != nil {
t.Fatalf("unable to get test txid in mempool: %v", err)
}
require.NoError(t, err, "unable to get test txid in mempool")
blockChan := chainView.FilteredBlocks()
// Next we'll mine a block confirming the output generated above.
newBlockHashes, err := node.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
_, currentHeight, err := node.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// We should get an update, however it shouldn't yet contain any
// filtered transaction as the filter hasn't been updated.
@ -487,37 +418,23 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView,
}
tx1, err := node.Client.GetRawTransaction(txid1)
if err != nil {
t.Fatalf("unable to fetch transaction: %v", err)
}
require.NoError(t, err, "unable to fetch transaction")
tx2, err := node.Client.GetRawTransaction(txid2)
if err != nil {
t.Fatalf("unable to fetch transaction: %v", err)
}
require.NoError(t, err, "unable to fetch transaction")
// Next, we'll create a block that includes two transactions, each
// of which spends one of the outputs created.
outPoint1, _, err := locateOutput(tx1.MsgTx(), testScript)
if err != nil {
t.Fatalf("unable to find output: %v", err)
}
require.NoError(t, err, "unable to find output")
outPoint2, _, err := locateOutput(tx2.MsgTx(), testScript)
if err != nil {
t.Fatalf("unable to find output: %v", err)
}
require.NoError(t, err, "unable to find output")
spendingTx1, err := craftSpendTransaction(*outPoint1, testScript)
if err != nil {
t.Fatalf("unable to create spending tx: %v", err)
}
require.NoError(t, err, "unable to create spending tx")
spendingTx2, err := craftSpendTransaction(*outPoint2, testScript)
if err != nil {
t.Fatalf("unable to create spending tx: %v", err)
}
require.NoError(t, err, "unable to create spending tx")
txns := []*btcutil.Tx{btcutil.NewTx(spendingTx1), btcutil.NewTx(spendingTx2)}
block, err := node.GenerateAndSubmitBlock(txns, 11, time.Time{})
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
require.NoError(t, err, "unable to generate block")
select {
case filteredBlock := <-blockChan:
@ -528,9 +445,7 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView,
}
_, currentHeight, err = node.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Now we'll manually trigger filtering the block generated above.
// First, we'll add the two outpoints to our filter.
@ -539,9 +454,7 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView,
{FundingPkScript: testScript, OutPoint: *outPoint2},
}
err = chainView.UpdateFilter(filter, uint32(currentHeight))
if err != nil {
t.Fatalf("unable to update filter: %v", err)
}
require.NoError(t, err, "unable to update filter")
// We set the filter with the current height, so we shouldn't get any
// notifications.
@ -554,9 +467,7 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView,
// Now we'll manually rescan that past block. This should include two
// filtered transactions, the spending transactions we created above.
filteredBlock, err := chainView.FilterBlock(block.Hash())
if err != nil {
t.Fatalf("unable to filter block: %v", err)
}
require.NoError(t, err, "unable to filter block")
txn1, txn2 := spendingTx1.TxHash(), spendingTx2.TxHash()
expectedTxns := []*chainhash.Hash{&txn1, &txn2}
assertFilteredBlock(t, filteredBlock, currentHeight, block.Hash(),
@ -573,9 +484,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
// Create a node that has a shorter chain than the main chain, so we
// can trigger a reorg.
reorgNode, err := rpctest.New(netParams, nil, []string{"--txindex"}, "")
if err != nil {
t.Fatalf("unable to create mining node: %v", err)
}
require.NoError(t, err, "unable to create mining node")
defer reorgNode.TearDown()
// We want to overwrite some of the connection settings to make the
@ -592,17 +501,13 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
}
_, bestHeight, err := reorgNode.Client.GetBestBlock()
if err != nil {
t.Fatalf("error getting best block: %v", err)
}
require.NoError(t, err, "error getting best block")
// Init a chain view that has this node as its block source.
cleanUpFunc, reorgView, err := chainViewInit(
reorgNode.RPCConfig(), reorgNode.P2PAddress(), bestHeight,
)
if err != nil {
t.Fatalf("unable to create chain view: %v", err)
}
require.NoError(t, err, "unable to create chain view")
defer func() {
if cleanUpFunc != nil {
cleanUpFunc()
@ -625,9 +530,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
}
_, oldHeight, err := reorgNode.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Now connect the node with the short chain to the main node, and wait
// for their chains to synchronize. The short chain will be reorged all
@ -641,9 +544,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
}
_, newHeight, err := reorgNode.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// We should be getting oldHeight number of blocks marked as
// stale/disconnected. We expect to first get all stale blocks,
@ -681,16 +582,12 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
// Now we trigger a small reorg, by disconnecting the nodes, mining
// a few blocks on each, then connecting them again.
peers, err := reorgNode.Client.GetPeerInfo()
if err != nil {
t.Fatalf("unable to get peer info: %v", err)
}
require.NoError(t, err, "unable to get peer info")
numPeers := len(peers)
// Disconnect the nodes.
err = reorgNode.Client.AddNode(node.P2PAddress(), rpcclient.ANRemove)
if err != nil {
t.Fatalf("unable to disconnect mining nodes: %v", err)
}
require.NoError(t, err, "unable to disconnect mining nodes")
// Wait for disconnection
for {
@ -732,9 +629,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
}
_, oldHeight, err = reorgNode.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
require.NoError(t, err, "unable to get current height")
// Now connect the two nodes, and wait for their chains to sync up.
if err := rpctest.ConnectNode(reorgNode, node); err != nil {
@ -1135,9 +1030,7 @@ func TestFilteredChainView(t *testing.T) {
// this node with a chain length of 125, so we have plenty of BTC to
// play around with.
miner, err := rpctest.New(netParams, nil, []string{"--txindex"}, "")
if err != nil {
t.Fatalf("unable to create mining node: %v", err)
}
require.NoError(t, err, "unable to create mining node")
defer miner.TearDown()
if err := miner.SetUp(true, 25); err != nil {
t.Fatalf("unable to set up mining node: %v", err)

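The waitForMempoolTx helper these tests poll with is hand-rolled; lnd's lntest/wait package (already in the import block above) expresses the same retry loop more compactly. A sketch, assuming node and txid as used in the hunks above:

// Poll the mempool until the target txid shows up or the timeout
// expires; wait.NoError retries the closure until it returns nil.
err := wait.NoError(func() error {
	mempool, err := node.Client.GetRawMempool()
	if err != nil {
		return err
	}
	for _, mempoolTx := range mempool {
		if *mempoolTx == *txid {
			return nil
		}
	}
	return fmt.Errorf("tx %v not yet in mempool", txid)
}, wait.DefaultTimeout)
require.NoError(t, err, "unable to get test txid in mempool")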

@ -15,6 +15,7 @@ import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/stretchr/testify/require"
)
var (
@ -48,9 +49,7 @@ func TestControlTowerSubscribeUnknown(t *testing.T) {
t.Parallel()
db, err := initDB()
if err != nil {
t.Fatalf("unable to init db: %v", err)
}
require.NoError(t, err, "unable to init db")
pControl := NewControlTower(channeldb.NewPaymentControl(db))
@ -67,9 +66,7 @@ func TestControlTowerSubscribeSuccess(t *testing.T) {
t.Parallel()
db, err := initDB()
if err != nil {
t.Fatalf("unable to init db: %v", err)
}
require.NoError(t, err, "unable to init db")
pControl := NewControlTower(channeldb.NewPaymentControl(db))
@ -87,9 +84,7 @@ func TestControlTowerSubscribeSuccess(t *testing.T) {
// Subscription should succeed and immediately report the InFlight
// status.
subscriber1, err := pControl.SubscribePayment(info.PaymentIdentifier)
if err != nil {
t.Fatalf("expected subscribe to succeed, but got: %v", err)
}
require.NoError(t, err, "expected subscribe to succeed, but got")
// Register an attempt.
err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt)
@ -99,9 +94,7 @@ func TestControlTowerSubscribeSuccess(t *testing.T) {
// Register a second subscriber after the first attempt has started.
subscriber2, err := pControl.SubscribePayment(info.PaymentIdentifier)
if err != nil {
t.Fatalf("expected subscribe to succeed, but got: %v", err)
}
require.NoError(t, err, "expected subscribe to succeed, but got")
// Mark the payment as successful.
settleInfo := channeldb.HTLCSettleInfo{
@ -119,9 +112,7 @@ func TestControlTowerSubscribeSuccess(t *testing.T) {
// Register a third subscriber after the payment succeeded.
subscriber3, err := pControl.SubscribePayment(info.PaymentIdentifier)
if err != nil {
t.Fatalf("expected subscribe to succeed, but got: %v", err)
}
require.NoError(t, err, "expected subscribe to succeed, but got")
// We expect all subscribers to now report the final outcome followed by
// no other events.
@ -184,9 +175,7 @@ func TestPaymentControlSubscribeFail(t *testing.T) {
func testPaymentControlSubscribeFail(t *testing.T, registerAttempt bool) {
db, err := initDB()
if err != nil {
t.Fatalf("unable to init db: %v", err)
}
require.NoError(t, err, "unable to init db")
pControl := NewControlTower(channeldb.NewPaymentControl(db))
@ -203,9 +192,7 @@ func testPaymentControlSubscribeFail(t *testing.T, registerAttempt bool) {
// Subscription should succeed.
subscriber1, err := pControl.SubscribePayment(info.PaymentIdentifier)
if err != nil {
t.Fatalf("expected subscribe to succeed, but got: %v", err)
}
require.NoError(t, err, "expected subscribe to succeed, but got")
// Conditionally register the attempt based on the test type. This
// allows us to simulate failing after attempting with an htlc or before
@ -239,9 +226,7 @@ func testPaymentControlSubscribeFail(t *testing.T, registerAttempt bool) {
// Register a second subscriber after the payment failed.
subscriber2, err := pControl.SubscribePayment(info.PaymentIdentifier)
if err != nil {
t.Fatalf("expected subscribe to succeed, but got: %v", err)
}
require.NoError(t, err, "expected subscribe to succeed, but got")
// We expect all subscribers to now report the final outcome followed by
// no other events.


@ -57,9 +57,7 @@ func TestProbabilityExtrapolation(t *testing.T) {
// modifications anywhere in the chain of components that is involved in
// this test.
attempts, err := ctx.testPayment(1)
if err != nil {
t.Fatalf("payment failed: %v", err)
}
require.NoError(t, err, "payment failed")
if len(attempts) != 5 {
t.Fatalf("expected 5 attempts, but needed %v", len(attempts))
}
@ -69,9 +67,7 @@ func TestProbabilityExtrapolation(t *testing.T) {
// first before switching to the paid channel.
ctx.mcCfg.AprioriWeight = 1
attempts, err = ctx.testPayment(1)
if err != nil {
t.Fatalf("payment failed: %v", err)
}
require.NoError(t, err, "payment failed")
if len(attempts) != 11 {
t.Fatalf("expected 11 attempts, but needed %v", len(attempts))
}


@ -400,9 +400,7 @@ func TestEdgeUpdateNotification(t *testing.T) {
fundingTx, chanPoint, chanID, err := createChannelEdge(ctx,
bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(),
chanValue, 0)
if err != nil {
t.Fatalf("unable create channel edge: %v", err)
}
require.NoError(t, err, "unable create channel edge")
// We'll also add a record for the block that included our funding
// transaction.
@ -414,13 +412,9 @@ func TestEdgeUpdateNotification(t *testing.T) {
// Next we'll create two test nodes that the fake channel will be open
// between.
node1, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
// Finally, to conclude our test set up, we'll create a channel
// update to announce the created channel between the two nodes.
@ -445,9 +439,7 @@ func TestEdgeUpdateNotification(t *testing.T) {
// With the channel edge now in place, we'll subscribe for topology
// notifications.
ntfnClient, err := ctx.router.SubscribeTopology()
if err != nil {
t.Fatalf("unable to subscribe for channel notifications: %v", err)
}
require.NoError(t, err, "unable to subscribe for channel notifications")
// Create random policy edges that are stemmed to the channel id
// created above.
@ -514,13 +506,9 @@ func TestEdgeUpdateNotification(t *testing.T) {
}
node1Pub, err := node1.PubKey()
if err != nil {
t.Fatalf("unable to encode key: %v", err)
}
require.NoError(t, err, "unable to encode key")
node2Pub, err := node2.PubKey()
if err != nil {
t.Fatalf("unable to encode key: %v", err)
}
require.NoError(t, err, "unable to encode key")
const numEdgePolicies = 2
for i := 0; i < numEdgePolicies; i++ {
@ -593,9 +581,7 @@ func TestNodeUpdateNotification(t *testing.T) {
bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(),
chanValue, startingBlockHeight)
if err != nil {
t.Fatalf("unable create channel edge: %v", err)
}
require.NoError(t, err, "unable create channel edge")
// We'll also add a record for the block that included our funding
// transaction.
@ -608,13 +594,9 @@ func TestNodeUpdateNotification(t *testing.T) {
// them to trigger notifications by sending updated node announcement
// messages.
node1, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
testFeaturesBuf := new(bytes.Buffer)
require.NoError(t, testFeatures.Encode(testFeaturesBuf))
@ -641,9 +623,7 @@ func TestNodeUpdateNotification(t *testing.T) {
// Create a new client to receive notifications.
ntfnClient, err := ctx.router.SubscribeTopology()
if err != nil {
t.Fatalf("unable to subscribe for channel notifications: %v", err)
}
require.NoError(t, err, "unable to subscribe for channel notifications")
// Change network topology by adding the updated info for the two nodes
// to the channel router.
@ -778,9 +758,7 @@ func TestNotificationCancellation(t *testing.T) {
// Create a new client to receive notifications.
ntfnClient, err := ctx.router.SubscribeTopology()
if err != nil {
t.Fatalf("unable to subscribe for channel notifications: %v", err)
}
require.NoError(t, err, "unable to subscribe for channel notifications")
// We'll create the utxo for a new channel.
const chanValue = 10000
@ -788,9 +766,7 @@ func TestNotificationCancellation(t *testing.T) {
bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(),
chanValue, startingBlockHeight)
if err != nil {
t.Fatalf("unable create channel edge: %v", err)
}
require.NoError(t, err, "unable create channel edge")
// We'll also add a record for the block that included our funding
// transaction.
@ -802,13 +778,9 @@ func TestNotificationCancellation(t *testing.T) {
// We'll create a fresh new node topology update to feed to the channel
// router.
node1, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
// Before we send the message to the channel router, we'll cancel the
// notifications for this client. As a result, the notification
@ -870,9 +842,7 @@ func TestChannelCloseNotification(t *testing.T) {
fundingTx, chanUtxo, chanID, err := createChannelEdge(ctx,
bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(),
chanValue, startingBlockHeight)
if err != nil {
t.Fatalf("unable create channel edge: %v", err)
}
require.NoError(t, err, "unable create channel edge")
// We'll also add a record for the block that included our funding
// transaction.
@ -884,13 +854,9 @@ func TestChannelCloseNotification(t *testing.T) {
// Next we'll create two test nodes that the fake channel will be open
// between.
node1, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
// Finally, to conclude our test set up, we'll create a channel
// announcement to announce the created channel between the two nodes.
@ -914,9 +880,7 @@ func TestChannelCloseNotification(t *testing.T) {
// With the channel edge now in place, we'll subscribe for topology
// notifications.
ntfnClient, err := ctx.router.SubscribeTopology()
if err != nil {
t.Fatalf("unable to subscribe for channel notifications: %v", err)
}
require.NoError(t, err, "unable to subscribe for channel notifications")
// Next, we'll simulate the closure of our channel by generating a new
// block at height 102 which spends the original multi-sig output of

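To make the subscription pattern in these tests concrete: SubscribeTopology returns a client whose TopologyChanges channel delivers batched graph updates, and whose Cancel releases the subscription. A sketch under those assumptions, in the context of the test functions above:

ntfnClient, err := ctx.router.SubscribeTopology()
require.NoError(t, err, "unable to subscribe for channel notifications")
defer ntfnClient.Cancel()

select {
case topChange := <-ntfnClient.TopologyChanges:
	// Each notification batches node, edge, and closed-channel
	// updates; the tests above assert on the relevant slice.
	t.Logf("%d edge updates", len(topChange.ChannelEdgeUpdates))
case <-time.After(5 * time.Second):
	t.Fatalf("no topology notification received")
}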

@ -962,9 +962,7 @@ func runFindLowestFeePath(t *testing.T, useCache bool) {
paymentAmt := lnwire.NewMSatFromSatoshis(100)
target := ctx.keyFromAlias("target")
path, err := ctx.findPath(target, paymentAmt)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
route, err := newRoute(
ctx.source, path, startingHeight,
finalHopParams{
@ -973,9 +971,7 @@ func runFindLowestFeePath(t *testing.T, useCache bool) {
records: nil,
},
)
if err != nil {
t.Fatalf("unable to create path: %v", err)
}
require.NoError(t, err, "unable to create path")
// Assert that the lowest fee route is returned.
if route.Hops[1].PubKeyBytes != ctx.keyFromAlias("b") {
@ -1058,9 +1054,7 @@ var basicGraphPathFindingTests = []basicGraphPathFindingTestCase{
func runBasicGraphPathFinding(t *testing.T, useCache bool) {
testGraphInstance, err := parseTestGraph(useCache, basicGraphFilePath)
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer testGraphInstance.cleanUp()
// With the test graph loaded, we'll test some basic path finding using
@ -1083,9 +1077,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc
expectedHopCount := len(expectedHops)
sourceNode, err := graphInstance.graph.SourceNode()
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
require.NoError(t, err, "unable to fetch source node")
sourceVertex := route.Vertex(sourceNode.PubKeyBytes)
const (
@ -1112,9 +1104,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc
}
return
}
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
route, err := newRoute(
sourceVertex, path, startingHeight,
@ -1124,9 +1114,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc
records: nil,
},
)
if err != nil {
t.Fatalf("unable to create path: %v", err)
}
require.NoError(t, err, "unable to create path")
if len(route.Hops) != len(expectedHops) {
t.Fatalf("route is of incorrect length, expected %v got %v",
@ -1148,9 +1136,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc
// properly points to the channel ID that the HTLC should be forwarded
// along.
sphinxPath, err := route.ToSphinxPath()
if err != nil {
t.Fatalf("unable to make sphinx path: %v", err)
}
require.NoError(t, err, "unable to make sphinx path")
if sphinxPath.TrueRouteLength() != expectedHopCount {
t.Fatalf("incorrect number of hop payloads: expected %v, got %v",
expectedHopCount, sphinxPath.TrueRouteLength())
@ -1178,9 +1164,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc
lastHopIndex := len(expectedHops) - 1
hopData, err := sphinxPath[lastHopIndex].HopPayload.HopData()
if err != nil {
t.Fatalf("unable to create hop data: %v", err)
}
require.NoError(t, err, "unable to create hop data")
if !bytes.Equal(hopData.NextAddress[:], exitHop[:]) {
t.Fatalf("first hop has incorrect next hop: expected %x, got %x",
@ -1235,15 +1219,11 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc
// appropriate circumstances.
func runPathFindingWithAdditionalEdges(t *testing.T, useCache bool) {
graph, err := parseTestGraph(useCache, basicGraphFilePath)
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer graph.cleanUp()
sourceNode, err := graph.graph.SourceNode()
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
require.NoError(t, err, "unable to fetch source node")
paymentAmt := lnwire.NewMSatFromSatoshis(100)
@ -1254,13 +1234,9 @@ func runPathFindingWithAdditionalEdges(t *testing.T, useCache bool) {
// to find a path from our source node, roasbeef, to doge.
dogePubKeyHex := "03dd46ff29a6941b4a2607525b043ec9b020b3f318a1bf281536fd7011ec59c882"
dogePubKeyBytes, err := hex.DecodeString(dogePubKeyHex)
if err != nil {
t.Fatalf("unable to decode public key: %v", err)
}
require.NoError(t, err, "unable to decode public key")
dogePubKey, err := btcec.ParsePubKey(dogePubKeyBytes)
if err != nil {
t.Fatalf("unable to parse public key from bytes: %v", err)
}
require.NoError(t, err, "unable to parse public key from bytes")
doge := &channeldb.LightningNode{}
doge.AddPubKey(dogePubKey)
@ -1298,9 +1274,7 @@ func runPathFindingWithAdditionalEdges(t *testing.T, useCache bool) {
// We should now be able to find a path from roasbeef to doge.
path, err := find(noRestrictions)
if err != nil {
t.Fatalf("unable to find private path to doge: %v", err)
}
require.NoError(t, err, "unable to find private path to doge")
// The path should represent the following hops:
// roasbeef -> songoku -> doge
@ -1330,9 +1304,7 @@ func runPathFindingWithAdditionalEdges(t *testing.T, useCache bool) {
restrictions.DestFeatures = tlvFeatures
path, err = find(&restrictions)
if err != nil {
t.Fatalf("path should have been found: %v", err)
}
require.NoError(t, err, "path should have been found")
assertExpectedPath(t, graph.aliasMap, path, "songoku", "doge")
}
@ -1675,9 +1647,7 @@ func runNewRoutePathTooLong(t *testing.T, useCache bool) {
node20 := ctx.keyFromAlias("node-20")
payAmt := lnwire.MilliSatoshi(100001)
_, err := ctx.findPath(node20, payAmt)
if err != nil {
t.Fatalf("unexpected pathfinding failure: %v", err)
}
require.NoError(t, err, "unexpected pathfinding failure")
// Assert that finding a 21 hop route fails.
node21 := ctx.keyFromAlias("node-21")
@ -1700,24 +1670,18 @@ func runNewRoutePathTooLong(t *testing.T, useCache bool) {
func runPathNotAvailable(t *testing.T, useCache bool) {
graph, err := parseTestGraph(useCache, basicGraphFilePath)
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer graph.cleanUp()
sourceNode, err := graph.graph.SourceNode()
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
require.NoError(t, err, "unable to fetch source node")
// With the test graph loaded, we'll test that queries for targets that
// are either unreachable within the graph, or unknown, result in an
// error.
unknownNodeStr := "03dd46ff29a6941b4a2607525b043ec9b020b3f318a1bf281536fd7011ec59c882"
unknownNodeBytes, err := hex.DecodeString(unknownNodeStr)
if err != nil {
t.Fatalf("unable to parse bytes: %v", err)
}
require.NoError(t, err, "unable to parse bytes")
var unknownNode route.Vertex
copy(unknownNode[:], unknownNodeBytes)
@ -1767,9 +1731,7 @@ func runDestTLVGraphFallback(t *testing.T, useCache bool) {
defer ctx.cleanup()
sourceNode, err := ctx.graph.SourceNode()
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
require.NoError(t, err, "unable to fetch source node")
find := func(r *RestrictParams,
target route.Vertex) ([]*channeldb.CachedEdgePolicy, error) {
@ -1801,9 +1763,7 @@ func runDestTLVGraphFallback(t *testing.T, useCache bool) {
// However, path to satoshi should succeed via the fallback because his
// node ann features have the TLV bit.
path, err := find(&restrictions, satoshi)
if err != nil {
t.Fatalf("path should have been found: %v", err)
}
require.NoError(t, err, "path should have been found")
assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "satoshi")
// Add empty destination features. This should cause both paths to fail,
@ -1824,9 +1784,7 @@ func runDestTLVGraphFallback(t *testing.T, useCache bool) {
restrictions.DestFeatures = tlvFeatures
path, err = find(&restrictions, luoji)
if err != nil {
t.Fatalf("path should have been found: %v", err)
}
require.NoError(t, err, "path should have been found")
assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "luoji")
}
@ -1888,9 +1846,7 @@ func runMissingFeatureDep(t *testing.T, useCache bool) {
ctx.restrictParams.DestFeatures = tlvPayAddrFeatures
path, err := ctx.findPath(conner, 100)
if err != nil {
t.Fatalf("path should have been found: %v", err)
}
require.NoError(t, err, "path should have been found")
assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "conner")
// Finally, try to find a route to joost through conner. The
@ -2006,23 +1962,17 @@ func runDestPaymentAddr(t *testing.T, useCache bool) {
ctx.restrictParams.DestFeatures = tlvPayAddrFeatures
path, err := ctx.findPath(luoji, 100)
if err != nil {
t.Fatalf("path should have been found: %v", err)
}
require.NoError(t, err, "path should have been found")
assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "luoji")
}
func runPathInsufficientCapacity(t *testing.T, useCache bool) {
graph, err := parseTestGraph(useCache, basicGraphFilePath)
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer graph.cleanUp()
sourceNode, err := graph.graph.SourceNode()
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
require.NoError(t, err, "unable to fetch source node")
// Next, test that attempting to find a path in which the current
// channel graph cannot support due to insufficient capacity triggers
@ -2049,15 +1999,11 @@ func runPathInsufficientCapacity(t *testing.T, useCache bool) {
// smaller than the advertised minHTLC of an edge, then path finding fails.
func runRouteFailMinHTLC(t *testing.T, useCache bool) {
graph, err := parseTestGraph(useCache, basicGraphFilePath)
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer graph.cleanUp()
sourceNode, err := graph.graph.SourceNode()
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
require.NoError(t, err, "unable to fetch source node")
// We'll now attempt to route an HTLC of 10 SAT from roasbeef to Son
// Goku. However, the min HTLC of Son Goku is 1k SAT; as a result, this
@ -2111,17 +2057,13 @@ func runRouteFailMaxHTLC(t *testing.T, useCache bool) {
target := ctx.keyFromAlias("target")
payAmt := lnwire.MilliSatoshi(100001)
_, err := ctx.findPath(target, payAmt)
if err != nil {
t.Fatalf("graph should've been able to support payment: %v", err)
}
require.NoError(t, err, "graph should've been able to support payment")
// Next, update the middle edge policy to only allow payments up to 100k
// msat.
graph := ctx.testGraphInstance.graph
_, midEdge, _, err := graph.FetchChannelEdgesByID(firstToSecondID)
if err != nil {
t.Fatalf("unable to fetch channel edges by ID: %v", err)
}
require.NoError(t, err, "unable to fetch channel edges by ID")
midEdge.MessageFlags = 1
midEdge.MaxHTLC = payAmt - 1
if err := graph.UpdateEdgePolicy(midEdge); err != nil {
@ -2143,15 +2085,11 @@ func runRouteFailMaxHTLC(t *testing.T, useCache bool) {
// found among the bandwidth hints.
func runRouteFailDisabledEdge(t *testing.T, useCache bool) {
graph, err := parseTestGraph(useCache, basicGraphFilePath)
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer graph.cleanUp()
sourceNode, err := graph.graph.SourceNode()
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
require.NoError(t, err, "unable to fetch source node")
// First, we'll try to route from roasbeef -> sophon. This should
// succeed without issue, and return a single path via phamnuwen
@ -2162,18 +2100,14 @@ func runRouteFailDisabledEdge(t *testing.T, useCache bool) {
noRestrictions, testPathFindingConfig,
sourceNode.PubKeyBytes, target, payAmt, 0, 0,
)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
// Disable the edge roasbeef->phamnuwen. This should not impact the
// path finding, as we don't consider the disable flag for local
// channels (and roasbeef is the source).
roasToPham := uint64(999991)
_, e1, e2, err := graph.graph.FetchChannelEdgesByID(roasToPham)
if err != nil {
t.Fatalf("unable to fetch edge: %v", err)
}
require.NoError(t, err, "unable to fetch edge")
e1.ChannelFlags |= lnwire.ChanUpdateDisabled
if err := graph.graph.UpdateEdgePolicy(e1); err != nil {
t.Fatalf("unable to update edge: %v", err)
@ -2188,17 +2122,13 @@ func runRouteFailDisabledEdge(t *testing.T, useCache bool) {
noRestrictions, testPathFindingConfig,
sourceNode.PubKeyBytes, target, payAmt, 0, 0,
)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
// Now, we'll modify the edge from phamnuwen -> sophon, to read that
// it's disabled.
phamToSophon := uint64(99999)
_, e, _, err := graph.graph.FetchChannelEdgesByID(phamToSophon)
if err != nil {
t.Fatalf("unable to fetch edge: %v", err)
}
require.NoError(t, err, "unable to fetch edge")
e.ChannelFlags |= lnwire.ChanUpdateDisabled
if err := graph.graph.UpdateEdgePolicy(e); err != nil {
t.Fatalf("unable to update edge: %v", err)
@ -2221,15 +2151,11 @@ func runRouteFailDisabledEdge(t *testing.T, useCache bool) {
// use a local channel.
func runPathSourceEdgesBandwidth(t *testing.T, useCache bool) {
graph, err := parseTestGraph(useCache, basicGraphFilePath)
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer graph.cleanUp()
sourceNode, err := graph.graph.SourceNode()
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
require.NoError(t, err, "unable to fetch source node")
// First, we'll try to route from roasbeef -> sophon. This should
// succeed without issue, and return a path via songoku, as that's the
@ -2241,9 +2167,7 @@ func runPathSourceEdgesBandwidth(t *testing.T, useCache bool) {
noRestrictions, testPathFindingConfig,
sourceNode.PubKeyBytes, target, payAmt, 0, 0,
)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
assertExpectedPath(t, graph.aliasMap, path, "songoku", "sophon")
// Now we'll set the bandwidth of the edge roasbeef->songoku and
@ -2279,18 +2203,14 @@ func runPathSourceEdgesBandwidth(t *testing.T, useCache bool) {
noRestrictions, testPathFindingConfig,
sourceNode.PubKeyBytes, target, payAmt, 0, 0,
)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
assertExpectedPath(t, graph.aliasMap, path, "phamnuwen", "sophon")
// Finally, set the roasbeef->songoku bandwidth, but also set its
// disable flag.
bandwidths.hints[roasToSongoku] = 2 * payAmt
_, e1, e2, err := graph.graph.FetchChannelEdgesByID(roasToSongoku)
if err != nil {
t.Fatalf("unable to fetch edge: %v", err)
}
require.NoError(t, err, "unable to fetch edge")
e1.ChannelFlags |= lnwire.ChanUpdateDisabled
if err := graph.graph.UpdateEdgePolicy(e1); err != nil {
t.Fatalf("unable to update edge: %v", err)
@ -2307,9 +2227,7 @@ func runPathSourceEdgesBandwidth(t *testing.T, useCache bool) {
noRestrictions, testPathFindingConfig,
sourceNode.PubKeyBytes, target, payAmt, 0, 0,
)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
assertExpectedPath(t, graph.aliasMap, path, "songoku", "sophon")
}
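// A minimal sketch of the flag handling exercised above: the disabled
// bit is a single flag in the policy's ChannelFlags bitfield, so
// setting and testing it are plain bitwise operations (isEdgeDisabled
// is a hypothetical helper, not part of this file):
func isEdgeDisabled(e *channeldb.ChannelEdgePolicy) bool {
	return e.ChannelFlags&lnwire.ChanUpdateDisabled != 0
}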
@ -2340,9 +2258,7 @@ func TestPathFindSpecExample(t *testing.T) {
// Bob.
bob := ctx.aliases["B"]
bobNode, err := ctx.graph.FetchLightningNode(bob)
if err != nil {
t.Fatalf("unable to find bob: %v", err)
}
require.NoError(t, err, "unable to find bob")
if err := ctx.graph.SetSourceNode(bobNode); err != nil {
t.Fatalf("unable to set source node: %v", err)
}
@ -2354,9 +2270,7 @@ func TestPathFindSpecExample(t *testing.T) {
bobNode.PubKeyBytes, carol, amt, 0, noRestrictions, nil, nil,
MinCLTVDelta,
)
if err != nil {
t.Fatalf("unable to find route: %v", err)
}
require.NoError(t, err, "unable to find route")
// Now we'll examine the route returned for correctness.
//
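// For reference, the fee examined at each hop below follows the
// BOLT #7 forwarding formula (a sketch; the names are illustrative):
//
//	fee = feeBaseMsat + (amtToForward*feeProportionalMillionths)/1_000_000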
@ -2390,17 +2304,13 @@ func TestPathFindSpecExample(t *testing.T) {
// the proper route for any queries starting with Alice.
alice := ctx.aliases["A"]
aliceNode, err := ctx.graph.FetchLightningNode(alice)
if err != nil {
t.Fatalf("unable to find alice: %v", err)
}
require.NoError(t, err, "unable to find alice")
if err := ctx.graph.SetSourceNode(aliceNode); err != nil {
t.Fatalf("unable to set source node: %v", err)
}
ctx.router.selfNode = aliceNode
source, err := ctx.graph.SourceNode()
if err != nil {
t.Fatalf("unable to retrieve source node: %v", err)
}
require.NoError(t, err, "unable to retrieve source node")
if source.PubKeyBytes != alice {
t.Fatalf("source node not set")
}
@ -2410,9 +2320,7 @@ func TestPathFindSpecExample(t *testing.T) {
source.PubKeyBytes, carol, amt, 0, noRestrictions, nil, nil,
MinCLTVDelta,
)
if err != nil {
t.Fatalf("unable to find routes: %v", err)
}
require.NoError(t, err, "unable to find routes")
// The route should be two hops.
if len(route.Hops) != 2 {
@ -2569,9 +2477,7 @@ func runRestrictOutgoingChannel(t *testing.T, useCache bool) {
// outgoing channel.
ctx.restrictParams.OutgoingChannelIDs = []uint64{outgoingChannelID}
path, err := ctx.findPath(target, paymentAmt)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
// Assert that the route starts with channel chanSourceB1, in line with
// the specified restriction.
@ -2587,9 +2493,7 @@ func runRestrictOutgoingChannel(t *testing.T, useCache bool) {
chanSourceB1, chanSourceTarget,
}
path, err = ctx.findPath(target, paymentAmt)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
if path[0].ChannelID != chanSourceTarget {
t.Fatalf("expected route to pass through channel %v",
chanSourceTarget)
@ -2629,9 +2533,7 @@ func runRestrictLastHop(t *testing.T, useCache bool) {
// This should force pathfinding to not take the lowest cost option.
ctx.restrictParams.LastHop = &lastHop
path, err := ctx.findPath(target, paymentAmt)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
if path[0].ChannelID != 3 {
t.Fatalf("expected route to pass through channel 3, "+
"but channel %v was selected instead",
@ -2704,9 +2606,7 @@ func testCltvLimit(t *testing.T, useCache bool, limit uint32,
}
t.Fatal("expected no path to be found")
}
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
const (
startingHeight = 100
@ -2720,9 +2620,7 @@ func testCltvLimit(t *testing.T, useCache bool, limit uint32,
records: nil,
},
)
if err != nil {
t.Fatalf("unable to create path: %v", err)
}
require.NoError(t, err, "unable to create path")
// Assert that the route starts with the expected channel.
if route.Hops[0].ChannelID != expectedChannel {
@ -3037,9 +2935,7 @@ func runNoCycle(t *testing.T, useCache bool) {
// Find the best path given the restriction to only use channel 2 as the
// outgoing channel.
path, err := ctx.findPath(target, paymentAmt)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
route, err := newRoute(
ctx.source, path, startingHeight,
finalHopParams{
@ -3048,9 +2944,7 @@ func runNoCycle(t *testing.T, useCache bool) {
records: nil,
},
)
if err != nil {
t.Fatalf("unable to create path: %v", err)
}
require.NoError(t, err, "unable to create path")
if len(route.Hops) != 2 {
t.Fatalf("unexpected route")
@ -3089,9 +2983,7 @@ func runRouteToSelf(t *testing.T, useCache bool) {
// Find the best path to self. We expect this to be source->a->source,
// because a charges the lowest forwarding fee.
path, err := ctx.findPath(target, paymentAmt)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
ctx.assertPath(path, []uint64{1, 1})
outgoingChanID := uint64(1)
@ -3102,9 +2994,7 @@ func runRouteToSelf(t *testing.T, useCache bool) {
// Find the best path to self given that we want to go out via channel 1
// and return through node b.
path, err = ctx.findPath(target, paymentAmt)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
ctx.assertPath(path, []uint64{1, 3, 2})
}
@ -3125,14 +3015,10 @@ func newPathFindingTestContext(t *testing.T, useCache bool,
testGraphInstance, err := createTestGraphFromChannels(
useCache, testChannels, source,
)
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
sourceNode, err := testGraphInstance.graph.SourceNode()
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
require.NoError(t, err, "unable to fetch source node")
ctx := &pathFindingTestContext{
t: t,

View file

@ -181,9 +181,7 @@ func TestRouterPaymentStateMachine(t *testing.T) {
}
testGraph, err := createTestGraphFromChannels(true, testChannels, "a")
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer testGraph.cleanUp()
paymentAmt := lnwire.NewMSatFromSatoshis(1000)
@ -191,9 +189,7 @@ func TestRouterPaymentStateMachine(t *testing.T) {
// We create a simple route that we will supply every time the router
// requests one.
rt, err := createTestRoute(paymentAmt, testGraph.aliasMap)
if err != nil {
t.Fatalf("unable to create route: %v", err)
}
require.NoError(t, err, "unable to create route")
tests := []paymentLifecycleTestCase{
{

View file

@ -1168,9 +1168,7 @@ func TestAddProof(t *testing.T) {
fundingTx, _, chanID, err := createChannelEdge(ctx,
bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(),
100, 0)
if err != nil {
t.Fatalf("unable create channel edge: %v", err)
}
require.NoError(t, err, "unable create channel edge")
fundingBlock := &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@ -1197,9 +1195,7 @@ func TestAddProof(t *testing.T) {
}
info, _, _, err := ctx.router.GetChannelByID(*chanID)
if err != nil {
t.Fatalf("unable to get channel: %v", err)
}
require.NoError(t, err, "unable to get channel")
if info.AuthProof == nil {
t.Fatal("proof have been updated")
}
@ -1247,9 +1243,7 @@ func TestIgnoreChannelEdgePolicyForUnknownChannel(t *testing.T) {
testGraph, err := createTestGraphFromChannels(
true, testChannels, "roasbeef",
)
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer testGraph.cleanUp()
ctx, cleanUp := createTestCtxFromGraphInstance(
@ -1269,9 +1263,7 @@ func TestIgnoreChannelEdgePolicyForUnknownChannel(t *testing.T) {
ctx, bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(), 10000, 500,
)
if err != nil {
t.Fatalf("unable to create channel edge: %v", err)
}
require.NoError(t, err, "unable to create channel edge")
fundingBlock := &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@ -1335,16 +1327,12 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
// The two nodes we are about to add should not exist yet.
_, exists1, err := ctx.graph.HasLightningNode(pub1)
if err != nil {
t.Fatalf("unable to query graph: %v", err)
}
require.NoError(t, err, "unable to query graph")
if exists1 {
t.Fatalf("node already existed")
}
_, exists2, err := ctx.graph.HasLightningNode(pub2)
if err != nil {
t.Fatalf("unable to query graph: %v", err)
}
require.NoError(t, err, "unable to query graph")
if exists2 {
t.Fatalf("node already existed")
}
@ -1356,9 +1344,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
bitcoinKey2.SerializeCompressed(),
10000, 500,
)
if err != nil {
t.Fatalf("unable to create channel edge: %v", err)
}
require.NoError(t, err, "unable to create channel edge")
fundingBlock := &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@ -1419,16 +1405,12 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
// After adding the edge between the two previously unknown nodes, they
// should have been added to the graph.
_, exists1, err = ctx.graph.HasLightningNode(pub1)
if err != nil {
t.Fatalf("unable to query graph: %v", err)
}
require.NoError(t, err, "unable to query graph")
if !exists1 {
t.Fatalf("node1 was not added to the graph")
}
_, exists2, err = ctx.graph.HasLightningNode(pub2)
if err != nil {
t.Fatalf("unable to query graph: %v", err)
}
require.NoError(t, err, "unable to query graph")
if !exists2 {
t.Fatalf("node2 was not added to the graph")
}
@ -1461,9 +1443,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
fundingTx, _, chanID, err = createChannelEdge(ctx,
pubKey1.SerializeCompressed(), pubKey2.SerializeCompressed(),
10000, 510)
if err != nil {
t.Fatalf("unable to create channel edge: %v", err)
}
require.NoError(t, err, "unable to create channel edge")
fundingBlock = &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@ -1528,9 +1508,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
targetPubKeyBytes, paymentAmt, 0, noRestrictions, nil, nil,
MinCLTVDelta,
)
if err != nil {
t.Fatalf("unable to find any routes: %v", err)
}
require.NoError(t, err, "unable to find any routes")
// Now check that we can update the node info for the partial node
// without messing up the channel graph.
@ -1571,23 +1549,17 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
targetPubKeyBytes, paymentAmt, 0, noRestrictions, nil, nil,
MinCLTVDelta,
)
if err != nil {
t.Fatalf("unable to find any routes: %v", err)
}
require.NoError(t, err, "unable to find any routes")
copy1, err := ctx.graph.FetchLightningNode(pub1)
if err != nil {
t.Fatalf("unable to fetch node: %v", err)
}
require.NoError(t, err, "unable to fetch node")
if copy1.Alias != n1.Alias {
t.Fatalf("fetched node not equal to original")
}
copy2, err := ctx.graph.FetchLightningNode(pub2)
if err != nil {
t.Fatalf("unable to fetch node: %v", err)
}
require.NoError(t, err, "unable to fetch node")
if copy2.Alias != n2.Alias {
t.Fatalf("fetched node not equal to original")
@ -1642,9 +1614,7 @@ func TestWakeUpOnStaleBranch(t *testing.T) {
time.Sleep(time.Millisecond * 500)
_, forkHeight, err := ctx.chain.GetBestBlock()
if err != nil {
t.Fatalf("unable to ge best block: %v", err)
}
require.NoError(t, err, "unable to ge best block")
// Create 10 blocks on the minority chain, confirming chanID2.
for i := uint32(1); i <= 10; i++ {
@ -1675,13 +1645,9 @@ func TestWakeUpOnStaleBranch(t *testing.T) {
// Now add the two edges to the channel graph, and check that they
// correctly show up in the database.
node1, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
edge1 := &channeldb.ChannelEdgeInfo{
ChannelID: chanID1,
@ -1851,9 +1817,7 @@ func TestDisconnectedBlocks(t *testing.T) {
time.Sleep(time.Millisecond * 500)
_, forkHeight, err := ctx.chain.GetBestBlock()
if err != nil {
t.Fatalf("unable to get best block: %v", err)
}
require.NoError(t, err, "unable to get best block")
// Create 10 blocks on the minority chain, confirming chanID2.
var minorityChain []*wire.MsgBlock
@ -1886,13 +1850,9 @@ func TestDisconnectedBlocks(t *testing.T) {
// Now add the two edges to the channel graph, and check that they
// correctly show up in the database.
node1, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
edge1 := &channeldb.ChannelEdgeInfo{
ChannelID: chanID1,
@ -2032,9 +1992,7 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) {
bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(),
chanValue, uint32(nextHeight))
if err != nil {
t.Fatalf("unable create channel edge: %v", err)
}
require.NoError(t, err, "unable create channel edge")
block102.Transactions = append(block102.Transactions, fundingTx1)
ctx.chain.addBlock(block102, uint32(nextHeight), rand.Uint32())
ctx.chain.setBestBlock(int32(nextHeight))
@ -2045,13 +2003,9 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) {
// for the ChannelRouter to properly recognize the channel we added
// above.
node1, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
require.NoError(t, err, "unable to create test node")
edge1 := &channeldb.ChannelEdgeInfo{
ChannelID: chanID1.ToUint64(),
NodeKey1Bytes: node1.PubKeyBytes,
@ -2097,9 +2051,7 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) {
// At this point, our starting height should be 107.
_, chainHeight, err := ctx.chain.GetBestBlock()
if err != nil {
t.Fatalf("unable to get best block: %v", err)
}
require.NoError(t, err, "unable to get best block")
if chainHeight != 107 {
t.Fatalf("incorrect chain height: expected %v, got %v",
107, chainHeight)
@ -2139,9 +2091,7 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) {
// At this point, our starting height should be 112.
_, chainHeight, err = ctx.chain.GetBestBlock()
if err != nil {
t.Fatalf("unable to get best block: %v", err)
}
require.NoError(t, err, "unable to get best block")
if chainHeight != 112 {
t.Fatalf("incorrect chain height: expected %v, got %v",
112, chainHeight)
@ -2394,9 +2344,7 @@ func testPruneChannelGraphDoubleDisabled(t *testing.T, assumeValid bool) {
testGraph, err := createTestGraphFromChannels(
true, testChannels, "self",
)
if err != nil {
t.Fatalf("unable to create test graph: %v", err)
}
require.NoError(t, err, "unable to create test graph")
defer testGraph.cleanUp()
const startingHeight = 100
@ -2450,9 +2398,7 @@ func TestFindPathFeeWeighting(t *testing.T) {
copy(preImage[:], bytes.Repeat([]byte{9}, 32))
sourceNode, err := ctx.graph.SourceNode()
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
require.NoError(t, err, "unable to fetch source node")
amt := lnwire.MilliSatoshi(100)
@ -2467,9 +2413,7 @@ func TestFindPathFeeWeighting(t *testing.T) {
testPathFindingConfig,
sourceNode.PubKeyBytes, target, amt, 0, 0,
)
if err != nil {
t.Fatalf("unable to find path: %v", err)
}
require.NoError(t, err, "unable to find path")
// The route that was chosen should be exactly one hop, and should be
// directly to luoji.
@ -2503,9 +2447,7 @@ func TestIsStaleNode(t *testing.T) {
bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(),
10000, 500)
if err != nil {
t.Fatalf("unable to create channel edge: %v", err)
}
require.NoError(t, err, "unable to create channel edge")
fundingBlock := &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@ -2582,9 +2524,7 @@ func TestIsKnownEdge(t *testing.T) {
bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(),
10000, 500)
if err != nil {
t.Fatalf("unable to create channel edge: %v", err)
}
require.NoError(t, err, "unable to create channel edge")
fundingBlock := &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@ -2633,9 +2573,7 @@ func TestIsStaleEdgePolicy(t *testing.T) {
bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(),
10000, 500)
if err != nil {
t.Fatalf("unable to create channel edge: %v", err)
}
require.NoError(t, err, "unable to create channel edge")
fundingBlock := &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@ -2765,9 +2703,7 @@ func TestUnknownErrorSource(t *testing.T) {
testGraph, err := createTestGraphFromChannels(true, testChannels, "a")
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer testGraph.cleanUp()
const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromGraphInstance(
@ -2807,9 +2743,7 @@ func TestUnknownErrorSource(t *testing.T) {
// which should prune the channel a->b. We expect the payment to
// succeed via a->d.
_, _, err = ctx.router.SendPayment(&payment)
if err != nil {
t.Fatalf("expected payment to succeed, but got: %v", err)
}
require.NoError(t, err, "expected payment to succeed, but got")
// Next we modify payment result to return an unknown failure.
ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcherOld).setPaymentResult(
@ -2900,9 +2834,7 @@ func TestSendToRouteStructuredError(t *testing.T) {
}
testGraph, err := createTestGraphFromChannels(true, testChannels, "a")
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer testGraph.cleanUp()
const startingBlockHeight = 101
@ -2938,9 +2870,7 @@ func TestSendToRouteStructuredError(t *testing.T) {
}
rt, err := route.NewRouteFromHops(payAmt, 100, ctx.aliases["a"], hops)
if err != nil {
t.Fatalf("unable to create route: %v", err)
}
require.NoError(t, err, "unable to create route")
finalHopIndex := len(hops)
testCases := map[int]lnwire.FailureMessage{
@ -3036,9 +2966,7 @@ func TestSendToRouteMultiShardSend(t *testing.T) {
rt, err := route.NewRouteFromHops(
payAmt, 100, sourceNode.PubKeyBytes, hops,
)
if err != nil {
t.Fatalf("unable to create route: %v", err)
}
require.NoError(t, err, "unable to create route")
// We'll fail the first shard we send immediately, to check that we are
// still allowed to retry with other shards after a failed one.
@ -3149,9 +3077,7 @@ func TestSendToRouteMaxHops(t *testing.T) {
}
testGraph, err := createTestGraphFromChannels(true, testChannels, "a")
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer testGraph.cleanUp()
const startingBlockHeight = 101
@ -3184,9 +3110,7 @@ func TestSendToRouteMaxHops(t *testing.T) {
}
rt, err := route.NewRouteFromHops(payAmt, 100, ctx.aliases["a"], hops)
if err != nil {
t.Fatalf("unable to create route: %v", err)
}
require.NoError(t, err, "unable to create route")
// Send off the payment request to the router. We expect an error back
// indicating that the route is too long.
@ -3260,9 +3184,7 @@ func TestBuildRoute(t *testing.T) {
}
testGraph, err := createTestGraphFromChannels(true, testChannels, "a")
if err != nil {
t.Fatalf("unable to create graph: %v", err)
}
require.NoError(t, err, "unable to create graph")
defer testGraph.cleanUp()
const startingBlockHeight = 101
@ -4358,7 +4280,5 @@ func TestBlockDifferenceFix(t *testing.T) {
return nil
}, testTimeout)
if err != nil {
t.Fatalf("block height wasn't updated: %v", err)
}
require.NoError(t, err, "block height wasn't updated")
}
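// Worth noting for the conversions above: require.NoError stops the
// failing test via t.FailNow, preserving the old t.Fatalf semantics,
// whereas the assert variant records the failure and lets the test
// keep running (a sketch of the difference):
//
//	assert.NoError(t, err)  // marks the test failed, continues
//	require.NoError(t, err) // marks the test failed, stops it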

View file

@ -20,6 +20,7 @@ import (
"time"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/stretchr/testify/require"
)
// TestTLSAutoRegeneration creates an expired TLS certificate, to test that a
@ -37,9 +38,7 @@ func TestTLSAutoRegeneration(t *testing.T) {
certDerBytes, keyBytes := genExpiredCertPair(t, tempDirPath)
expiredCert, err := x509.ParseCertificate(certDerBytes)
if err != nil {
t.Fatalf("failed to parse certificate: %v", err)
}
require.NoError(t, err, "failed to parse certificate")
certBuf := bytes.Buffer{}
err = pem.Encode(
@ -48,9 +47,7 @@ func TestTLSAutoRegeneration(t *testing.T) {
Bytes: certDerBytes,
},
)
if err != nil {
t.Fatalf("failed to encode certificate: %v", err)
}
require.NoError(t, err, "failed to encode certificate")
keyBuf := bytes.Buffer{}
err = pem.Encode(
@ -59,19 +56,13 @@ func TestTLSAutoRegeneration(t *testing.T) {
Bytes: keyBytes,
},
)
if err != nil {
t.Fatalf("failed to encode private key: %v", err)
}
require.NoError(t, err, "failed to encode private key")
// Write cert and key files.
err = ioutil.WriteFile(tempDirPath+"/tls.cert", certBuf.Bytes(), 0644)
if err != nil {
t.Fatalf("failed to write cert file: %v", err)
}
require.NoError(t, err, "failed to write cert file")
err = ioutil.WriteFile(tempDirPath+"/tls.key", keyBuf.Bytes(), 0600)
if err != nil {
t.Fatalf("failed to write key file: %v", err)
}
require.NoError(t, err, "failed to write key file")
rpcListener := net.IPAddr{IP: net.ParseIP("127.0.0.1"), Zone: ""}
rpcListeners := make([]net.Addr, 0)
@ -118,9 +109,7 @@ func genExpiredCertPair(t *testing.T, certDirPath string) ([]byte, []byte) {
// Generate a serial number that's below the serialNumberLimit.
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
t.Fatalf("failed to generate serial number: %s", err)
}
require.NoError(t, err, "failed to generate serial number")
host := "lightning"
@ -157,14 +146,10 @@ func genExpiredCertPair(t *testing.T, certDirPath string) ([]byte, []byte) {
certDerBytes, err := x509.CreateCertificate(
rand.Reader, &template, &template, &priv.PublicKey, priv,
)
if err != nil {
t.Fatalf("failed to create certificate: %v", err)
}
require.NoError(t, err, "failed to create certificate")
keyBytes, err := x509.MarshalECPrivateKey(priv)
if err != nil {
t.Fatalf("unable to encode privkey: %v", err)
}
require.NoError(t, err, "unable to encode privkey")
return certDerBytes, keyBytes
}
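// A minimal sketch of how a caller could confirm that the pair
// generated above is in fact expired (isExpired is a hypothetical
// helper, standard library only):
func isExpired(certDerBytes []byte) (bool, error) {
	cert, err := x509.ParseCertificate(certDerBytes)
	if err != nil {
		return false, err
	}
	return time.Now().After(cert.NotAfter), nil
}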

View file

@ -5,6 +5,7 @@ import (
"testing"
"github.com/go-errors/errors"
"github.com/stretchr/testify/require"
)
// bitsToIndex is a helper function which takes 'n' last bits as input and
@ -48,13 +49,9 @@ func generateTests(t *testing.T) []deriveTest {
)
from, err = bitsToIndex(0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
to, err = bitsToIndex(0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
tests = append(tests, deriveTest{
name: "zero 'from' 'to'",
from: from,
@ -64,13 +61,9 @@ func generateTests(t *testing.T) []deriveTest {
})
from, err = bitsToIndex(0, 1, 0, 0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
to, err = bitsToIndex(0, 1, 0, 0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
tests = append(tests, deriveTest{
name: "same indexes #1",
from: from,
@ -80,13 +73,9 @@ func generateTests(t *testing.T) []deriveTest {
})
from, err = bitsToIndex(1)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
to, err = bitsToIndex(0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
tests = append(tests, deriveTest{
name: "same indexes #2",
from: from,
@ -95,13 +84,9 @@ func generateTests(t *testing.T) []deriveTest {
})
from, err = bitsToIndex(0, 0, 0, 0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
to, err = bitsToIndex(0, 0, 1, 0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
tests = append(tests, deriveTest{
name: "test seed 'from'",
from: from,
@ -111,13 +96,9 @@ func generateTests(t *testing.T) []deriveTest {
})
from, err = bitsToIndex(1, 1, 0, 0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
to, err = bitsToIndex(0, 1, 0, 0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
tests = append(tests, deriveTest{
name: "not the same indexes",
from: from,
@ -126,13 +107,9 @@ func generateTests(t *testing.T) []deriveTest {
})
from, err = bitsToIndex(1, 0, 1, 0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
to, err = bitsToIndex(1, 0, 0, 0)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
tests = append(tests, deriveTest{
name: "'from' index greater then 'to' index",
from: from,
@ -141,13 +118,9 @@ func generateTests(t *testing.T) []deriveTest {
})
from, err = bitsToIndex(1)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
to, err = bitsToIndex(1)
if err != nil {
t.Fatalf("can't generate from index: %v", err)
}
require.NoError(t, err, "can't generate from index")
tests = append(tests, deriveTest{
name: "zero number trailing zeros",
from: from,

View file

@ -6,6 +6,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/stretchr/testify/require"
)
// TestStore asserts that the store persists the presented data to disk and is
@ -125,9 +126,7 @@ func testStore(t *testing.T, createStore func() (SweeperStore, error)) {
}
txns, err := store.ListSweeps()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
require.NoError(t, err, "unexpected error")
// Create a map containing the sweeps we expect to be returned by list
// sweeps.

View file

@ -1237,9 +1237,7 @@ func TestBumpFeeRBF(t *testing.T) {
bumpResult, err := ctx.sweeper.UpdateParams(
*input.OutPoint(), ParamsUpdate{Fee: highFeePref},
)
if err != nil {
t.Fatalf("unable to bump input's fee: %v", err)
}
require.NoError(t, err, "unable to bump input's fee")
// A higher fee rate transaction should be immediately broadcast.
ctx.tick()

View file

@ -12,6 +12,7 @@ import (
"github.com/lightningnetwork/lnd/lntest/mock"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/stretchr/testify/require"
)
// TestDetermineFeePerKw tests that given a fee preference, the
@ -352,9 +353,7 @@ func TestCraftSweepAllTx(t *testing.T) {
0, 10, nil, deliveryAddr, coinSelectLocker, utxoSource,
utxoLocker, feeEstimator, signer, 0,
)
if err != nil {
t.Fatalf("unable to make sweep tx: %v", err)
}
require.NoError(t, err, "unable to make sweep tx")
// At this point, all of the UTXOs that we made above should be locked
// and none of them unlocked.

View file

@ -82,9 +82,7 @@ func testParsedTypes(t *testing.T, test parsedTypeTest) {
parsedTypes, err := decStream.DecodeWithParsedTypes(
bytes.NewReader(b.Bytes()),
)
if err != nil {
t.Fatalf("error decoding: %v", err)
}
require.NoError(t, err, "error decoding")
if !reflect.DeepEqual(parsedTypes, test.expParsedTypes) {
t.Fatalf("error mismatch on parsed types")
}
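// The commit-wide pattern extends naturally to equality checks as
// well; a possible follow-up sketch for the comparison above (not
// applied here):
//
//	require.Equal(t, test.expParsedTypes, parsedTypes,
//		"error mismatch on parsed types")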

View file

@ -17,9 +17,7 @@ func TestOnionFile(t *testing.T) {
t.Parallel()
tempDir, err := ioutil.TempDir("", "onion_store")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
require.NoError(t, err, "unable to create temp dir")
privateKey := []byte("hide_me_plz")
privateKeyPath := filepath.Join(tempDir, "secret")
@ -36,9 +34,7 @@ func TestOnionFile(t *testing.T) {
t.Fatalf("unable to store private key: %v", err)
}
storePrivateKey, err := onionFile.PrivateKey(V2)
if err != nil {
t.Fatalf("unable to retrieve private key: %v", err)
}
require.NoError(t, err, "unable to retrieve private key")
if !bytes.Equal(storePrivateKey, privateKey) {
t.Fatalf("expected private key \"%v\", got \"%v\"",
string(privateKey), string(storePrivateKey))

View file

@ -166,9 +166,7 @@ func testBlobJusticeKitEncryptDecrypt(t *testing.T, test descriptorTest) {
// party's commitment txid as the key.
var key blob.BreachKey
_, err := rand.Read(key[:])
if err != nil {
t.Fatalf("unable to generate blob encryption key: %v", err)
}
require.NoError(t, err, "unable to generate blob encryption key")
// Encrypt the blob plaintext using the generated key and
// target version for this test.

View file

@ -15,6 +15,7 @@ import (
"github.com/lightningnetwork/lnd/watchtower/wtdb"
"github.com/lightningnetwork/lnd/watchtower/wtmock"
"github.com/lightningnetwork/lnd/watchtower/wtpolicy"
"github.com/stretchr/testify/require"
)
type mockPunisher struct {
@ -116,13 +117,9 @@ func TestLookoutBreachMatching(t *testing.T) {
// Insert both sessions into the watchtower's database.
err := db.InsertSessionInfo(sessionInfo1)
if err != nil {
t.Fatalf("unable to insert session info: %v", err)
}
require.NoError(t, err, "unable to insert session info")
err = db.InsertSessionInfo(sessionInfo2)
if err != nil {
t.Fatalf("unable to insert session info: %v", err)
}
require.NoError(t, err, "unable to insert session info")
// Construct two distinct transactions that will be used to test the
// breach hint matching.
@ -160,15 +157,11 @@ func TestLookoutBreachMatching(t *testing.T) {
// Encrypt the first justice kit under breach key one.
encBlob1, err := blob1.Encrypt(key1)
if err != nil {
t.Fatalf("unable to encrypt sweep detail 1: %v", err)
}
require.NoError(t, err, "unable to encrypt sweep detail 1")
// Encrypt the second justice kit under breach key two.
encBlob2, err := blob2.Encrypt(key2)
if err != nil {
t.Fatalf("unable to encrypt sweep detail 2: %v", err)
}
require.NoError(t, err, "unable to encrypt sweep detail 2")
// Add both state updates to the tower's database.
txBlob1 := &wtdb.SessionStateUpdate{

View file

@ -23,6 +23,7 @@ import (
"github.com/lightningnetwork/lnd/watchtower/wtdb"
"github.com/lightningnetwork/lnd/watchtower/wtmock"
"github.com/lightningnetwork/lnd/watchtower/wtpolicy"
"github.com/stretchr/testify/require"
)
const csvDelay uint32 = 144
@ -602,9 +603,7 @@ func testBackupTask(t *testing.T, test backupTaskTest) {
// Now, we'll construct, sign, and encrypt the blob containing the parts
// needed to reconstruct the justice transaction.
hint, encBlob, err := task.craftSessionPayload(test.signer)
if err != nil {
t.Fatalf("unable to craft session payload: %v", err)
}
require.NoError(t, err, "unable to craft session payload")
// Verify that the breach hint matches the breach txid's prefix.
breachTxID := test.breachInfo.BreachTxHash
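// A sketch of the check implied by the comment above, assuming the
// blob package exposes a NewBreachHintFromHash helper that derives
// the hint from the txid prefix:
//
//	require.Equal(t, blob.NewBreachHintFromHash(&breachTxID), hint)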
@ -618,9 +617,7 @@ func testBackupTask(t *testing.T, test backupTaskTest) {
// contents.
key := blob.NewBreachKeyFromHash(&breachTxID)
jKit, err := blob.Decrypt(key, encBlob, policy.BlobType)
if err != nil {
t.Fatalf("unable to decrypt blob: %v", err)
}
require.NoError(t, err, "unable to decrypt blob")
keyRing := test.breachInfo.KeyRing
expToLocalPK := keyRing.ToLocalKey.SerializeCompressed()

View file

@ -11,6 +11,7 @@ import (
"github.com/btcsuite/btcd/btcec/v2"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/watchtower/wtdb"
"github.com/stretchr/testify/require"
)
func init() {
@ -35,9 +36,7 @@ func randAddr(t *testing.T) net.Addr {
func randTower(t *testing.T) *wtdb.Tower {
priv, err := btcec.NewPrivateKey()
if err != nil {
t.Fatalf("unable to create private key: %v", err)
}
require.NoError(t, err, "unable to create private key")
pubKey := priv.PubKey()
return &wtdb.Tower{
ID: wtdb.TowerID(rand.Uint64()),

View file

@ -69,9 +69,7 @@ func randPrivKey(t *testing.T) *btcec.PrivateKey {
t.Helper()
sk, err := btcec.NewPrivateKey()
if err != nil {
t.Fatalf("unable to generate pubkey: %v", err)
}
require.NoError(t, err, "unable to generate pubkey")
return sk
}
@ -200,21 +198,15 @@ func (c *mockChannel) createRemoteCommitTx(t *testing.T) {
toLocalScript, err := input.CommitScriptToSelf(
c.csvDelay, c.toLocalPK, c.revPK,
)
if err != nil {
t.Fatalf("unable to create to-local script: %v", err)
}
require.NoError(t, err, "unable to create to-local script")
// Compute the to-local witness script hash.
toLocalScriptHash, err := input.WitnessScriptHash(toLocalScript)
if err != nil {
t.Fatalf("unable to create to-local witness script hash: %v", err)
}
require.NoError(t, err, "unable to create to-local witness script hash")
// Compute the to-remote witness script hash.
toRemoteScriptHash, err := input.CommitScriptUnencumbered(c.toRemotePK)
if err != nil {
t.Fatalf("unable to create to-remote script: %v", err)
}
require.NoError(t, err, "unable to create to-remote script")
// Construct the remote commitment txn, containing the to-local and
// to-remote outputs. The balances are flipped since the transaction is
@ -400,14 +392,10 @@ type harnessCfg struct {
func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
towerTCPAddr, err := net.ResolveTCPAddr("tcp", towerAddrStr)
if err != nil {
t.Fatalf("Unable to resolve tower TCP addr: %v", err)
}
require.NoError(t, err, "Unable to resolve tower TCP addr")
privKey, err := btcec.NewPrivateKey()
if err != nil {
t.Fatalf("Unable to generate tower private key: %v", err)
}
require.NoError(t, err, "Unable to generate tower private key")
privKeyECDH := &keychain.PrivKeyECDH{PrivKey: privKey}
towerPubKey := privKey.PubKey()
@ -432,9 +420,7 @@ func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
}
server, err := wtserver.New(serverCfg)
if err != nil {
t.Fatalf("unable to create wtserver: %v", err)
}
require.NoError(t, err, "unable to create wtserver")
signer := wtmock.NewMockSigner()
mockNet := newMockNet(server.InboundPeerConnected)
@ -457,9 +443,7 @@ func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
ForceQuitDelay: 10 * time.Second,
}
client, err := wtclient.New(clientCfg)
if err != nil {
t.Fatalf("Unable to create wtclient: %v", err)
}
require.NoError(t, err, "Unable to create wtclient")
if err := server.Start(); err != nil {
t.Fatalf("Unable to start wtserver: %v", err)

View file

@ -16,6 +16,7 @@ import (
"github.com/lightningnetwork/lnd/watchtower/wtmock"
"github.com/lightningnetwork/lnd/watchtower/wtserver"
"github.com/lightningnetwork/lnd/watchtower/wtwire"
"github.com/stretchr/testify/require"
)
var (
@ -36,9 +37,7 @@ func randPubKey(t *testing.T) *btcec.PublicKey {
t.Helper()
sk, err := btcec.NewPrivateKey()
if err != nil {
t.Fatalf("unable to generate pubkey: %v", err)
}
require.NoError(t, err, "unable to generate pubkey")
return sk.PubKey()
}
@ -63,9 +62,7 @@ func initServer(t *testing.T, db wtserver.DB,
},
ChainHash: testnetChainHash,
})
if err != nil {
t.Fatalf("unable to create server: %v", err)
}
require.NoError(t, err, "unable to create server")
if err = s.Start(); err != nil {
t.Fatalf("unable to start server: %v", err)
@ -101,9 +98,7 @@ func TestServerOnlyAcceptOnePeer(t *testing.T) {
var b bytes.Buffer
_, err := wtwire.WriteMessage(&b, init, 0)
if err != nil {
t.Fatalf("unable to write message: %v", err)
}
require.NoError(t, err, "unable to write message")
msg := b.Bytes()

View file

@ -12,6 +12,7 @@ import (
"github.com/btcsuite/btcd/btcutil/bech32"
"github.com/btcsuite/btcd/chaincfg"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
// TestDecodeAmount ensures that the amount string in the hrp of the Invoice
@ -567,9 +568,7 @@ func TestParseMaxUint64Expiry(t *testing.T) {
expiryBytes := uint64ToBase32(expiry)
expiryReParse, err := base32ToUint64(expiryBytes)
if err != nil {
t.Fatalf("unable to parse uint64: %v", err)
}
require.NoError(t, err, "unable to parse uint64")
if expiryReParse != expiry {
t.Fatalf("wrong expiry: expected %v got %v", expiry,