package itest

import (
	"flag"
	"fmt"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/btcsuite/btcd/integration/rpctest"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/stretchr/testify/require"
)

var (
	// tempTest is a flag used to mark whether we should run the old or
	// the new test cases. Used here so we can transition smoothly during
	// our new itest construction.
	//
	// TODO(yy): remove temp flag.
	tempTest = flag.Bool("temptest", false, "run the new tests(temp)")
)

// getTestCaseSplitTrancheOld returns the sub slice of the test cases that
// should be run as the current split tranche as well as the index and slice
// offset of the tranche.
func getTestCaseSplitTrancheOld() ([]*testCase, uint, uint) {
	numTranches := defaultSplitTranches
	if testCasesSplitTranches != nil {
		numTranches = *testCasesSplitTranches
	}
	runTranche := defaultRunTranche
	if testCasesRunTranche != nil {
		runTranche = *testCasesRunTranche
	}

	// There's a special flake-hunt mode where we run the same test multiple
	// times in parallel. In that case the tranche index is equal to the
	// thread ID, but we need to actually run all tests for the regex
	// selection to work.
	threadID := runTranche
	if numTranches == 1 {
		runTranche = 0
	}
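	// Compute the bounds of this tranche. The integer division below can
	// leave a remainder, so the last tranche always extends to the final
	// test case.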
	numCases := uint(len(allTestCases))
	testsPerTranche := numCases / numTranches
	trancheOffset := runTranche * testsPerTranche
	trancheEnd := trancheOffset + testsPerTranche
	if trancheEnd > numCases || runTranche == numTranches-1 {
		trancheEnd = numCases
	}

	return allTestCases[trancheOffset:trancheEnd], threadID, trancheOffset
}

// TestLightningNetworkDaemon performs a series of integration tests amongst a
// programmatically driven network of lnd nodes.
func TestLightningNetworkDaemon(t *testing.T) {
	if *tempTest {
		t.Skip("Running new tests, old tests are skipped")
	}

	// If no tests are registered, then we can exit early.
	if len(allTestCases) == 0 {
		t.Skip("integration tests not selected with flag 'rpctest'")
	}

	// Parse testing flags that influence our test execution.
	logDir := lntest.GetLogDir()
	require.NoError(t, os.MkdirAll(logDir, 0700))
	testCases, trancheIndex, trancheOffset := getTestCaseSplitTrancheOld()
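	// Offset all listening ports by the tranche index so that multiple
	// tranches running in parallel on the same machine don't collide.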
	lntest.ApplyPortOffset(uint32(trancheIndex) * 1000)

	// Before we start any node, we need to make sure that any btcd node
	// that is started through the RPC harness uses a unique port as well to
	// avoid any port collisions.
	rpctest.ListenAddressGenerator = lntest.GenerateBtcdListenerAddresses

	// Declare the network harness here to gain access to its
	// 'OnTxAccepted' call back.
	var lndHarness *lntest.NetworkHarness
	// Create an instance of btcd's rpctest.Harness that will act as
	// the miner for all tests. This will be used to fund the wallets of
	// the nodes within the test network and to drive blockchain related
	// events within the network. Revert the default setting of accepting
	// non-standard transactions on simnet to reject them. Transactions on
	// the lightning network should always be standard to get better
	// guarantees of getting included into blocks.
	//
	// We will also connect it to our chain backend.
	miner, err := lntest.NewMiner()
	require.NoError(t, err, "failed to create new miner")
	defer func() {
		require.NoError(t, miner.Stop(), "failed to stop miner")
	}()
	// Start a chain backend.
	chainBackend, cleanUp, err := lntest.NewBackend(
		miner.P2PAddress(), harnessNetParams,
	)
	require.NoError(t, err, "new backend")
	defer func() {
		require.NoError(t, cleanUp(), "cleanup")
	}()

	// Before we start anything, we want to overwrite some of the connection
	// settings to make the tests more robust. We might need to restart the
	// miner while there are already blocks present, which will take a bit
	// longer than the 1 second the default settings amount to. Doubling
	// both values will give us retries up to 4 seconds.
	miner.MaxConnRetries = rpctest.DefaultMaxConnectionRetries * 2
	miner.ConnectionRetryTimeout = rpctest.DefaultConnectionRetryTimeout * 2

	// Set up miner and connect chain backend to it.
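	// SetUp(true, 50) creates a fresh chain for the miner and mines
	// enough blocks to provide 50 mature coinbase outputs to spend.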
	require.NoError(t, miner.SetUp(true, 50))
	require.NoError(t, miner.Client.NotifyNewTransactions(false))
	require.NoError(t, chainBackend.ConnectMiner(), "connect miner")

	// Parse database backend
	var dbBackend lntest.DatabaseBackend
	switch *dbBackendFlag {
	case "bbolt":
		dbBackend = lntest.BackendBbolt

	case "etcd":
		dbBackend = lntest.BackendEtcd

	case "postgres":
		dbBackend = lntest.BackendPostgres

	default:
		require.Fail(t, "unknown db backend")
	}

	// Now we can set up our test harness (LND instance), with the chain
	// backend we just created.
	ht := newHarnessTest(t, nil)
	binary := ht.getLndBinary()
	lndHarness, err = lntest.NewNetworkHarness(
		miner, chainBackend, binary, dbBackend,
	)
	if err != nil {
		ht.Fatalf("unable to create lightning network harness: %v", err)
	}
	defer lndHarness.Stop()
	// Spawn a new goroutine to watch for any fatal errors that any of the
	// running lnd processes encounter. If an error occurs, then the test
	// case should naturally fail as a result and we log the server error
	// here to help debug.
	go func() {
		for {
			select {
			case err, more := <-lndHarness.ProcessErrors():
				if !more {
					return
				}
				ht.Logf("lnd finished with error (stderr):\n%v",
					err)
			}
		}
	}()

	// Next mine enough blocks in order for segwit and the CSV package
	// soft-fork to activate on SimNet.
	numBlocks := harnessNetParams.MinerConfirmationWindow * 2
	if _, err := miner.Client.Generate(numBlocks); err != nil {
		ht.Fatalf("unable to generate blocks: %v", err)
	}

	// With the btcd harness created, we can now complete the
	// initialization of the network. args - list of lnd arguments,
	// example: "--debuglevel=debug"
	// TODO(roasbeef): create master balanced channel with all the monies?
	aliceBobArgs := []string{
		"--default-remote-max-htlcs=483",
		"--dust-threshold=5000000",
	}

	// Run the subset of the test cases selected in this tranche.
	for idx, testCase := range testCases {
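		// Capture the loop variable so that the closure passed to
		// t.Run below always refers to this iteration's test case.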
		testCase := testCase
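		// Give the subtest a descriptive name that encodes the
		// tranche, the test's overall position, the chain backend and
		// the test case name.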
		name := fmt.Sprintf("tranche%02d/%02d-of-%d/%s/%s",
			trancheIndex, trancheOffset+uint(idx)+1,
			len(allTestCases), chainBackend.Name(), testCase.name)

		success := t.Run(name, func(t1 *testing.T) {
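			// Normalize the test case name (replace spaces with
			// underscores) before using it to set up the harness.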
			cleanTestCaseName := strings.ReplaceAll(
				testCase.name, " ", "_",
			)

			err = lndHarness.SetUp(
				t1, cleanTestCaseName, aliceBobArgs,
			)
			require.NoError(t1,
				err, "unable to set up test lightning network",
			)
			defer func() {
				require.NoError(t1, lndHarness.TearDown())
			}()

			lndHarness.EnsureConnected(
				t1, lndHarness.Alice, lndHarness.Bob,
			)

			logLine := fmt.Sprintf(
				"STARTING ============ %v ============\n",
				testCase.name,
			)

			lndHarness.Alice.AddToLogf(logLine)
			lndHarness.Bob.AddToLogf(logLine)

			// Start every test with the default static fee estimate.
			lndHarness.SetFeeEstimate(12500)

			// Create a separate harness test for the testcase to
			// avoid overwriting the external harness test that is
			// tied to the parent test.
			ht := newHarnessTest(t1, lndHarness)
			ht.RunTestCase(testCase)
		})

		// Stop at the first failure. Mimic behavior of original test
		// framework.
		if !success {
			// Log failure time to help relate the lnd logs to the
			// failure.
			t.Logf("Failure time: %v", time.Now().Format(
				"2006-01-02 15:04:05.000",
			))
			break
		}
	}
}