Mirror of https://github.com/lightningnetwork/lnd.git (synced 2024-11-19 09:53:54 +01:00)

Merge pull request #5260 from guggero/windows-itest

Travis: fix Windows itest

This change is contained in commit e39d00900c.
@@ -77,7 +77,9 @@ jobs:
     - name: Btcd Integration Windows
       script:
-        - make itest-parallel windows=1
+        # The windows VM seems to be slower than the other Travis VMs. We only
+        # run 2 test suites in parallel instead of the default 4.
+        - make itest-parallel windows=1 ITEST_PARALLELISM=2
       os: windows
       before_install:
         - choco upgrade --no-progress -y make netcat curl findutils
@@ -32,6 +32,9 @@ import (
 // DefaultCSV is the CSV delay (remotedelay) we will start our test nodes with.
 const DefaultCSV = 4
 
+// NodeOption is a function for updating a node's configuration.
+type NodeOption func(*NodeConfig)
+
 // NetworkHarness is an integration testing harness for the lightning network.
 // The harness by default is created with two active nodes on the network:
 // Alice and Bob.
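The new NodeOption type is a plain Go functional option over NodeConfig: callers hand the harness small functions that tweak the config before the node is built. As an illustration only (the withP2PPort name and the fixed port value below are hypothetical, not part of this change), a caller-defined option could look like this:

```go
package example

import "github.com/lightningnetwork/lnd/lntest"

// withP2PPort is a hypothetical NodeOption that pins the new node's P2P port
// to a fixed value instead of letting the harness pick one.
func withP2PPort(port int) lntest.NodeOption {
	return func(cfg *lntest.NodeConfig) {
		cfg.P2PPort = port
	}
}
```

The copyPorts helper added further down in this change is exactly such an option, covering all four port fields of an existing node at once.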
@@ -427,10 +430,11 @@ func (n *NetworkHarness) newNodeWithSeed(name string, extraArgs []string,
 // be used for regular rpc operations.
 func (n *NetworkHarness) RestoreNodeWithSeed(name string, extraArgs []string,
 	password []byte, mnemonic []string, recoveryWindow int32,
-	chanBackups *lnrpc.ChanBackupSnapshot) (*HarnessNode, error) {
+	chanBackups *lnrpc.ChanBackupSnapshot,
+	opts ...NodeOption) (*HarnessNode, error) {
 
 	node, err := n.newNode(
-		name, extraArgs, true, password, n.embeddedEtcd, true,
+		name, extraArgs, true, password, n.embeddedEtcd, true, opts...,
 	)
 	if err != nil {
 		return nil, err
@@ -461,10 +465,10 @@ func (n *NetworkHarness) RestoreNodeWithSeed(name string, extraArgs []string,
 // can be used immediately. Otherwise, the node will require an additional
 // initialization phase where the wallet is either created or restored.
 func (n *NetworkHarness) newNode(name string, extraArgs []string, hasSeed bool,
-	password []byte, embeddedEtcd, wait bool) (
+	password []byte, embeddedEtcd, wait bool, opts ...NodeOption) (
 	*HarnessNode, error) {
 
-	node, err := newNode(NodeConfig{
+	cfg := &NodeConfig{
 		Name:              name,
 		LogFilenamePrefix: n.currentTestCase,
 		HasSeed:           hasSeed,
@@ -474,7 +478,12 @@ func (n *NetworkHarness) newNode(name string, extraArgs []string, hasSeed bool,
 		ExtraArgs:         extraArgs,
 		FeeURL:            n.feeService.url,
 		Etcd:              embeddedEtcd,
-	})
+	}
+	for _, opt := range opts {
+		opt(cfg)
+	}
+
+	node, err := newNode(*cfg)
 	if err != nil {
 		return nil, err
 	}
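With this refactor, newNode first builds the default NodeConfig, then lets every supplied option mutate it, and only then constructs the HarnessNode. A minimal, self-contained sketch of that build-then-apply ordering (the types and names here are simplified stand-ins, not lnd's actual API):

```go
package main

import "fmt"

// config and option stand in for lntest.NodeConfig and lntest.NodeOption.
type config struct {
	name    string
	p2pPort int
}

type option func(*config)

// newNode fills in defaults, applies each option in order, and only then
// "constructs" the node from the final config.
func newNode(name string, opts ...option) config {
	cfg := &config{name: name}
	for _, opt := range opts {
		opt(cfg)
	}
	return *cfg
}

func main() {
	cfg := newNode("dave", func(c *config) { c.p2pPort = 9735 })
	fmt.Println(cfg.name, cfg.p2pPort) // dave 9735
}
```

Because the options run before the node is actually constructed, a port set by an option survives the later call to generateListeningPorts, which (as changed further down) only fills fields that are still zero.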
@@ -63,7 +63,7 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
 				// the node from seed, then manually recover
 				// the channel backup.
 				return chanRestoreViaRPC(
-					net, password, mnemonic, multi,
+					net, password, mnemonic, multi, oldNode,
 				)
 			},
 		},
@@ -89,7 +89,7 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
 				// create a new nodeRestorer that will restore
 				// using the on-disk channels.backup.
 				return chanRestoreViaRPC(
-					net, password, mnemonic, multi,
+					net, password, mnemonic, multi, oldNode,
 				)
 			},
 		},
@@ -124,6 +124,7 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
 				return net.RestoreNodeWithSeed(
 					"dave", nil, password,
 					mnemonic, 1000, backupSnapshot,
+					copyPorts(oldNode),
 				)
 			}, nil
 		},
@@ -160,6 +161,7 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
 			newNode, err := net.RestoreNodeWithSeed(
 				"dave", nil, password,
 				mnemonic, 1000, nil,
+				copyPorts(oldNode),
 			)
 			if err != nil {
 				return nil, err
@@ -206,7 +208,7 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
 			return func() (*lntest.HarnessNode, error) {
 				newNode, err := net.RestoreNodeWithSeed(
 					"dave", nil, password, mnemonic,
-					1000, nil,
+					1000, nil, copyPorts(oldNode),
 				)
 				if err != nil {
 					return nil, fmt.Errorf("unable to "+
@@ -276,7 +278,7 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
 				// the node from seed, then manually recover
 				// the channel backup.
 				return chanRestoreViaRPC(
-					net, password, mnemonic, multi,
+					net, password, mnemonic, multi, oldNode,
 				)
 			},
 		},
@@ -326,7 +328,7 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
 				// the channel backup.
 				multi := chanBackup.MultiChanBackup.MultiChanBackup
 				return chanRestoreViaRPC(
-					net, password, mnemonic, multi,
+					net, password, mnemonic, multi, oldNode,
 				)
 			},
 		},
@@ -353,7 +355,7 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
 				// create a new nodeRestorer that will restore
 				// using the on-disk channels.backup.
 				return chanRestoreViaRPC(
-					net, password, mnemonic, multi,
+					net, password, mnemonic, multi, oldNode,
 				)
 			},
 		},
@@ -384,7 +386,7 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
 			// the node from seed, then manually recover the
 			// channel backup.
 			return chanRestoreViaRPC(
-				net, password, mnemonic, multi,
+				net, password, mnemonic, multi, oldNode,
 			)
 		},
 	},
@@ -825,9 +827,12 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,
 
 	ctxb := context.Background()
 
-	var nodeArgs []string
+	nodeArgs := []string{
+		"--minbackoff=50ms",
+		"--maxbackoff=1s",
+	}
 	if testCase.anchorCommit {
-		nodeArgs = commitTypeAnchors.Args()
+		nodeArgs = append(nodeArgs, commitTypeAnchors.Args()...)
 	}
 
 	// First, we'll create a brand new node we'll use within the test. If
@@ -1225,9 +1230,9 @@ func createLegacyRevocationChannel(net *lntest.NetworkHarness, t *harnessTest,
 // chanRestoreViaRPC is a helper test method that returns a nodeRestorer
 // instance which will restore the target node from a password+seed, then
 // trigger a SCB restore using the RPC interface.
-func chanRestoreViaRPC(net *lntest.NetworkHarness,
-	password []byte, mnemonic []string,
-	multi []byte) (nodeRestorer, error) {
+func chanRestoreViaRPC(net *lntest.NetworkHarness, password []byte,
+	mnemonic []string, multi []byte,
+	oldNode *lntest.HarnessNode) (nodeRestorer, error) {
 
 	backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
 		MultiChanBackup: multi,
@@ -1238,6 +1243,7 @@ func chanRestoreViaRPC(net *lntest.NetworkHarness,
 	return func() (*lntest.HarnessNode, error) {
 		newNode, err := net.RestoreNodeWithSeed(
 			"dave", nil, password, mnemonic, 1000, nil,
+			copyPorts(oldNode),
 		)
 		if err != nil {
 			return nil, fmt.Errorf("unable to "+
@@ -1257,3 +1263,14 @@ func chanRestoreViaRPC(net *lntest.NetworkHarness,
 		return newNode, nil
 	}, nil
 }
+
+// copyPorts returns a node option function that copies the ports of an existing
+// node over to the newly created one.
+func copyPorts(oldNode *lntest.HarnessNode) lntest.NodeOption {
+	return func(cfg *lntest.NodeConfig) {
+		cfg.P2PPort = oldNode.Cfg.P2PPort
+		cfg.RPCPort = oldNode.Cfg.RPCPort
+		cfg.RESTPort = oldNode.Cfg.RESTPort
+		cfg.ProfilePort = oldNode.Cfg.ProfilePort
+	}
+}
@@ -1,5 +1,7 @@
<time> [ERR] BRAR: Unable to broadcast justice tx: Transaction rejected: output already spent
<time> [ERR] BRAR: Unable to check for spentness of outpoint=<chan_point>: TxNotifier is exiting
<time> [ERR] BRAR: Unable to register for conf for txid(<hex>): TxNotifier is exiting
<time> [ERR] BRAR: Unable to register for block notifications: chainntnfs: system interrupt while attempting to register for block epoch notification.
<time> [ERR] BTCN: Broadcast attempt failed: rejected by <ip>: replacement transaction <hex> has an insufficient absolute fee: needs <amt>, has <amt>
<time> [ERR] BTCN: Broadcast attempt failed: rejected by <ip>: replacement transaction <hex> has an insufficient fee rate: needs more than <amt>, has <amt>
<time> [ERR] BTCN: Broadcast attempt failed: rejected by <ip>: transaction already exists
@@ -78,6 +80,7 @@
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: unable add proof to the channel chanID=<hex>: edge marked as zombie
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: unable add proof to the channel chanID=<hex>: edge not found
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: unable to register for confirmation of ChannelPoint(<chan_point>): chain notifier shutting down
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: unable to register for confirmation of ChannelPoint(<chan_point>): TxNotifier is exiting
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: error sending channel announcement: gossiper is shutting down
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: error sending channel announcement: router shutting down
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: funding manager shutting down
@@ -242,6 +245,7 @@
<time> [ERR] RPCS: [/lnrpc.Lightning/GetInfo]: expected 1 macaroon, got 0
<time> [ERR] RPCS: [/lnrpc.Lightning/GetInfo]: permission denied
<time> [ERR] RPCS: [/lnrpc.Lightning/GetInfo]: the RPC server is in the process of starting up, but not yet ready to accept calls
<time> [ERR] RPCS: [/lnrpc.Lightning/GetInfo]: wallet locked, unlock it to enable full RPC access
<time> [ERR] RPCS: [/lnrpc.Lightning/ListMacaroonIDs]: cannot retrieve macaroon: cannot get macaroon: root key with id 1 doesn't exist
<time> [ERR] RPCS: [/lnrpc.Lightning/NewAddress]: permission denied
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=channel too large
@@ -155,13 +155,19 @@ func GenerateBtcdListenerAddresses() (string, string) {
 // generateListeningPorts returns four ints representing ports to listen on
 // designated for the current lightning network test. This returns the next
 // available ports for the p2p, rpc, rest and profiling services.
-func generateListeningPorts() (int, int, int, int) {
-	p2p := NextAvailablePort()
-	rpc := NextAvailablePort()
-	rest := NextAvailablePort()
-	profile := NextAvailablePort()
-
-	return p2p, rpc, rest, profile
+func generateListeningPorts(cfg *NodeConfig) {
+	if cfg.P2PPort == 0 {
+		cfg.P2PPort = NextAvailablePort()
+	}
+	if cfg.RPCPort == 0 {
+		cfg.RPCPort = NextAvailablePort()
+	}
+	if cfg.RESTPort == 0 {
+		cfg.RESTPort = NextAvailablePort()
+	}
+	if cfg.ProfilePort == 0 {
+		cfg.ProfilePort = NextAvailablePort()
+	}
 }
 
 // BackendConfig is an interface that abstracts away the specific chain backend
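generateListeningPorts now fills in only the ports that are still zero, which is what lets an option such as copyPorts pre-seed them before the node is built. A small runnable sketch of just that behavior (the NodeConfig struct and the toy port allocator below are simplified stand-ins, not the real lntest code):

```go
package main

import "fmt"

// NodeConfig is a simplified stand-in for lntest.NodeConfig.
type NodeConfig struct {
	P2PPort, RPCPort, RESTPort, ProfilePort int
}

var next = 20000

// nextAvailablePort is a toy allocator standing in for lntest.NextAvailablePort.
func nextAvailablePort() int {
	next++
	return next
}

// generateListeningPorts assigns a fresh port only to fields that are still
// zero, so values copied over from an old node are preserved.
func generateListeningPorts(cfg *NodeConfig) {
	if cfg.P2PPort == 0 {
		cfg.P2PPort = nextAvailablePort()
	}
	if cfg.RPCPort == 0 {
		cfg.RPCPort = nextAvailablePort()
	}
	if cfg.RESTPort == 0 {
		cfg.RESTPort = nextAvailablePort()
	}
	if cfg.ProfilePort == 0 {
		cfg.ProfilePort = nextAvailablePort()
	}
}

func main() {
	cfg := &NodeConfig{P2PPort: 9735} // pre-set, as copyPorts would do
	generateListeningPorts(cfg)
	fmt.Println(cfg.P2PPort) // still 9735
	fmt.Println(cfg.RPCPort) // freshly allocated
}
```

Presumably the point of keeping the old ports is that a node restored from seed in the itest comes back on the same addresses, so its previous channel peers can reconnect without any configuration changes.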
@@ -411,7 +417,7 @@ func newNode(cfg NodeConfig) (*HarnessNode, error) {
 	cfg.ReadMacPath = filepath.Join(networkDir, "readonly.macaroon")
 	cfg.InvoiceMacPath = filepath.Join(networkDir, "invoice.macaroon")
 
-	cfg.P2PPort, cfg.RPCPort, cfg.RESTPort, cfg.ProfilePort = generateListeningPorts()
+	generateListeningPorts(&cfg)
 
 	// Run all tests with accept keysend. The keysend code is very isolated
 	// and it is highly unlikely that it would affect regular itests when