lntemp: re-introduce HarnessTest as a test manager

This commit adds a new component, `HarnessTest`, as a test manager, which
is responsible for managing state changes in the itest. It is built on
top of `HarnessNode` and handles assertions so that a test can be written
without paying unnecessary attention to a node's unexpected failures.
This commit also adds a minimal set of assertions.
yyforyongyu 2022-07-22 20:56:01 +08:00
parent 89d275169a
commit a3fa4ba10a
2 changed files with 226 additions and 0 deletions
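
To make the intent concrete, here is a hypothetical test case written against the new manager; it is an editorial sketch, not part of this commit. All RPC calls and assertions go through the harness (using the helpers introduced below), so the test body never checks node errors itself. The standby-node wiring that populates `ht.Alice` and `ht.Bob` lands in follow-up commits:

// exampleCase is an editorial sketch, not part of this commit.
var exampleCase = &TestCase{
	Name: "basic connectivity",
	TestFunc: func(ht *HarnessTest) {
		// Connect the standby nodes and wait for both sides to be
		// synced to chain; any failure aborts the test via require.
		ht.ConnectNodes(ht.Alice, ht.Bob)
		ht.WaitForBlockchainSync(ht.Alice)
		ht.WaitForBlockchainSync(ht.Bob)
	},
}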

lntemp/harness.go (new file, 82 lines)

@@ -0,0 +1,82 @@
package lntemp

import (
	"context"
	"testing"

	"github.com/lightningnetwork/lnd/lntemp/node"
	"github.com/lightningnetwork/lnd/lntest"
)

// TestCase defines a test case to be used in the integration test.
type TestCase struct {
	// Name specifies the test name.
	Name string

	// TestFunc is the test case wrapped in a function.
	TestFunc func(t *HarnessTest)
}

// standbyNodes is a list of nodes which are created during the
// initialization of the test and used across all test cases.
type standbyNodes struct {
	// Alice and Bob are the initial seeder nodes that are automatically
	// created to be the initial participants of the test network.
	Alice *node.HarnessNode
	Bob   *node.HarnessNode
}

// HarnessTest builds on top of a testing.T with enhanced error detection.
// It is responsible for managing the interactions among different nodes,
// and providing easy-to-use assertions.
type HarnessTest struct {
	*testing.T

	// Embed the standbyNodes so we can easily access them via `ht.Alice`.
	standbyNodes

	// Miner is a reference to a running full node that can be used to
	// create new blocks on the network.
	Miner *HarnessMiner

	// manager handles the start and stop of a given node.
	manager *nodeManager

	// feeService is a web service that provides external fee estimates
	// to lnd.
	feeService *feeService

	// Channel for transmitting stderr output from a failed lightning
	// node to the main process.
	lndErrorChan chan error

	// runCtx is a context with a cancel method. It's used to signal when
	// the node needs to quit, and used as the parent context when
	// spawning child contexts for RPC requests.
	runCtx context.Context
	cancel context.CancelFunc

	// stopChainBackend points to the cleanup function returned by the
	// chainBackend.
	stopChainBackend func()
}

// NewHarnessTest creates a new instance of a HarnessTest from a regular
// testing.T instance.
func NewHarnessTest(t *testing.T, lndBinary string,
	dbBackend lntest.DatabaseBackend) *HarnessTest {

	// Create the run context.
	ctxt, cancel := context.WithCancel(context.Background())

	manager := newNodeManager(lndBinary, dbBackend)

	return &HarnessTest{
		T:       t,
		manager: manager,
		runCtx:  ctxt,
		cancel:  cancel,

		// We need to use a buffered channel here as we don't want to
		// block sending errors.
		lndErrorChan: make(chan error, 10),
	}
}
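
For context, a minimal sketch of how the constructor might be driven from a top-level test; the binary path and the `lntest.BackendBbolt` value are illustrative assumptions, and the miner, fee service, and standby nodes are left unset since their setup lands in follow-up commits:

// Hypothetical top-level test driving the harness. Only the run context,
// node manager and error channel are populated by NewHarnessTest itself.
func TestLightningNetworkDaemonTemp(t *testing.T) {
	ht := NewHarnessTest(t, "./lnd-itest", lntest.BackendBbolt)

	// Cancel the run context when the test ends so any in-flight RPC
	// requests are torn down with it.
	t.Cleanup(ht.cancel)

	exampleCase.TestFunc(ht)
}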

lntemp/harness_assertion.go (new file, 144 lines)

@@ -0,0 +1,144 @@
package lntemp

import (
	"context"
	"fmt"
	"strings"

	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lntemp/node"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/stretchr/testify/require"
)

// WaitForBlockchainSync waits until the node is synced to chain.
func (h *HarnessTest) WaitForBlockchainSync(hn *node.HarnessNode) {
	err := wait.NoError(func() error {
		resp := hn.RPC.GetInfo()
		if resp.SyncedToChain {
			return nil
		}

		return fmt.Errorf("%s is not synced to chain", hn.Name())
	}, DefaultTimeout)

	require.NoError(h, err, "timeout waiting for blockchain sync")
}
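
// AssertNumPeers is an editorial sketch, not part of this commit: it shows
// how further assertions are meant to follow the same shape as
// WaitForBlockchainSync above, polling inside wait.NoError and failing the
// test via require once DefaultTimeout fires.
func (h *HarnessTest) AssertNumPeers(hn *node.HarnessNode, num int) {
	err := wait.NoError(func() error {
		resp := hn.RPC.ListPeers()
		if len(resp.Peers) == num {
			return nil
		}

		return fmt.Errorf("%s: want %d peers, got %d",
			hn.Name(), num, len(resp.Peers))
	}, DefaultTimeout)

	require.NoError(h, err, "timeout waiting for peer count")
}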
// AssertPeerConnected asserts that the given node b is connected to a.
func (h *HarnessTest) AssertPeerConnected(a, b *node.HarnessNode) {
	err := wait.NoError(func() error {
		// We require the RPC call to succeed and won't wait for it,
		// as a failure here is unexpected behavior.
		resp := a.RPC.ListPeers()

		// If node B is seen in the ListPeers response from node A,
		// then we can return true as the connection has been fully
		// established.
		for _, peer := range resp.Peers {
			if peer.PubKey == b.PubKeyStr {
				return nil
			}
		}

		return fmt.Errorf("%s not found in %s's ListPeers",
			b.Name(), a.Name())
	}, DefaultTimeout)

	require.NoError(h, err, "unable to connect %s to %s: peers not "+
		"connected within %v", a.Name(), b.Name(), DefaultTimeout)
}
// ConnectNodes creates a connection between the two nodes and asserts that
// the connection succeeds.
func (h *HarnessTest) ConnectNodes(a, b *node.HarnessNode) {
	bobInfo := b.RPC.GetInfo()

	req := &lnrpc.ConnectPeerRequest{
		Addr: &lnrpc.LightningAddress{
			Pubkey: bobInfo.IdentityPubkey,
			Host:   b.Cfg.P2PAddr(),
		},
	}
	a.RPC.ConnectPeer(req)
	h.AssertPeerConnected(a, b)
}

// DisconnectNodes disconnects the given two nodes and asserts that the
// disconnection succeeds. The request is made from node a and sent to
// node b.
func (h *HarnessTest) DisconnectNodes(a, b *node.HarnessNode) {
	bobInfo := b.RPC.GetInfo()
	a.RPC.DisconnectPeer(bobInfo.IdentityPubkey)
}
// EnsureConnected will try to connect two nodes, returning no error if
// they are already connected. If the nodes were not connected previously,
// this will behave the same as ConnectNodes. If a pending connection
// request has already been made, the method will block until the two
// nodes appear in each other's peers list, or until the DefaultTimeout
// expires.
func (h *HarnessTest) EnsureConnected(a, b *node.HarnessNode) {
	// errConnectionRequested is used to signal that a connection was
	// requested successfully, which is distinct from already being
	// connected to the peer.
	errConnectionRequested := "connection request in progress"

	tryConnect := func(a, b *node.HarnessNode) error {
		bInfo := b.RPC.GetInfo()

		req := &lnrpc.ConnectPeerRequest{
			Addr: &lnrpc.LightningAddress{
				Pubkey: bInfo.IdentityPubkey,
				Host:   b.Cfg.P2PAddr(),
			},
		}

		ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
		defer cancel()

		_, err := a.RPC.LN.ConnectPeer(ctxt, req)

		// Request was successful.
		if err == nil {
			return nil
		}

		// If the connection is in progress, we return no error.
		if strings.Contains(err.Error(), errConnectionRequested) {
			return nil
		}

		// If the two are already connected, we return early with no
		// error.
		if strings.Contains(err.Error(), "already connected to peer") {
			return nil
		}

		// We may get a connection refused error if we happen to be
		// in the middle of a previous node disconnection, e.g., a
		// restart from one of the nodes.
		if strings.Contains(err.Error(), "connection refused") {
			return nil
		}

		return err
	}

	// Return any critical errors returned by either alice or bob.
	require.NoError(h, tryConnect(a, b), "connection failed between %s "+
		"and %s", a.Cfg.Name, b.Cfg.Name)

	// When Alice and Bob each make a connection to the other side at
	// the same time, it's likely that neither connection will succeed.
	// Bob's connection will be canceled by Alice since she already has
	// an outbound connection to Bob, and the same happens to Alice's.
	// Thus the two connections cancel each other out.
	// TODO(yy): move this back when the above issue is fixed.
	// require.NoError(h, tryConnect(b, a), "connection failed between %s "+
	//	"and %s", a.Cfg.Name, b.Cfg.Name)

	// Otherwise one or both requested a connection, so we wait for the
	// peers lists to reflect the connection.
	h.AssertPeerConnected(a, b)
	h.AssertPeerConnected(b, a)
}
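
To close, a hedged sketch of the intended call pattern, grounded only in the helpers above: EnsureConnected is what a test reaches for after an operation that may have torn a connection down, since it tolerates the in-progress, already-connected, and connection-refused cases that a plain ConnectNodes would trip over.

// Editorial sketch, not part of this commit: recovering connectivity
// between the standby nodes after an intentional disconnect.
func exampleReconnect(ht *HarnessTest) {
	// Tear the connection down from Alice's side.
	ht.DisconnectNodes(ht.Alice, ht.Bob)

	// Bring it back up. EnsureConnected tolerates the "connection
	// refused" window left behind by the disconnect and blocks until
	// both peers see each other.
	ht.EnsureConnected(ht.Alice, ht.Bob)
}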