package lntest

import (
	"context"
	"encoding/hex"
	"fmt"
	"testing"
	"time"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/go-errors/errors"
	"github.com/lightningnetwork/lnd/kvdb/etcd"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/lightningnetwork/lnd/lntest/node"
	"github.com/lightningnetwork/lnd/lntest/rpc"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/stretchr/testify/require"
)

const (
	// defaultMinerFeeRate specifies the fee rate in sats when sending
	// outputs from the miner.
	defaultMinerFeeRate = 7500

	// numBlocksSendOutput specifies the number of blocks to mine after
	// sending outputs from the miner.
	numBlocksSendOutput = 2

	// numBlocksOpenChannel specifies the number of blocks mined when
	// opening a channel.
	numBlocksOpenChannel = 6

	// lndErrorChanSize specifies the buffer size used to receive errors
	// from the lnd process.
	lndErrorChanSize = 10
)

// TestCase defines a test case used in the integration test.
type TestCase struct {
	// Name specifies the test name.
	Name string

	// TestFunc is the test case wrapped in a function.
	TestFunc func(t *HarnessTest)
}

// standbyNodes is a list of nodes created during the initialization of the
// test and used across all test cases.
type standbyNodes struct {
	// Alice and Bob are the initial seeder nodes that are automatically
	// created to be the initial participants of the test network.
	Alice *node.HarnessNode
	Bob   *node.HarnessNode
}

// HarnessTest builds on top of a testing.T with enhanced error detection. It
// is responsible for managing the interactions among different nodes, and
// providing easy-to-use assertions.
type HarnessTest struct {
	*testing.T

	// Embed the standbyNodes so we can easily access them via `ht.Alice`.
	standbyNodes

	// Miner is a reference to a running full node that can be used to
	// create new blocks on the network.
	Miner *HarnessMiner

	// manager handles the start and stop of a given node.
	manager *nodeManager

	// feeService is a web service that provides external fee estimates to
	// lnd.
	feeService WebFeeService

	// lndErrorChan is a channel for transmitting stderr output from a
	// failed lightning node to the main process.
	lndErrorChan chan error

	// runCtx is a context with cancel method. It's used to signal when the
	// node needs to quit, and used as the parent context when spawning
	// children contexts for RPC requests.
	runCtx context.Context //nolint:containedctx
	cancel context.CancelFunc

	// stopChainBackend points to the cleanup function returned by the
	// chainBackend.
	stopChainBackend func()

	// cleaned specifies whether the cleanup has been applied for the
	// current HarnessTest.
	cleaned bool
}

// NewHarnessTest creates a new instance of a HarnessTest from a regular
// testing.T instance.
func NewHarnessTest(t *testing.T, lndBinary string, feeService WebFeeService,
	dbBackend node.DatabaseBackend, nativeSQL bool) *HarnessTest {

	t.Helper()

	// Create the run context.
	ctxt, cancel := context.WithCancel(context.Background())

	manager := newNodeManager(lndBinary, dbBackend, nativeSQL)

	return &HarnessTest{
		T:          t,
		manager:    manager,
		feeService: feeService,
		runCtx:     ctxt,
		cancel:     cancel,
		// We need to use a buffered channel here as we don't want to
		// block sending errors.
		lndErrorChan: make(chan error, lndErrorChanSize),
	}
}

// Start will assemble the chain backend and the miner for the HarnessTest. It
// also starts the fee service and watches lnd process error.
func (h *HarnessTest) Start(chain node.BackendConfig, miner *HarnessMiner) {
	// Spawn a new goroutine to watch for any fatal errors that any of the
	// running lnd processes encounter. If an error occurs, then the test
	// case should naturally fail as a result, and we log the server error
	// here to help debug.
	go func() {
		select {
		case err, more := <-h.lndErrorChan:
			if !more {
				return
			}
			h.Logf("lnd finished with error (stderr):\n%v", err)

		case <-h.runCtx.Done():
			return
		}
	}()

	// Start the fee service.
	err := h.feeService.Start()
	require.NoError(h, err, "failed to start fee service")

	// Assemble the node manager with chainBackend and feeServiceURL.
	h.manager.chainBackend = chain
	h.manager.feeServiceURL = h.feeService.URL()

	// Assemble the miner.
	h.Miner = miner
}
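
// A minimal bootstrap sketch showing how NewHarnessTest and Start fit
// together in a test entry point. The helper names (NewFeeService,
// getLndBinary, NewMiner, setupChainBackend) are illustrative assumptions and
// not the exact lnd bootstrap code:
//
//	func TestLightningNetworkDaemon(t *testing.T) {
//		feeService := NewFeeService(t)
//		ht := NewHarnessTest(
//			t, getLndBinary(t), feeService,
//			node.BackendBbolt, false,
//		)
//
//		miner := NewMiner(t)
//		backend, cleanup := setupChainBackend(t, miner)
//		ht.stopChainBackend = cleanup
//
//		ht.Start(backend, miner)
//		defer ht.Stop()
//	}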

// ChainBackendName returns the chain backend name used in the test.
func (h *HarnessTest) ChainBackendName() string {
	return h.manager.chainBackend.Name()
}

// Context returns the run context used in this test. Usually it should be
// managed by the test itself, otherwise undefined behavior will occur. It can
// be used, however, when a test needs to have its own context being managed
// differently. In that case, instead of using a background context, the run
// context should be used such that the test context scope can be fully
// controlled.
func (h *HarnessTest) Context() context.Context {
	return h.runCtx
}

// setupWatchOnlyNode initializes a node with the watch-only accounts of an
// associated remote signing instance.
func (h *HarnessTest) setupWatchOnlyNode(name string,
	signerNode *node.HarnessNode, password []byte) *node.HarnessNode {

	// Prepare arguments for watch-only node connected to the remote signer.
	remoteSignerArgs := []string{
		"--remotesigner.enable",
		fmt.Sprintf("--remotesigner.rpchost=localhost:%d",
			signerNode.Cfg.RPCPort),
		fmt.Sprintf("--remotesigner.tlscertpath=%s",
			signerNode.Cfg.TLSCertPath),
		fmt.Sprintf("--remotesigner.macaroonpath=%s",
			signerNode.Cfg.AdminMacPath),
	}

	// Fetch watch-only accounts from the signer node.
	resp := signerNode.RPC.ListAccounts(&walletrpc.ListAccountsRequest{})
	watchOnlyAccounts, err := walletrpc.AccountsToWatchOnly(resp.Accounts)
	require.NoErrorf(h, err, "unable to find watch only accounts for %s",
		name)

	// Create a new watch-only node with remote signer configuration.
	return h.NewNodeRemoteSigner(
		name, remoteSignerArgs, password,
		&lnrpc.WatchOnly{
			MasterKeyBirthdayTimestamp: 0,
			MasterKeyFingerprint:       nil,
			Accounts:                   watchOnlyAccounts,
		},
	)
}

// createAndSendOutput sends amt satoshis from the internal mining node to the
// targeted lightning node using a P2WKH address. No blocks are mined, so the
// transaction will sit unconfirmed in the mempool.
func (h *HarnessTest) createAndSendOutput(target *node.HarnessNode,
	amt btcutil.Amount, addrType lnrpc.AddressType) {

	req := &lnrpc.NewAddressRequest{Type: addrType}
	resp := target.RPC.NewAddress(req)
	addr := h.DecodeAddress(resp.Address)
	addrScript := h.PayToAddrScript(addr)

	output := &wire.TxOut{
		PkScript: addrScript,
		Value:    int64(amt),
	}
	h.Miner.SendOutput(output, defaultMinerFeeRate)
}

// SetupRemoteSigningStandbyNodes starts the initial seeder nodes within the
// test harness in a remote signing configuration. The initial nodes' wallets
// will be funded with 100 outputs of 1 BTC each.
func (h *HarnessTest) SetupRemoteSigningStandbyNodes() {
	h.Log("Setting up standby nodes Alice and Bob with remote " +
		"signing configurations...")
	defer h.Log("Finished the setup, now running tests...")

	password := []byte("itestpassword")

	// Setup remote signing nodes for Alice and Bob.
	signerAlice := h.NewNode("SignerAlice", nil)
	signerBob := h.NewNode("SignerBob", nil)

	// Setup watch-only nodes for Alice and Bob, each configured with their
	// own remote signing instance.
	h.Alice = h.setupWatchOnlyNode("Alice", signerAlice, password)
	h.Bob = h.setupWatchOnlyNode("Bob", signerBob, password)

	// Fund each node with 100 BTC (using 100 separate transactions).
	const fundAmount = 1 * btcutil.SatoshiPerBitcoin
	const numOutputs = 100
	const totalAmount = fundAmount * numOutputs
	for _, node := range []*node.HarnessNode{h.Alice, h.Bob} {
		h.manager.standbyNodes[node.Cfg.NodeID] = node
		for i := 0; i < numOutputs; i++ {
			h.createAndSendOutput(
				node, fundAmount,
				lnrpc.AddressType_WITNESS_PUBKEY_HASH,
			)
		}
	}

	// We generate several blocks in order to give the outputs created
	// above a good number of confirmations.
	const totalTxes = 200
	h.MineBlocksAndAssertNumTxes(numBlocksSendOutput, totalTxes)

	// Now we want to wait for the nodes to catch up.
	h.WaitForBlockchainSync(h.Alice)
	h.WaitForBlockchainSync(h.Bob)

	// Now block until both wallets have fully synced up.
	h.WaitForBalanceConfirmed(h.Alice, totalAmount)
	h.WaitForBalanceConfirmed(h.Bob, totalAmount)
}

// SetupStandbyNodes starts the initial seeder nodes within the test harness.
// The initial nodes' wallets will be funded with 100 outputs of 1 BTC each.
func (h *HarnessTest) SetupStandbyNodes() {
	h.Log("Setting up standby nodes Alice and Bob...")
	defer h.Log("Finished the setup, now running tests...")

	lndArgs := []string{
		"--default-remote-max-htlcs=483",
		"--dust-threshold=5000000",
	}

	// Start the initial seeder nodes within the test network.
	h.Alice = h.NewNode("Alice", lndArgs)
	h.Bob = h.NewNode("Bob", lndArgs)

	// Load up the wallets of the seeder nodes with 100 outputs of 1 BTC
	// each.
	const fundAmount = 1 * btcutil.SatoshiPerBitcoin
	const numOutputs = 100
	const totalAmount = fundAmount * numOutputs
	for _, node := range []*node.HarnessNode{h.Alice, h.Bob} {
		h.manager.standbyNodes[node.Cfg.NodeID] = node
		for i := 0; i < numOutputs; i++ {
			h.createAndSendOutput(
				node, fundAmount,
				lnrpc.AddressType_WITNESS_PUBKEY_HASH,
			)
		}
	}

	// We generate several blocks in order to give the outputs created
	// above a good number of confirmations.
	const totalTxes = 200
	h.MineBlocksAndAssertNumTxes(numBlocksSendOutput, totalTxes)

	// Now we want to wait for the nodes to catch up.
	h.WaitForBlockchainSync(h.Alice)
	h.WaitForBlockchainSync(h.Bob)

	// Now block until both wallets have fully synced up.
	h.WaitForBalanceConfirmed(h.Alice, totalAmount)
	h.WaitForBalanceConfirmed(h.Bob, totalAmount)
}

// Stop stops the test harness.
func (h *HarnessTest) Stop() {
	// Do nothing if it's not started.
	if h.runCtx == nil {
		h.Log("HarnessTest is not started")
		return
	}

	// Stop all running nodes.
	for _, node := range h.manager.activeNodes {
		h.Shutdown(node)
	}

	close(h.lndErrorChan)

	// Stop the fee service.
	err := h.feeService.Stop()
	require.NoError(h, err, "failed to stop fee service")

	// Stop the chainBackend.
	h.stopChainBackend()

	// Stop the miner.
	h.Miner.Stop()
}

// RunTestCase executes a harness test case. Any errors or panics will be
// represented as fatal.
func (h *HarnessTest) RunTestCase(testCase *TestCase) {
	defer func() {
		if err := recover(); err != nil {
			description := errors.Wrap(err, 2).ErrorStack()
			h.Fatalf("Failed: (%v) panic with: \n%v",
				testCase.Name, description)
		}
	}()

	testCase.TestFunc(h)
}
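
// A minimal usage sketch for RunTestCase; the test function name is
// illustrative:
//
//	ht.RunTestCase(&TestCase{
//		Name:     "basic funding flow",
//		TestFunc: testBasicFundingFlow,
//	})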

// resetStandbyNodes resets all standby nodes by attaching the new testing.T
// and restarting them with the original config.
func (h *HarnessTest) resetStandbyNodes(t *testing.T) {
	t.Helper()

	for _, hn := range h.manager.standbyNodes {
		// Inherit the testing.T.
		h.T = t

		// Reset the config so the node will be using the default
		// config for the coming test. This will also inherit the
		// test's running context.
		h.RestartNodeWithExtraArgs(hn, hn.Cfg.OriginalExtraArgs)
	}
}

// Subtest creates a child HarnessTest, which inherits the harness net and
// standby nodes created by the parent test. It registers a cleanup function
// that resets all the standby nodes' configs back to their original state and
// creates snapshots of each node's internal state.
func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
	t.Helper()

	st := &HarnessTest{
		T:            t,
		manager:      h.manager,
		Miner:        h.Miner,
		standbyNodes: h.standbyNodes,
		feeService:   h.feeService,
		lndErrorChan: make(chan error, lndErrorChanSize),
	}

	// Inherit context from the main test.
	st.runCtx, st.cancel = context.WithCancel(h.runCtx)

	// Inherit the subtest for the miner.
	st.Miner.T = st.T

	// Reset the standby nodes.
	st.resetStandbyNodes(t)

	// Reset fee estimator.
	st.SetFeeEstimate(DefaultFeeRateSatPerKw)

	// Record block height.
	_, startHeight := h.Miner.GetBestBlock()

	st.Cleanup(func() {
		_, endHeight := h.Miner.GetBestBlock()

		st.Logf("finished test: %s, start height=%d, end height=%d, "+
			"mined blocks=%d", st.manager.currentTestCase,
			startHeight, endHeight, endHeight-startHeight)

		// Don't bother running the cleanups if the test failed.
		if st.Failed() {
			st.Log("test failed, skipped cleanup")
			st.shutdownAllNodes()
			return
		}

		// Don't run cleanup if it's already done. This can happen if
		// we have multiple levels of inheritance of the parent
		// harness test. For instance, a `Subtest(st)`.
		if st.cleaned {
			st.Log("test already cleaned, skipped cleanup")
			return
		}

		// When we finish the test, reset the nodes' configs and take a
		// snapshot of each of the nodes' internal states.
		for _, node := range st.manager.standbyNodes {
			st.cleanupStandbyNode(node)
		}

		// If any non-standby nodes are still running, shut them down.
		st.shutdownNonStandbyNodes()

		// We require the mempool to be cleaned from the test.
		require.Empty(st, st.Miner.GetRawMempool(), "mempool not "+
			"cleaned, please mine blocks to clean them all.")

		// Finally, cancel the run context. We have to do it here
		// because we need to keep the context alive for the above
		// assertions used in cleanup.
		st.cancel()

		// We now want to mark the parent harness as cleaned to avoid
		// running cleanup again since its internal state has been
		// cleaned up by its child harness tests.
		h.cleaned = true
	})

	return st
}
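
// A minimal usage sketch showing how a parent harness typically fans out into
// per-case subtests; allTestCases is an assumed slice of *TestCase:
//
//	for _, tc := range allTestCases {
//		tc := tc
//		success := ht.T.Run(tc.Name, func(t *testing.T) {
//			st := ht.Subtest(t)
//			st.SetTestName(tc.Name)
//			st.RunTestCase(tc)
//		})
//		if !success {
//			break
//		}
//	}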

// shutdownNonStandbyNodes will shut down any non-standby nodes.
func (h *HarnessTest) shutdownNonStandbyNodes() {
	h.shutdownNodes(true)
}

// shutdownAllNodes will shut down all running nodes.
func (h *HarnessTest) shutdownAllNodes() {
	h.shutdownNodes(false)
}

// shutdownNodes will shut down any non-standby nodes. If skipStandby is
// false, all the standby nodes will be shut down too.
func (h *HarnessTest) shutdownNodes(skipStandby bool) {
	for nid, node := range h.manager.activeNodes {
		// If it's a standby node, skip.
		_, ok := h.manager.standbyNodes[nid]
		if ok && skipStandby {
			continue
		}

		// The process may not be in a state to always shutdown
		// immediately, so we'll retry up to a hard limit to ensure we
		// eventually shutdown.
		err := wait.NoError(func() error {
			return h.manager.shutdownNode(node)
		}, DefaultTimeout)

		if err == nil {
			continue
		}

		// Instead of returning the error, we will log it instead.
		// This is needed so other nodes can continue their shutdown
		// processes.
		h.Logf("unable to shutdown %s, got err: %v", node.Name(), err)
	}
}

// cleanupStandbyNode is a function that should be called with defer whenever
// a subtest is created. It will reset the standby node's config, snapshot the
// state, and validate the node has a clean state.
func (h *HarnessTest) cleanupStandbyNode(hn *node.HarnessNode) {
	// Remove connections made from this test.
	h.removeConnections(hn)

	// Delete all payments made from this test.
	hn.RPC.DeleteAllPayments()

	// Check the node's current state with timeout.
	//
	// NOTE: we need to do this in a `wait` because it takes some time for
	// the node to update its internal state. Once the RPCs are synced we
	// can then remove this wait.
	err := wait.NoError(func() error {
		// Update the node's internal state.
		hn.UpdateState()

		// Check the node is in a clean state for the following tests.
		return h.validateNodeState(hn)
	}, wait.DefaultTimeout)
	require.NoError(h, err, "timeout checking node's state")
}

// removeConnections will remove all connections made on the standby nodes
// except the connections between Alice and Bob.
func (h *HarnessTest) removeConnections(hn *node.HarnessNode) {
	resp := hn.RPC.ListPeers()
	for _, peer := range resp.Peers {
		// Skip disconnecting Alice and Bob.
		switch peer.PubKey {
		case h.Alice.PubKeyStr:
			continue
		case h.Bob.PubKeyStr:
			continue
		}

		hn.RPC.DisconnectPeer(peer.PubKey)
	}
}

// SetTestName sets the test case name.
func (h *HarnessTest) SetTestName(name string) {
	h.manager.currentTestCase = name

	// Overwrite the old log filename so we can create new log files.
	for _, node := range h.manager.standbyNodes {
		node.Cfg.LogFilenamePrefix = name
	}
}

// NewNode creates a new node and asserts its creation. The node is guaranteed
// to have finished its initialization and all its subservers are started.
func (h *HarnessTest) NewNode(name string,
	extraArgs []string) *node.HarnessNode {

	node, err := h.manager.newNode(h.T, name, extraArgs, nil, false)
	require.NoErrorf(h, err, "unable to create new node for %s", name)

	// Start the node.
	err = node.Start(h.runCtx)
	require.NoError(h, err, "failed to start node %s", node.Name())

	return node
}

// Shutdown shuts down the given node and asserts that no errors occur.
func (h *HarnessTest) Shutdown(node *node.HarnessNode) {
	// The process may not be in a state to always shutdown immediately, so
	// we'll retry up to a hard limit to ensure we eventually shutdown.
	err := wait.NoError(func() error {
		return h.manager.shutdownNode(node)
	}, DefaultTimeout)

	require.NoErrorf(h, err, "unable to shutdown %v in %v", node.Name(),
		h.manager.currentTestCase)
}

// SuspendNode stops the given node and returns a callback that can be used to
// start it again.
func (h *HarnessTest) SuspendNode(node *node.HarnessNode) func() error {
	err := node.Stop()
	require.NoErrorf(h, err, "failed to stop %s", node.Name())

	// Remove the node from active nodes.
	delete(h.manager.activeNodes, node.Cfg.NodeID)

	return func() error {
		h.manager.registerNode(node)

		if err := node.Start(h.runCtx); err != nil {
			return err
		}
		h.WaitForBlockchainSync(node)

		return nil
	}
}
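
// A minimal usage sketch: suspend a node, act while it is offline, then bring
// it back via the returned callback; carol is an assumed running node:
//
//	restartCarol := ht.SuspendNode(carol)
//	// ... perform actions while carol is offline ...
//	require.NoError(ht, restartCarol(), "failed to restart carol")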

// RestartNode restarts a given node, unlocks it and asserts it's successfully
// started.
func (h *HarnessTest) RestartNode(hn *node.HarnessNode) {
	err := h.manager.restartNode(h.runCtx, hn, nil)
	require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

	err = h.manager.unlockNode(hn)
	require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

	if !hn.Cfg.SkipUnlock {
		// Give the node some time to catch up with the chain before we
		// continue with the tests.
		h.WaitForBlockchainSync(hn)
	}
}

// RestartNodeNoUnlock restarts a given node without unlocking its wallet.
func (h *HarnessTest) RestartNodeNoUnlock(hn *node.HarnessNode) {
	err := h.manager.restartNode(h.runCtx, hn, nil)
	require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
}

// RestartNodeWithChanBackups restarts a given node with the specified channel
// backups.
func (h *HarnessTest) RestartNodeWithChanBackups(hn *node.HarnessNode,
	chanBackups ...*lnrpc.ChanBackupSnapshot) {

	err := h.manager.restartNode(h.runCtx, hn, nil)
	require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

	err = h.manager.unlockNode(hn, chanBackups...)
	require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

	// Give the node some time to catch up with the chain before we
	// continue with the tests.
	h.WaitForBlockchainSync(hn)
}

// RestartNodeWithExtraArgs updates the node's config and restarts it.
func (h *HarnessTest) RestartNodeWithExtraArgs(hn *node.HarnessNode,
	extraArgs []string) {

	hn.SetExtraArgs(extraArgs)
	h.RestartNode(hn)
}

// NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh
// aezeed. The provided password is used as both the aezeed password and the
// wallet password. The generated mnemonic is returned along with the
// initialized harness node.
func (h *HarnessTest) NewNodeWithSeed(name string,
	extraArgs []string, password []byte,
	statelessInit bool) (*node.HarnessNode, []string, []byte) {

	// Create a request to generate a new aezeed. The new seed will have
	// the same password as the internal wallet.
	req := &lnrpc.GenSeedRequest{
		AezeedPassphrase: password,
		SeedEntropy:      nil,
	}

	return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
}
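
// A minimal usage sketch: create a seeded node with stateless init and keep
// the returned mnemonic and admin macaroon; the node name and password are
// illustrative:
//
//	carol, mnemonic, adminMac := ht.NewNodeWithSeed(
//		"Carol", nil, []byte("itestpassword"), true,
//	)
//	_ = adminMac // needed for RPC auth in stateless-init mode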

// newNodeWithSeed creates and initializes a new HarnessNode such that it'll be
// ready to accept RPC calls. A `GenSeedRequest` is needed to generate the
// seed.
func (h *HarnessTest) newNodeWithSeed(name string,
	extraArgs []string, req *lnrpc.GenSeedRequest,
	statelessInit bool) (*node.HarnessNode, []string, []byte) {

	node, err := h.manager.newNode(
		h.T, name, extraArgs, req.AezeedPassphrase, true,
	)
	require.NoErrorf(h, err, "unable to create new node for %s", name)

	// Start the node with seed only, which will only create the `State`
	// and `WalletUnlocker` clients.
	err = node.StartWithNoAuth(h.runCtx)
	require.NoErrorf(h, err, "failed to start node %s", node.Name())

	// Generate a new seed.
	genSeedResp := node.RPC.GenSeed(req)

	// With the seed created, construct the init request to the node,
	// including the newly generated seed.
	initReq := &lnrpc.InitWalletRequest{
		WalletPassword:     req.AezeedPassphrase,
		CipherSeedMnemonic: genSeedResp.CipherSeedMnemonic,
		AezeedPassphrase:   req.AezeedPassphrase,
		StatelessInit:      statelessInit,
	}

	// Pass the init request via rpc to finish unlocking the node. This
	// will also initialize the macaroon-authenticated LightningClient.
	adminMac, err := h.manager.initWalletAndNode(node, initReq)
	require.NoErrorf(h, err, "failed to unlock and init node %s",
		node.Name())

	// In stateless initialization mode we get a macaroon back that we have
	// to return to the test, otherwise gRPC calls won't be possible since
	// there are no macaroon files created in that mode.
	// In stateful init the admin macaroon will just be nil.
	return node, genSeedResp.CipherSeedMnemonic, adminMac
}

// RestoreNodeWithSeed fully initializes a HarnessNode using a chosen mnemonic,
// password, recovery window, and optionally a set of static channel backups.
// After providing the initialization request to unlock the node, this method
// will finish initializing the LightningClient such that the HarnessNode can
// be used for regular rpc operations.
func (h *HarnessTest) RestoreNodeWithSeed(name string, extraArgs []string,
	password []byte, mnemonic []string, rootKey string,
	recoveryWindow int32,
	chanBackups *lnrpc.ChanBackupSnapshot) *node.HarnessNode {

	n, err := h.manager.newNode(h.T, name, extraArgs, password, true)
	require.NoErrorf(h, err, "unable to create new node for %s", name)

	// Start the node with seed only, which will only create the `State`
	// and `WalletUnlocker` clients.
	err = n.StartWithNoAuth(h.runCtx)
	require.NoErrorf(h, err, "failed to start node %s", n.Name())

	// Create the wallet.
	initReq := &lnrpc.InitWalletRequest{
		WalletPassword:     password,
		CipherSeedMnemonic: mnemonic,
		AezeedPassphrase:   password,
		ExtendedMasterKey:  rootKey,
		RecoveryWindow:     recoveryWindow,
		ChannelBackups:     chanBackups,
	}
	_, err = h.manager.initWalletAndNode(n, initReq)
	require.NoErrorf(h, err, "failed to unlock and init node %s",
		n.Name())

	return n
}
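
// A minimal restore sketch that pairs with the NewNodeWithSeed example above:
// shut the node down, then recreate it from the saved mnemonic. The recovery
// window of 1000 is an illustrative value:
//
//	ht.Shutdown(carol)
//	restoredCarol := ht.RestoreNodeWithSeed(
//		"Carol", nil, []byte("itestpassword"), mnemonic, "", 1000, nil,
//	)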

// NewNodeEtcd starts a new node with seed that'll use an external etcd
// database as its storage. The passed cluster flag indicates that we'd like
// the node to join the cluster leader election. We won't wait until RPC is
// available (this is useful when the node is not expected to become the leader
// right away).
func (h *HarnessTest) NewNodeEtcd(name string, etcdCfg *etcd.Config,
	password []byte, cluster bool,
	leaderSessionTTL int) *node.HarnessNode {

	// We don't want to use the embedded etcd instance.
	h.manager.dbBackend = node.BackendBbolt

	extraArgs := node.ExtraArgsEtcd(
		etcdCfg, name, cluster, leaderSessionTTL,
	)
	node, err := h.manager.newNode(h.T, name, extraArgs, password, true)
	require.NoError(h, err, "failed to create new node with etcd")

	// Start the node daemon only.
	err = node.StartLndCmd(h.runCtx)
	require.NoError(h, err, "failed to start node %s", node.Name())

	return node
}

// NewNodeWithSeedEtcd starts a new node with seed that'll use an external etcd
// database as its storage. The passed cluster flag indicates that we'd like
// the node to join the cluster leader election.
func (h *HarnessTest) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config,
	password []byte, statelessInit, cluster bool,
	leaderSessionTTL int) (*node.HarnessNode, []string, []byte) {

	// We don't want to use the embedded etcd instance.
	h.manager.dbBackend = node.BackendBbolt

	// Create a request to generate a new aezeed. The new seed will have
	// the same password as the internal wallet.
	req := &lnrpc.GenSeedRequest{
		AezeedPassphrase: password,
		SeedEntropy:      nil,
	}

	extraArgs := node.ExtraArgsEtcd(
		etcdCfg, name, cluster, leaderSessionTTL,
	)

	return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
}

// NewNodeRemoteSigner creates a new remote signer node and asserts its
// creation.
func (h *HarnessTest) NewNodeRemoteSigner(name string, extraArgs []string,
	password []byte, watchOnly *lnrpc.WatchOnly) *node.HarnessNode {

	hn, err := h.manager.newNode(h.T, name, extraArgs, password, true)
	require.NoErrorf(h, err, "unable to create new node for %s", name)

	err = hn.StartWithNoAuth(h.runCtx)
	require.NoError(h, err, "failed to start node %s", name)

	// Construct the init request to the node, including the watch-only
	// account information.
	initReq := &lnrpc.InitWalletRequest{
		WalletPassword: password,
		WatchOnly:      watchOnly,
	}

	// Pass the init request via rpc to finish unlocking the node. This
	// will also initialize the macaroon-authenticated LightningClient.
	_, err = h.manager.initWalletAndNode(hn, initReq)
	require.NoErrorf(h, err, "failed to init node %s", name)

	return hn
}

// KillNode kills the node (but won't wait for the node process to stop).
func (h *HarnessTest) KillNode(hn *node.HarnessNode) {
	require.NoErrorf(h, hn.Kill(), "%s: kill got error", hn.Name())
	delete(h.manager.activeNodes, hn.Cfg.NodeID)
}

// SetFeeEstimate sets a fee rate to be returned from the fee estimator.
//
// NOTE: this method will set the fee rate for a conf target of 1, which is the
// fallback fee rate for a `WebAPIEstimator` if a higher conf target's fee rate
// is not set. This means if the fee rate for conf target 6 is set, the fee
// estimator will use that value instead.
func (h *HarnessTest) SetFeeEstimate(fee chainfee.SatPerKWeight) {
	h.feeService.SetFeeRate(fee, 1)
}

// SetFeeEstimateWithConf sets a fee rate of a specified conf target to be
// returned from the fee estimator.
func (h *HarnessTest) SetFeeEstimateWithConf(
	fee chainfee.SatPerKWeight, conf uint32) {

	h.feeService.SetFeeRate(fee, conf)
}
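
// A minimal usage sketch: pin the fallback fee rate, then override the rate
// used for a 6-block confirmation target; the values are illustrative:
//
//	ht.SetFeeEstimate(chainfee.SatPerKWeight(12500))
//	ht.SetFeeEstimateWithConf(chainfee.SatPerKWeight(2500), 6)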

// validateNodeState checks that the node doesn't have any uncleaned states
// which will affect its following tests.
func (h *HarnessTest) validateNodeState(hn *node.HarnessNode) error {
	errStr := func(subject string) error {
		return fmt.Errorf("%s: found %s channels, please close "+
			"them properly", hn.Name(), subject)
	}
	// If the node still has open channels, it's most likely that the
	// current test didn't close it properly.
	if hn.State.OpenChannel.Active != 0 {
		return errStr("active")
	}
	if hn.State.OpenChannel.Public != 0 {
		return errStr("public")
	}
	if hn.State.OpenChannel.Private != 0 {
		return errStr("private")
	}
	if hn.State.OpenChannel.Pending != 0 {
		return errStr("pending open")
	}

	// The number of pending force close channels should be zero.
	if hn.State.CloseChannel.PendingForceClose != 0 {
		return errStr("pending force")
	}

	// The number of waiting close channels should be zero.
	if hn.State.CloseChannel.WaitingClose != 0 {
		return errStr("waiting close")
	}

	// The number of payments should be zero.
	if hn.State.Payment.Total != 0 {
		return fmt.Errorf("%s: found uncleaned payments, please "+
			"delete all of them properly", hn.Name())
	}

	return nil
}

// GetChanPointFundingTxid takes a channel point and converts it into a chain
// hash.
func (h *HarnessTest) GetChanPointFundingTxid(
	cp *lnrpc.ChannelPoint) *chainhash.Hash {

	txid, err := lnrpc.GetChanPointFundingTxid(cp)
	require.NoError(h, err, "unable to get txid")

	return txid
}

// OutPointFromChannelPoint creates an outpoint from a given channel point.
func (h *HarnessTest) OutPointFromChannelPoint(
	cp *lnrpc.ChannelPoint) wire.OutPoint {

	txid := h.GetChanPointFundingTxid(cp)
	return wire.OutPoint{
		Hash:  *txid,
		Index: cp.OutputIndex,
	}
}

// OpenChannelParams houses the params to specify when opening a new channel.
type OpenChannelParams struct {
	// Amt is the local amount being put into the channel.
	Amt btcutil.Amount

	// PushAmt is the amount that should be pushed to the remote when the
	// channel is opened.
	PushAmt btcutil.Amount

	// Private is a boolean indicating whether the opened channel should be
	// private.
	Private bool

	// SpendUnconfirmed is a boolean indicating whether we can utilize
	// unconfirmed outputs to fund the channel.
	SpendUnconfirmed bool

	// MinHtlc is the htlc_minimum_msat value set when opening the channel.
	MinHtlc lnwire.MilliSatoshi

	// RemoteMaxHtlcs is the remote_max_htlcs value set when opening the
	// channel, restricting the number of concurrent HTLCs the remote party
	// can add to a commitment.
	RemoteMaxHtlcs uint16

	// FundingShim is an optional funding shim that the caller can specify
	// in order to modify the channel funding workflow.
	FundingShim *lnrpc.FundingShim

	// SatPerVByte is the amount of satoshis to spend in chain fees per
	// virtual byte of the transaction.
	SatPerVByte btcutil.Amount

	// CommitmentType is the commitment type that should be used for the
	// channel to be opened.
	CommitmentType lnrpc.CommitmentType

	// ZeroConf is used to determine if the channel will be a zero-conf
	// channel. This only works if the explicit negotiation is used with
	// anchors or script enforced leases.
	ZeroConf bool

	// ScidAlias denotes whether the channel will negotiate the
	// option-scid-alias channel type.
	ScidAlias bool

	// BaseFee is the channel base fee applied during the channel
	// announcement phase.
	BaseFee uint64

	// FeeRate is the channel fee rate in ppm applied during the channel
	// announcement phase.
	FeeRate uint64

	// UseBaseFee, if set, instructs the downstream logic to apply the
	// user-specified channel base fee to the channel update announcement.
	// If set to false it avoids applying a base fee of 0 and instead
	// activates the default configured base fee.
	UseBaseFee bool

	// UseFeeRate, if set, instructs the downstream logic to apply the
	// user-specified channel fee rate to the channel update announcement.
	// If set to false it avoids applying a fee rate of 0 and instead
	// activates the default configured fee rate.
	UseFeeRate bool

	// FundMax is a boolean indicating whether the channel should be funded
	// with the maximum possible amount from the wallet.
	FundMax bool

	// Memo is an optional note-to-self containing some useful information
	// about the channel. This is stored locally only, and is purely for
	// reference. It has no bearing on the channel's operation. Max allowed
	// length is 500 characters.
	Memo string

	// Outpoints is a list of client-selected outpoints that should be used
	// for funding a channel. If Amt is specified then this amount is
	// allocated from the sum of outpoints towards funding. If the FundMax
	// flag is specified the entirety of selected funds is allocated
	// towards channel funding.
	Outpoints []*lnrpc.OutPoint
}
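
// A minimal usage sketch: open a public 10m-sat channel from Alice to Bob,
// pushing 100k sats to the remote side (OpenChannel is defined below; the
// amounts are illustrative):
//
//	chanPoint := ht.OpenChannel(ht.Alice, ht.Bob, OpenChannelParams{
//		Amt:     btcutil.Amount(10_000_000),
//		PushAmt: btcutil.Amount(100_000),
//	})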

// prepareOpenChannel waits for both nodes to be synced to chain and returns an
// OpenChannelRequest.
func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode,
	p OpenChannelParams) *lnrpc.OpenChannelRequest {

	// Wait until srcNode and destNode have the latest chain synced.
	// Otherwise, we may run into a check within the funding manager that
	// prevents any funding workflows from being kicked off if the chain
	// isn't yet synced.
	h.WaitForBlockchainSync(srcNode)
	h.WaitForBlockchainSync(destNode)

	// Specify the minimal confirmations of the UTXOs used for channel
	// funding.
	minConfs := int32(1)
	if p.SpendUnconfirmed {
		minConfs = 0
	}

	// Prepare the request.
	return &lnrpc.OpenChannelRequest{
		NodePubkey:         destNode.PubKey[:],
		LocalFundingAmount: int64(p.Amt),
		PushSat:            int64(p.PushAmt),
		Private:            p.Private,
		MinConfs:           minConfs,
		SpendUnconfirmed:   p.SpendUnconfirmed,
		MinHtlcMsat:        int64(p.MinHtlc),
		RemoteMaxHtlcs:     uint32(p.RemoteMaxHtlcs),
		FundingShim:        p.FundingShim,
		SatPerByte:         int64(p.SatPerVByte),
		CommitmentType:     p.CommitmentType,
		ZeroConf:           p.ZeroConf,
		ScidAlias:          p.ScidAlias,
		BaseFee:            p.BaseFee,
		FeeRate:            p.FeeRate,
		UseBaseFee:         p.UseBaseFee,
		UseFeeRate:         p.UseFeeRate,
		FundMax:            p.FundMax,
		Memo:               p.Memo,
		Outpoints:          p.Outpoints,
	}
}

// openChannelAssertPending attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event.
func (h *HarnessTest) openChannelAssertPending(srcNode,
	destNode *node.HarnessNode,
	p OpenChannelParams) (*lnrpc.PendingUpdate, rpc.OpenChanClient) {

	// Prepare the request and open the channel.
	openReq := h.prepareOpenChannel(srcNode, destNode, p)
	respStream := srcNode.RPC.OpenChannel(openReq)

	// Consume the "channel pending" update. This waits until the node
	// notifies us that the final message in the channel funding workflow
	// has been sent to the remote node.
	resp := h.ReceiveOpenChannelUpdate(respStream)

	// Check that the update is channel pending.
	update, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
	require.Truef(h, ok, "expected channel pending update, instead got %v",
		resp)

	return update.ChanPending, respStream
}

// OpenChannelAssertPending attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event. It returns the
// `PendingUpdate`.
func (h *HarnessTest) OpenChannelAssertPending(srcNode,
	destNode *node.HarnessNode, p OpenChannelParams) *lnrpc.PendingUpdate {

	resp, _ := h.openChannelAssertPending(srcNode, destNode, p)
	return resp
}

// OpenChannelAssertStream attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event. It returns the open channel
// stream.
func (h *HarnessTest) OpenChannelAssertStream(srcNode,
	destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient {

	_, stream := h.openChannelAssertPending(srcNode, destNode, p)
	return stream
}

// OpenChannel attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, for public channels, it will mine
// extra blocks so they are announced to the network. Specifically, the
// following items are asserted:
//   - for a non-zero-conf channel, one block is mined to confirm the funding
//     tx.
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
//   - extra blocks are mined if it's a public channel.
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
	p OpenChannelParams) *lnrpc.ChannelPoint {

	// First, open the channel without announcing it.
	cp := h.OpenChannelNoAnnounce(alice, bob, p)

	// If this is a private channel, there's no need to mine extra blocks
	// since it will never be announced to the network.
	if p.Private {
		return cp
	}

	// Mine extra blocks to announce the channel.
	if p.ZeroConf {
		// For a zero-conf channel, no blocks have been mined yet, so
		// we mine all six blocks here, asserting the funding tx is
		// confirmed in the first.
		h.MineBlocksAndAssertNumTxes(numBlocksOpenChannel, 1)
	} else {
		// For a regular channel, 1 block has already been mined to
		// confirm the funding transaction, so we mine 5 blocks.
		h.MineBlocks(numBlocksOpenChannel - 1)
	}

	return cp
}

// OpenChannelNoAnnounce attempts to open a channel with the specified
// parameters extended from Alice to Bob without mining the necessary blocks to
// announce the channel. Additionally, the following items are asserted:
//   - for a non-zero-conf channel, one block is mined to confirm the funding
//     tx.
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) OpenChannelNoAnnounce(alice, bob *node.HarnessNode,
	p OpenChannelParams) *lnrpc.ChannelPoint {

	chanOpenUpdate := h.OpenChannelAssertStream(alice, bob, p)

	// Open a zero conf channel.
	if p.ZeroConf {
		return h.openChannelZeroConf(alice, bob, chanOpenUpdate)
	}

	// Open a non-zero conf channel.
	return h.openChannel(alice, bob, chanOpenUpdate)
}

// openChannel attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, the following items are asserted:
//   - 1 block is mined and the funding transaction should be found in it.
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) openChannel(alice, bob *node.HarnessNode,
	stream rpc.OpenChanClient) *lnrpc.ChannelPoint {

	// Mine 1 block to confirm the funding transaction.
	block := h.MineBlocksAndAssertNumTxes(1, 1)[0]

	// Wait for the channel open event.
	fundingChanPoint := h.WaitForChannelOpenEvent(stream)

	// Check that the funding tx is found in the first block.
	fundingTxID := h.GetChanPointFundingTxid(fundingChanPoint)
	h.Miner.AssertTxInBlock(block, fundingTxID)

	// Check that both alice and bob have seen the channel from their
	// network topology.
	h.AssertTopologyChannelOpen(alice, fundingChanPoint)
	h.AssertTopologyChannelOpen(bob, fundingChanPoint)

	// Check that the channel can be seen in their ListChannels.
	h.AssertChannelExists(alice, fundingChanPoint)
	h.AssertChannelExists(bob, fundingChanPoint)

	return fundingChanPoint
}

// openChannelZeroConf attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, the following items are asserted:
//   - both nodes should see the channel edge update in their network graph.
//   - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) openChannelZeroConf(alice, bob *node.HarnessNode,
	stream rpc.OpenChanClient) *lnrpc.ChannelPoint {

	// Wait for the channel open event.
	fundingChanPoint := h.WaitForChannelOpenEvent(stream)

	// Check that both alice and bob have seen the channel from their
	// network topology.
	h.AssertTopologyChannelOpen(alice, fundingChanPoint)
	h.AssertTopologyChannelOpen(bob, fundingChanPoint)

	// Finally, check that the channel can be seen in their ListChannels.
	h.AssertChannelExists(alice, fundingChanPoint)
	h.AssertChannelExists(bob, fundingChanPoint)

	return fundingChanPoint
}

// OpenChannelAssertErr opens a channel between srcNode and destNode, and
// asserts that the expected error is returned from the channel opening.
func (h *HarnessTest) OpenChannelAssertErr(srcNode, destNode *node.HarnessNode,
	p OpenChannelParams, expectedErr error) {

	// Prepare the request and open the channel.
	openReq := h.prepareOpenChannel(srcNode, destNode, p)
	respStream := srcNode.RPC.OpenChannel(openReq)

	// Receive an error to be sent from the stream.
	_, err := h.receiveOpenChannelUpdate(respStream)

	// Use string comparison here as we haven't codified all the RPC errors
	// yet.
	require.Containsf(h, err.Error(), expectedErr.Error(), "unexpected "+
		"error returned, want %v, got %v", expectedErr, err)
}

// CloseChannelAssertPending attempts to close the channel indicated by the
// passed channel point, initiated by the passed node. Once the CloseChannel
// rpc is called, it will consume one event and assert it's a close pending
// event. In addition, it will check that the closing tx can be found in the
// mempool.
func (h *HarnessTest) CloseChannelAssertPending(hn *node.HarnessNode,
	cp *lnrpc.ChannelPoint,
	force bool) (rpc.CloseChanClient, *chainhash.Hash) {

	// Calls the rpc to close the channel.
	closeReq := &lnrpc.CloseChannelRequest{
		ChannelPoint: cp,
		Force:        force,
		NoWait:       true,
	}

	var (
		stream rpc.CloseChanClient
		event  *lnrpc.CloseStatusUpdate
		err    error
	)

	// Consume the "channel close" update in order to wait for the closing
	// transaction to be broadcast, then wait for the closing tx to be seen
	// within the network.
	stream = hn.RPC.CloseChannel(closeReq)
	_, err = h.ReceiveCloseChannelUpdate(stream)
	require.NoError(h, err, "close channel update got error: %v", err)

	event, err = h.ReceiveCloseChannelUpdate(stream)
	if err != nil {
		h.Logf("Test: %s, close channel got error: %v",
			h.manager.currentTestCase, err)
	}
	require.NoError(h, err, "retry closing channel failed")

	pendingClose, ok := event.Update.(*lnrpc.CloseStatusUpdate_ClosePending)
	require.Truef(h, ok, "expected channel close update, instead got %v",
		pendingClose)

	closeTxid, err := chainhash.NewHash(pendingClose.ClosePending.Txid)
	require.NoErrorf(h, err, "unable to decode closeTxid: %v",
		pendingClose.ClosePending.Txid)

	// Assert the closing tx is in the mempool.
	h.Miner.AssertTxInMempool(closeTxid)

	return stream, closeTxid
}

// CloseChannel attempts to coop close a non-anchored channel identified by the
// passed channel point owned by the passed harness node. The following items
// are asserted:
//  1. a close pending event is sent from the close channel client.
//  2. the closing tx is found in the mempool.
//  3. the node reports the channel being waiting to close.
//  4. a block is mined and the closing tx should be found in it.
//  5. the node reports zero waiting close channels.
//  6. the node receives a topology update regarding the channel close.
func (h *HarnessTest) CloseChannel(hn *node.HarnessNode,
	cp *lnrpc.ChannelPoint) *chainhash.Hash {

	stream, _ := h.CloseChannelAssertPending(hn, cp, false)

	return h.AssertStreamChannelCoopClosed(hn, cp, false, stream)
}
|
|
|
|
|
|
|
|
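// A minimal usage sketch for the close helpers above, assuming a test body
// where the channel was opened via the harness (OpenChannel and the standby
// nodes are harness members defined elsewhere in this package):
//
//	chanPoint := ht.OpenChannel(
//		ht.Alice, ht.Bob, OpenChannelParams{Amt: 1_000_000},
//	)
//
//	// Coop close the channel, blocking until the closing tx is mined and
//	// the topology update is seen.
//	closingTxid := ht.CloseChannel(ht.Alice, chanPoint)
//	ht.Logf("channel %v closed in tx %v", chanPoint, closingTxid)
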
// ForceCloseChannel attempts to force close a non-anchored channel identified
// by the passed channel point owned by the passed harness node. The following
// items are asserted,
//  1. a close pending event is sent from the close channel client.
//  2. the closing tx is found in the mempool.
//  3. the node reports the channel being waiting to close.
//  4. a block is mined and the closing tx should be found in it.
//  5. the node reports zero waiting close channels.
//  6. the node receives a topology update regarding the channel close.
//  7. mine DefaultCSV-1 blocks.
//  8. the node reports zero pending force close channels.
func (h *HarnessTest) ForceCloseChannel(hn *node.HarnessNode,
	cp *lnrpc.ChannelPoint) *chainhash.Hash {

	stream, _ := h.CloseChannelAssertPending(hn, cp, true)

	closingTxid := h.AssertStreamChannelForceClosed(hn, cp, false, stream)

	// Cleanup the force close.
	h.CleanupForceClose(hn)

	return closingTxid
}

// CloseChannelAssertErr closes the given channel and asserts that an error is
// returned.
func (h *HarnessTest) CloseChannelAssertErr(hn *node.HarnessNode,
	cp *lnrpc.ChannelPoint, force bool) error {

	// Calls the rpc to close the channel.
	closeReq := &lnrpc.CloseChannelRequest{
		ChannelPoint: cp,
		Force:        force,
	}
	stream := hn.RPC.CloseChannel(closeReq)

	// Consume the "channel close" update in order to wait for the closing
	// transaction to be broadcast, then wait for the closing tx to be seen
	// within the network.
	_, err := h.ReceiveCloseChannelUpdate(stream)
	require.Errorf(h, err, "%s: expect close channel to return an error",
		hn.Name())

	return err
}

// IsNeutrinoBackend returns a bool indicating whether the node is using
// neutrino as its backend. This is useful when we want to skip certain tests
// which cannot be done with a neutrino backend.
func (h *HarnessTest) IsNeutrinoBackend() bool {
	return h.manager.chainBackend.Name() == NeutrinoBackendName
}

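// Since HarnessTest embeds *testing.T, a test can use this to bail out early
// on unsupported backends, e.g.:
//
//	if ht.IsNeutrinoBackend() {
//		ht.Skipf("skipping test for neutrino backend")
//	}
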
// fundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node. The confirmed boolean indicates whether the
// transaction that pays to the target should confirm. For a neutrino backend,
// the `confirmed` param is ignored.
func (h *HarnessTest) fundCoins(amt btcutil.Amount, target *node.HarnessNode,
	addrType lnrpc.AddressType, confirmed bool) {

	initialBalance := target.RPC.WalletBalance()

	// First, obtain an address from the target lightning node, preferring
	// to receive a p2wkh address s.t. the output can immediately be used
	// as an input to a funding transaction.
	req := &lnrpc.NewAddressRequest{Type: addrType}
	resp := target.RPC.NewAddress(req)
	addr := h.DecodeAddress(resp.Address)
	addrScript := h.PayToAddrScript(addr)

	// Generate a transaction which creates an output to the target
	// pkScript of the desired amount.
	output := &wire.TxOut{
		PkScript: addrScript,
		Value:    int64(amt),
	}
	h.Miner.SendOutput(output, defaultMinerFeeRate)

	// Encode the pkScript in hex as this is the format that it will be
	// returned via rpc.
	expPkScriptStr := hex.EncodeToString(addrScript)

	// Now, wait for ListUnspent to show the unconfirmed transaction
	// containing the correct pkscript.
	//
	// Since neutrino doesn't support unconfirmed outputs, skip this check.
	if !h.IsNeutrinoBackend() {
		utxos := h.AssertNumUTXOsUnconfirmed(target, 1)

		// Assert that the lone unconfirmed utxo contains the same
		// pkscript as the output generated above.
		pkScriptStr := utxos[0].PkScript
		require.Equal(h, pkScriptStr, expPkScriptStr,
			"pkscript mismatch")
	}

	// If the transaction should remain unconfirmed, then we'll wait until
	// the target node's unconfirmed balance reflects the expected balance
	// and exit.
	if !confirmed && !h.IsNeutrinoBackend() {
		expectedBalance := btcutil.Amount(
			initialBalance.UnconfirmedBalance,
		) + amt
		h.WaitForBalanceUnconfirmed(target, expectedBalance)

		return
	}

	// Otherwise, we'll mine one new block to ensure the output gains a
	// sufficient number of confirmations and wait for the balance to
	// reflect what's expected.
	h.MineBlocks(1)

	expectedBalance := btcutil.Amount(initialBalance.ConfirmedBalance) + amt
	h.WaitForBalanceConfirmed(target, expectedBalance)
}

// FundCoins attempts to send amt satoshis from the internal mining node to the
// targeted lightning node using a P2WKH address. A block is mined afterwards
// to confirm the transaction.
func (h *HarnessTest) FundCoins(amt btcutil.Amount, hn *node.HarnessNode) {
	h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, true)
}

// FundCoinsUnconfirmed attempts to send amt satoshis from the internal mining
// node to the targeted lightning node using a P2WKH address. No blocks are
// mined afterwards and the UTXOs are unconfirmed.
func (h *HarnessTest) FundCoinsUnconfirmed(amt btcutil.Amount,
	hn *node.HarnessNode) {

	h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, false)
}

// FundCoinsNP2WKH attempts to send amt satoshis from the internal mining node
// to the targeted lightning node using a NP2WKH address.
func (h *HarnessTest) FundCoinsNP2WKH(amt btcutil.Amount,
	target *node.HarnessNode) {

	h.fundCoins(amt, target, lnrpc.AddressType_NESTED_PUBKEY_HASH, true)
}

// FundCoinsP2TR attempts to send amt satoshis from the internal mining node to
// the targeted lightning node using a P2TR address.
func (h *HarnessTest) FundCoinsP2TR(amt btcutil.Amount,
	target *node.HarnessNode) {

	h.fundCoins(amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true)
}

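// A short sketch of the funding helpers above in a test body (amounts are
// illustrative):
//
//	// Give Alice 1 BTC of confirmed on-chain funds.
//	ht.FundCoins(btcutil.SatoshiPerBitcoin, ht.Alice)
//
//	// Leave Bob's funding tx unconfirmed to exercise zero-conf paths.
//	ht.FundCoinsUnconfirmed(100_000, ht.Bob)
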
// completePaymentRequestsAssertStatus sends payments from a node to complete
// all payment requests. This function does not return until all payments
// have reached the specified status.
func (h *HarnessTest) completePaymentRequestsAssertStatus(hn *node.HarnessNode,
	paymentRequests []string, status lnrpc.Payment_PaymentStatus) {

	// Create a buffered chan to signal the results.
	results := make(chan rpc.PaymentClient, len(paymentRequests))

	// send sends a payment and signals its resulting stream via the
	// results chan.
	send := func(payReq string) {
		req := &routerrpc.SendPaymentRequest{
			PaymentRequest: payReq,
			TimeoutSeconds: int32(wait.PaymentTimeout.Seconds()),
			FeeLimitMsat:   noFeeLimitMsat,
		}
		stream := hn.RPC.SendPayment(req)

		// Signal that the send succeeded.
		results <- stream
	}

	// Launch all payments simultaneously.
	for _, payReq := range paymentRequests {
		payReqCopy := payReq
		go send(payReqCopy)
	}

	// Wait for all payments to report the expected status. One result is
	// consumed per payment request so that every payment is asserted.
	timer := time.After(wait.PaymentTimeout)
	for range paymentRequests {
		select {
		case stream := <-results:
			h.AssertPaymentStatusFromStream(stream, status)

		case <-timer:
			require.Fail(h, "timeout",
				"waiting payment results timeout")
		}
	}
}

// CompletePaymentRequests sends payments from a node to complete all payment
// requests. This function does not return until all payments successfully
// complete without errors.
func (h *HarnessTest) CompletePaymentRequests(hn *node.HarnessNode,
	paymentRequests []string) {

	h.completePaymentRequestsAssertStatus(
		hn, paymentRequests, lnrpc.Payment_SUCCEEDED,
	)
}

// CompletePaymentRequestsNoWait sends payments from a node to complete all
// payment requests without waiting for the results. Instead, it checks that
// the number of updates in the specified channel has increased.
func (h *HarnessTest) CompletePaymentRequestsNoWait(hn *node.HarnessNode,
	paymentRequests []string, chanPoint *lnrpc.ChannelPoint) {

	// We start by getting the current state of the client's channels. This
	// is needed to ensure the payments actually have been committed before
	// we return.
	oldResp := h.GetChannelByChanPoint(hn, chanPoint)

	// Send payments and assert they are in-flight.
	h.completePaymentRequestsAssertStatus(
		hn, paymentRequests, lnrpc.Payment_IN_FLIGHT,
	)

	// We are not waiting for feedback in the form of a response, but we
	// should still wait long enough for the server to receive and handle
	// the send before cancelling the request. We wait until the number of
	// updates to the channel has increased before we return.
	err := wait.NoError(func() error {
		newResp := h.GetChannelByChanPoint(hn, chanPoint)

		// If this channel has an increased number of updates, we
		// assume the payments are committed, and we can return.
		if newResp.NumUpdates > oldResp.NumUpdates {
			return nil
		}

		// Otherwise return an error as the NumUpdates is not
		// increased.
		return fmt.Errorf("%s: channel:%v not updated after sending "+
			"payments, old updates: %v, new updates: %v", hn.Name(),
			chanPoint, oldResp.NumUpdates, newResp.NumUpdates)
	}, DefaultTimeout)
	require.NoError(h, err, "timeout while checking for channel updates")
}

// OpenChannelPsbt attempts to open a channel between srcNode and destNode with
// the passed channel funding parameters. It will assert if the expected step
// of funding the PSBT is not received from the source node.
func (h *HarnessTest) OpenChannelPsbt(srcNode, destNode *node.HarnessNode,
	p OpenChannelParams) (rpc.OpenChanClient, []byte) {

	// Wait until srcNode and destNode have the latest chain synced.
	// Otherwise, we may run into a check within the funding manager that
	// prevents any funding workflows from being kicked off if the chain
	// isn't yet synced.
	h.WaitForBlockchainSync(srcNode)
	h.WaitForBlockchainSync(destNode)

	// Send the request to open a channel to the source node now. This will
	// open a long-lived stream where we'll receive status updates about
	// the progress of the channel.
	req := &lnrpc.OpenChannelRequest{
		NodePubkey:         destNode.PubKey[:],
		LocalFundingAmount: int64(p.Amt),
		PushSat:            int64(p.PushAmt),
		Private:            p.Private,
		SpendUnconfirmed:   p.SpendUnconfirmed,
		MinHtlcMsat:        int64(p.MinHtlc),
		FundingShim:        p.FundingShim,
		CommitmentType:     p.CommitmentType,
	}
	respStream := srcNode.RPC.OpenChannel(req)

	// Consume the "PSBT funding ready" update. This waits until the node
	// notifies us that the PSBT can now be funded.
	resp := h.ReceiveOpenChannelUpdate(respStream)
	upd, ok := resp.Update.(*lnrpc.OpenStatusUpdate_PsbtFund)
	require.Truef(h, ok, "expected PSBT funding update, got %v", resp)

	// Make sure the channel funding address has the correct type for the
	// given commitment type.
	fundingAddr, err := btcutil.DecodeAddress(
		upd.PsbtFund.FundingAddress, harnessNetParams,
	)
	require.NoError(h, err)

	switch p.CommitmentType {
	case lnrpc.CommitmentType_SIMPLE_TAPROOT:
		require.IsType(h, &btcutil.AddressTaproot{}, fundingAddr)

	default:
		require.IsType(
			h, &btcutil.AddressWitnessScriptHash{}, fundingAddr,
		)
	}

	return respStream, upd.PsbtFund.Psbt
}

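// A sketch of how the returned PSBT is typically driven to completion. It
// assumes the caller created the PSBT funding shim with a known pendingChanID
// and funds/signs the packet out of band; the psbt byte slices below are
// illustrative:
//
//	stream, _ := ht.OpenChannelPsbt(alice, bob, p)
//
//	// Hand the funded-but-unsigned PSBT to the funding manager for
//	// verification.
//	alice.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
//		Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
//			PsbtVerify: &lnrpc.FundingPsbtVerify{
//				PendingChanId: pendingChanID[:],
//				FundedPsbt:    fundedPsbtBytes,
//			},
//		},
//	})
//
//	// Once signed, finalize so lnd can publish the funding tx.
//	alice.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
//		Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
//			PsbtFinalize: &lnrpc.FundingPsbtFinalize{
//				PendingChanId: pendingChanID[:],
//				SignedPsbt:    signedPsbtBytes,
//			},
//		},
//	})
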
// CleanupForceClose mines a force close commitment found in the mempool and
// the following sweep transaction from the force closing node.
func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode) {
	// Wait for the channel to be marked pending force close.
	h.AssertNumPendingForceClose(hn, 1)

	// Mine enough blocks for the node to sweep its funds from the force
	// closed channel. The commit sweep resolver is able to broadcast the
	// sweep tx up to one block before the CSV elapses, so wait until
	// DefaultCSV-1.
	//
	// NOTE: we might mine empty blocks here as we don't know the exact
	// number of blocks to mine. This may end up mining more blocks than
	// needed.
	h.MineEmptyBlocks(node.DefaultCSV - 1)

	// The node should now sweep the funds, clean up by mining the sweeping
	// tx.
	h.MineBlocksAndAssertNumTxes(1, 1)

	// Mine blocks to get any second level HTLC resolved. If there are no
	// HTLCs, this will behave like h.AssertNumPendingCloseChannels.
	h.mineTillForceCloseResolved(hn)
}

// mineTillForceCloseResolved asserts that the number of pending close channels
// is zero. Each time it checks, a new block is mined using MineBlocksSlow to
// give the node some time to catch up with the chain.
//
// NOTE: this method is a workaround to make sure we have a clean mempool at
// the end of a channel force closure. We cannot directly mine blocks and
// assert channels being fully closed because the subsystems in lnd don't share
// the same block height. This is especially the case when blocks are produced
// too fast.
// TODO(yy): remove this workaround when syncing blocks are unified in all the
// subsystems.
func (h *HarnessTest) mineTillForceCloseResolved(hn *node.HarnessNode) {
	_, startHeight := h.Miner.GetBestBlock()

	err := wait.NoError(func() error {
		resp := hn.RPC.PendingChannels()
		total := len(resp.PendingForceClosingChannels)
		if total != 0 {
			h.MineBlocks(1)

			return fmt.Errorf("expected num of pending force " +
				"close channel to be zero")
		}

		_, height := h.Miner.GetBestBlock()
		h.Logf("Mined %d blocks while waiting for force closed "+
			"channel to be resolved", height-startHeight)

		return nil
	}, DefaultTimeout)

	require.NoErrorf(h, err, "assert force close resolved timeout")
}

// CreatePayReqs is a helper method that will create a slice of payment
// requests for the given node.
func (h *HarnessTest) CreatePayReqs(hn *node.HarnessNode,
	paymentAmt btcutil.Amount, numInvoices int,
	routeHints ...*lnrpc.RouteHint) ([]string, [][]byte, []*lnrpc.Invoice) {

	payReqs := make([]string, numInvoices)
	rHashes := make([][]byte, numInvoices)
	invoices := make([]*lnrpc.Invoice, numInvoices)
	for i := 0; i < numInvoices; i++ {
		preimage := h.Random32Bytes()

		invoice := &lnrpc.Invoice{
			Memo:       "testing",
			RPreimage:  preimage,
			Value:      int64(paymentAmt),
			RouteHints: routeHints,
		}
		resp := hn.RPC.AddInvoice(invoice)

		// Set the payment address in the invoice so the caller can
		// properly use it.
		invoice.PaymentAddr = resp.PaymentAddr

		payReqs[i] = resp.PaymentRequest
		rHashes[i] = resp.RHash
		invoices[i] = invoice
	}

	return payReqs, rHashes, invoices
}

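// CreatePayReqs is commonly paired with the payment helpers above; a sketch
// (amounts and counts are illustrative):
//
//	// Bob creates three 10k-sat invoices...
//	payReqs, _, _ := ht.CreatePayReqs(ht.Bob, 10_000, 3)
//
//	// ...and Alice pays them all, blocking until each one succeeds.
//	ht.CompletePaymentRequests(ht.Alice, payReqs)
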
// BackupDB creates a backup of the current database. It will stop the node
// first, copy the database files, and restart the node.
func (h *HarnessTest) BackupDB(hn *node.HarnessNode) {
	restart := h.SuspendNode(hn)

	err := hn.BackupDB()
	require.NoErrorf(h, err, "%s: failed to backup db", hn.Name())

	err = restart()
	require.NoErrorf(h, err, "%s: failed to restart", hn.Name())
}

// RestartNodeAndRestoreDB restarts a given node with a callback to restore the
// db.
func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
	cb := func() error { return hn.RestoreDB() }
	err := h.manager.restartNode(h.runCtx, hn, cb)
	require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

	err = h.manager.unlockNode(hn)
	require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())

	// Give the node some time to catch up with the chain before we
	// continue with the tests.
	h.WaitForBlockchainSync(hn)
}

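// BackupDB and RestartNodeAndRestoreDB are commonly used together to snapshot
// and roll back a node's state, e.g. in breach tests. A sketch, where carol is
// a node created in the test body:
//
//	// Snapshot Carol's database at the current channel state.
//	ht.BackupDB(carol)
//
//	// ...make payments that advance the channel state...
//
//	// Roll Carol back to the snapshot, leaving her on a revoked state.
//	ht.RestartNodeAndRestoreDB(carol)
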
// MineBlocks mines blocks and asserts all active nodes have synced to the
// chain.
//
// NOTE: this differs from the miner's `MineBlocks` as it requires the nodes
// to be synced.
func (h *HarnessTest) MineBlocks(num uint32) []*wire.MsgBlock {
	// Mine the blocks slowly to give `lnd` more time to sync.
	blocks := h.Miner.MineBlocksSlow(num)

	// Make sure all the active nodes are synced.
	bestBlock := blocks[len(blocks)-1]
	h.AssertActiveNodesSyncedTo(bestBlock)

	return blocks
}

// MineBlocksAndAssertNumTxes mines blocks and asserts the expected number of
// transactions is found in the first block. It also asserts all active nodes
// have synced to the chain.
//
// NOTE: this differs from the miner's `MineBlocks` as it requires the nodes
// to be synced.
func (h *HarnessTest) MineBlocksAndAssertNumTxes(num uint32,
	numTxs int) []*wire.MsgBlock {

	// If we expect transactions to be included in the blocks we'll mine,
	// we wait here until they are seen in the miner's mempool.
	txids := h.Miner.AssertNumTxsInMempool(numTxs)

	// Mine blocks.
	blocks := h.Miner.MineBlocksSlow(num)

	// Assert that all the transactions were included in the first block.
	for _, txid := range txids {
		h.Miner.AssertTxInBlock(blocks[0], txid)
	}

	// Finally, make sure all the active nodes are synced.
	bestBlock := blocks[len(blocks)-1]
	h.AssertActiveNodesSyncedTo(bestBlock)

	return blocks
}

// cleanMempool mines blocks till the mempool is empty and asserts all active
// nodes have synced to the chain.
func (h *HarnessTest) cleanMempool() {
	_, startHeight := h.Miner.GetBestBlock()

	// Mine the blocks slowly to give `lnd` more time to sync.
	var bestBlock *wire.MsgBlock
	err := wait.NoError(func() error {
		// If mempool is empty, exit.
		mem := h.Miner.GetRawMempool()
		if len(mem) == 0 {
			_, height := h.Miner.GetBestBlock()
			h.Logf("Mined %d blocks when cleaning up the mempool",
				height-startHeight)

			return nil
		}

		// Otherwise mine a block.
		blocks := h.Miner.MineBlocksSlow(1)
		bestBlock = blocks[len(blocks)-1]

		// Make sure all the active nodes are synced.
		h.AssertActiveNodesSyncedTo(bestBlock)

		return fmt.Errorf("still have %d txes in mempool", len(mem))
	}, wait.MinerMempoolTimeout)
	require.NoError(h, err, "timeout cleaning up mempool")
}

// CleanShutDown is used to quickly end a test by shutting down all non-standby
// nodes and mining blocks to empty the mempool.
//
// NOTE: this method provides a faster exit for a test that involves force
// closures as the caller doesn't need to mine all the blocks to make sure the
// mempool is empty.
func (h *HarnessTest) CleanShutDown() {
	// First, shutdown all non-standby nodes to prevent new transactions
	// being created and fed into the mempool.
	h.shutdownNonStandbyNodes()

	// Now mine blocks till the mempool is empty.
	h.cleanMempool()
}

// MineEmptyBlocks mines a given number of empty blocks.
//
// NOTE: this differs from the miner's `MineEmptyBlocks` as it requires the
// nodes to be synced.
func (h *HarnessTest) MineEmptyBlocks(num int) []*wire.MsgBlock {
	blocks := h.Miner.MineEmptyBlocks(num)

	// Finally, make sure all the active nodes are synced.
	h.AssertActiveNodesSynced()

	return blocks
}

// QueryChannelByChanPoint tries to find a channel matching the channel point
// and asserts. It returns the channel found.
func (h *HarnessTest) QueryChannelByChanPoint(hn *node.HarnessNode,
	chanPoint *lnrpc.ChannelPoint,
	opts ...ListChannelOption) *lnrpc.Channel {

	channel, err := h.findChannel(hn, chanPoint, opts...)
	require.NoError(h, err, "failed to query channel")

	return channel
}

// SendPaymentAndAssertStatus sends a payment from the passed node and asserts
// the desired status is reached.
func (h *HarnessTest) SendPaymentAndAssertStatus(hn *node.HarnessNode,
	req *routerrpc.SendPaymentRequest,
	status lnrpc.Payment_PaymentStatus) *lnrpc.Payment {

	stream := hn.RPC.SendPayment(req)
	return h.AssertPaymentStatusFromStream(stream, status)
}

// SendPaymentAssertFail sends a payment from the passed node and asserts the
// payment fails with the specified failure reason.
func (h *HarnessTest) SendPaymentAssertFail(hn *node.HarnessNode,
	req *routerrpc.SendPaymentRequest,
	reason lnrpc.PaymentFailureReason) *lnrpc.Payment {

	payment := h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_FAILED)
	require.Equal(h, reason, payment.FailureReason,
		"payment failureReason not matched")

	return payment
}

// SendPaymentAssertSettled sends a payment from the passed node and asserts
// the payment is settled.
func (h *HarnessTest) SendPaymentAssertSettled(hn *node.HarnessNode,
	req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

	return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_SUCCEEDED)
}

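// A sketch of driving these helpers directly with a routerrpc request, where
// payReq is assumed to come from CreatePayReqs or AddInvoice:
//
//	req := &routerrpc.SendPaymentRequest{
//		PaymentRequest: payReq,
//		TimeoutSeconds: int32(wait.PaymentTimeout.Seconds()),
//		FeeLimitMsat:   noFeeLimitMsat,
//	}
//	payment := ht.SendPaymentAssertSettled(ht.Alice, req)
//	ht.Logf("payment settled with fee %d msat", payment.FeeMsat)
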
// OpenChannelRequest is used to open a channel using the method
// OpenMultiChannelsAsync.
type OpenChannelRequest struct {
	// Local is the funding node.
	Local *node.HarnessNode

	// Remote is the receiving node.
	Remote *node.HarnessNode

	// Param is the open channel params.
	Param OpenChannelParams

	// stream is the client created after calling OpenChannel RPC.
	stream rpc.OpenChanClient

	// result is a channel used to send the channel point once the funding
	// has succeeded.
	result chan *lnrpc.ChannelPoint
}

// OpenMultiChannelsAsync takes a list of OpenChannelRequest and opens them in
// batch. The channel points are returned in the same order as the requests
// once all of the channel opens succeed.
//
// NOTE: compared to opening multiple channels sequentially, this method will
// be faster as it doesn't need to mine 6 blocks for each channel open.
// However, it does make debugging the logs more difficult as messages are
// intertwined.
func (h *HarnessTest) OpenMultiChannelsAsync(
	reqs []*OpenChannelRequest) []*lnrpc.ChannelPoint {

	// openChannel opens a channel based on the request.
	openChannel := func(req *OpenChannelRequest) {
		stream := h.OpenChannelAssertStream(
			req.Local, req.Remote, req.Param,
		)
		req.stream = stream
	}

	// assertChannelOpen is a helper closure that asserts a channel is
	// open.
	assertChannelOpen := func(req *OpenChannelRequest) {
		// Wait for the channel open event from the stream.
		cp := h.WaitForChannelOpenEvent(req.stream)

		if !req.Param.Private {
			// Check that both the local and remote nodes have seen
			// the channel from their channel watch request.
			h.AssertTopologyChannelOpen(req.Local, cp)
			h.AssertTopologyChannelOpen(req.Remote, cp)
		}

		// Finally, check that the channel can be seen in their
		// ListChannels.
		h.AssertChannelExists(req.Local, cp)
		h.AssertChannelExists(req.Remote, cp)

		req.result <- cp
	}

	// Go through the requests and make the OpenChannel RPC call.
	for _, r := range reqs {
		openChannel(r)
	}

	// Mine one block to confirm all the funding transactions.
	h.MineBlocksAndAssertNumTxes(1, len(reqs))

	// Mine 5 more blocks so all the public channels are announced to the
	// network.
	h.MineBlocks(numBlocksOpenChannel - 1)

	// Once the blocks are mined, we fire goroutines for each of the
	// requests to watch for the channel opening.
	for _, r := range reqs {
		r.result = make(chan *lnrpc.ChannelPoint, 1)
		go assertChannelOpen(r)
	}

	// Finally, collect the results.
	channelPoints := make([]*lnrpc.ChannelPoint, 0)
	for _, r := range reqs {
		select {
		case cp := <-r.result:
			channelPoints = append(channelPoints, cp)

		case <-time.After(wait.ChannelOpenTimeout):
			require.Failf(h, "timeout", "wait channel point "+
				"timeout for channel %s=>%s", r.Local.Name(),
				r.Remote.Name())
		}
	}

	// Assert that we have the expected num of channel points.
	require.Len(h, channelPoints, len(reqs),
		"returned channel points not match")

	return channelPoints
}

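// A sketch of batching channel opens with the request type above, where the
// node handles and chanAmt are assumed to exist in the test body:
//
//	reqs := []*OpenChannelRequest{
//		{Local: alice, Remote: bob, Param: OpenChannelParams{Amt: chanAmt}},
//		{Local: bob, Remote: carol, Param: OpenChannelParams{Amt: chanAmt}},
//	}
//	chanPoints := ht.OpenMultiChannelsAsync(reqs)
//	// chanPoints[0] is alice=>bob, chanPoints[1] is bob=>carol.
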
// ReceiveInvoiceUpdate waits until a message is received on the subscribe
// invoice stream or the timeout is reached.
func (h *HarnessTest) ReceiveInvoiceUpdate(
	stream rpc.InvoiceUpdateClient) *lnrpc.Invoice {

	chanMsg := make(chan *lnrpc.Invoice)
	errChan := make(chan error)
	go func() {
		// Consume one message. This will block until the message is
		// received.
		resp, err := stream.Recv()
		if err != nil {
			errChan <- err
			return
		}
		chanMsg <- resp
	}()

	select {
	case <-time.After(DefaultTimeout):
		require.Fail(h, "timeout", "timeout receiving invoice update")

	case err := <-errChan:
		require.Failf(h, "err from stream",
			"received err from stream: %v", err)

	case updateMsg := <-chanMsg:
		return updateMsg
	}

	return nil
}

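// A sketch of consuming invoice updates, assuming the rpc wrapper exposes the
// SubscribeInvoices helper used elsewhere in these tests:
//
//	stream := ht.Bob.RPC.SubscribeInvoices(&lnrpc.InvoiceSubscription{})
//	invoice := ht.ReceiveInvoiceUpdate(stream)
//	require.Equal(ht, lnrpc.Invoice_OPEN, invoice.State)
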
// CalculateTxFee retrieves parent transactions and reconstructs the fee paid.
func (h *HarnessTest) CalculateTxFee(tx *wire.MsgTx) btcutil.Amount {
	var balance btcutil.Amount
	for _, in := range tx.TxIn {
		parentHash := in.PreviousOutPoint.Hash
		rawTx := h.Miner.GetRawTransaction(&parentHash)
		parent := rawTx.MsgTx()
		balance += btcutil.Amount(
			parent.TxOut[in.PreviousOutPoint.Index].Value,
		)
	}

	for _, out := range tx.TxOut {
		balance -= btcutil.Amount(out.Value)
	}

	return balance
}

// CalculateTxesFeeRate takes a list of transactions and estimates the fee rate
// used to sweep them.
//
// NOTE: only used in current test file.
func (h *HarnessTest) CalculateTxesFeeRate(txns []*wire.MsgTx) int64 {
	const scale = 1000

	var totalWeight, totalFee int64
	for _, tx := range txns {
		utx := btcutil.NewTx(tx)
		totalWeight += blockchain.GetTransactionWeight(utx)

		fee := h.CalculateTxFee(tx)
		totalFee += int64(fee)
	}
	feeRate := totalFee * scale / totalWeight

	return feeRate
}

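// The scale factor above converts the result to sat/kw: fee is in satoshis
// and weight in weight units, so fee * 1000 / weight is satoshis per 1000
// weight units. A worked example: a 506-wu sweep tx paying a 2,530-sat fee
// gives 2530 * 1000 / 506 = 5000 sat/kw.
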
// SweptOutput pairs an output that was swept with the sweep transaction that
// spent it.
type SweptOutput struct {
	OutPoint wire.OutPoint
	SweepTx  *wire.MsgTx
}

// FindCommitAndAnchor looks for a commitment sweep and anchor sweep in the
// mempool. Our anchor output is identified by having multiple inputs in its
// sweep transaction, because we have to bring another input to add fees to
// the anchor. Note that the anchor swept output may be nil if the channel did
// not have anchors.
func (h *HarnessTest) FindCommitAndAnchor(sweepTxns []*wire.MsgTx,
	closeTx string) (*SweptOutput, *SweptOutput) {

	var commitSweep, anchorSweep *SweptOutput

	for _, tx := range sweepTxns {
		txHash := tx.TxHash()
		sweepTx := h.Miner.GetRawTransaction(&txHash)

		// We expect our commitment sweep to have a single input, and,
		// our anchor sweep to have more inputs (because the wallet
		// needs to add balance to the anchor amount). We find their
		// sweep txids here to setup appropriate resolutions. We also
		// need to find the outpoint for our resolution, which we do by
		// matching the inputs to the sweep to the close transaction.
		inputs := sweepTx.MsgTx().TxIn
		if len(inputs) == 1 {
			commitSweep = &SweptOutput{
				OutPoint: inputs[0].PreviousOutPoint,
				SweepTx:  tx,
			}
		} else {
			// Since we have more than one input, we run through
			// them to find the one whose previous outpoint matches
			// the closing txid, which means this input is spending
			// the close tx. This will be our anchor output.
			for _, txin := range inputs {
				op := txin.PreviousOutPoint.Hash.String()
				if op == closeTx {
					anchorSweep = &SweptOutput{
						OutPoint: txin.PreviousOutPoint,
						SweepTx:  tx,
					}
				}
			}
		}
	}

	return commitSweep, anchorSweep
}

// AssertSweepFound looks up a sweep in a node's list of broadcast sweeps and
// asserts it's found.
//
// NOTE: Does not account for node's internal state.
func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
	sweep string, verbose bool, startHeight int32) {

	// List all sweeps that the node had broadcast.
	sweepResp := hn.RPC.ListSweeps(verbose, startHeight)

	var found bool
	if verbose {
		found = findSweepInDetails(h, sweep, sweepResp)
	} else {
		found = findSweepInTxids(h, sweep, sweepResp)
	}

	require.Truef(h, found, "%s: sweep: %v not found", hn.Name(), sweep)
}

// findSweepInTxids asserts the response contains plain txids and returns
// whether the given sweep txid is among them.
func findSweepInTxids(ht *HarnessTest, sweepTxid string,
	sweepResp *walletrpc.ListSweepsResponse) bool {

	sweepTxIDs := sweepResp.GetTransactionIds()
	require.NotNil(ht, sweepTxIDs, "expected transaction ids")
	require.Nil(ht, sweepResp.GetTransactionDetails())

	// Check that the sweep tx we have just produced is present.
	for _, tx := range sweepTxIDs.TransactionIds {
		if tx == sweepTxid {
			return true
		}
	}

	return false
}

// findSweepInDetails asserts the response contains full transaction details
// and returns whether the given sweep txid is among them.
func findSweepInDetails(ht *HarnessTest, sweepTxid string,
	sweepResp *walletrpc.ListSweepsResponse) bool {

	sweepDetails := sweepResp.GetTransactionDetails()
	require.NotNil(ht, sweepDetails, "expected transaction details")
	require.Nil(ht, sweepResp.GetTransactionIds())

	for _, tx := range sweepDetails.Transactions {
		if tx.TxHash == sweepTxid {
			return true
		}
	}

	return false
}

// ConnectMiner connects the miner with the chain backend in the network.
func (h *HarnessTest) ConnectMiner() {
	err := h.manager.chainBackend.ConnectMiner()
	require.NoError(h, err, "failed to connect miner")
}

// DisconnectMiner removes the connection between the miner and the chain
// backend in the network.
func (h *HarnessTest) DisconnectMiner() {
	err := h.manager.chainBackend.DisconnectMiner()
	require.NoError(h, err, "failed to disconnect miner")
}

// QueryRoutesAndRetry attempts to keep querying a route until timeout is
// reached.
//
// NOTE: when a channel is opened, we may need to query multiple times to get
// it in our QueryRoutes RPC. This happens even after we check the channel is
// heard by the node using ht.AssertChannelOpen. Deep down, this is because our
// GraphTopologySubscription and QueryRoutes give different results regarding a
// specific channel, with the former reporting it being open while the latter
// does not, resulting in GraphTopologySubscription acting "faster" than
// QueryRoutes.
// TODO(yy): make sure related subsystems share the same view on a given
// channel.
func (h *HarnessTest) QueryRoutesAndRetry(hn *node.HarnessNode,
	req *lnrpc.QueryRoutesRequest) *lnrpc.QueryRoutesResponse {

	var routes *lnrpc.QueryRoutesResponse
	err := wait.NoError(func() error {
		ctxt, cancel := context.WithCancel(h.runCtx)
		defer cancel()

		resp, err := hn.RPC.LN.QueryRoutes(ctxt, req)
		if err != nil {
			return fmt.Errorf("%s: failed to query route: %w",
				hn.Name(), err)
		}

		routes = resp

		return nil
	}, DefaultTimeout)

	require.NoError(h, err, "timeout querying routes")

	return routes
}

// ReceiveHtlcInterceptor waits until a message is received on the htlc
// interceptor stream or the timeout is reached.
func (h *HarnessTest) ReceiveHtlcInterceptor(
	stream rpc.InterceptorClient) *routerrpc.ForwardHtlcInterceptRequest {

	chanMsg := make(chan *routerrpc.ForwardHtlcInterceptRequest)
	errChan := make(chan error)
	go func() {
		// Consume one message. This will block until the message is
		// received.
		resp, err := stream.Recv()
		if err != nil {
			errChan <- err
			return
		}
		chanMsg <- resp
	}()

	select {
	case <-time.After(DefaultTimeout):
		require.Fail(h, "timeout", "timeout intercepting htlc")

	case err := <-errChan:
		require.Failf(h, "err from stream",
			"received err from stream: %v", err)

	case updateMsg := <-chanMsg:
		return updateMsg
	}

	return nil
}

// ReceiveChannelEvent waits until a message is received from the
// ChannelEventsClient stream or the timeout is reached.
func (h *HarnessTest) ReceiveChannelEvent(
	stream rpc.ChannelEventsClient) *lnrpc.ChannelEventUpdate {

	chanMsg := make(chan *lnrpc.ChannelEventUpdate)
	errChan := make(chan error)
	go func() {
		// Consume one message. This will block until the message is
		// received.
		resp, err := stream.Recv()
		if err != nil {
			errChan <- err
			return
		}
		chanMsg <- resp
	}()

	select {
	case <-time.After(DefaultTimeout):
		require.Fail(h, "timeout", "timeout receiving channel event")

	case err := <-errChan:
		require.Failf(h, "err from stream",
			"received err from stream: %v", err)

	case updateMsg := <-chanMsg:
		return updateMsg
	}

	return nil
}

// GetOutputIndex returns the output index of the given address in the given
// transaction.
func (h *HarnessTest) GetOutputIndex(txid *chainhash.Hash, addr string) int {
	// We'll extract the raw transaction from the mempool in order to
	// determine the index of the p2tr output.
	tx := h.Miner.GetRawTransaction(txid)

	p2trOutputIndex := -1
	for i, txOut := range tx.MsgTx().TxOut {
		_, addrs, _, err := txscript.ExtractPkScriptAddrs(
			txOut.PkScript, h.Miner.ActiveNet,
		)
		require.NoError(h, err)

		// Skip outputs that don't decode to an address, e.g. data
		// outputs, before comparing against the target address.
		if len(addrs) > 0 && addrs[0].String() == addr {
			p2trOutputIndex = i
		}
	}
	require.Greater(h, p2trOutputIndex, -1)

	return p2trOutputIndex
}