package lntest

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/integration/rpctest"
	"github.com/btcsuite/btcd/rpcclient"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/jackc/pgx/v4/pgxpool"
	"github.com/lightningnetwork/lnd/chanbackup"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/lnrpc/signrpc"
	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc"
	"github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/lightningnetwork/lnd/macaroons"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/status"
	"gopkg.in/macaroon.v2"
)

const (
	// logPubKeyBytes is the number of bytes of the node's PubKey that will
	// be appended to the log file name. The whole PubKey is too long and
	// not really necessary to quickly identify what node produced which
	// log file.
	logPubKeyBytes = 4

	// trickleDelay is the amount of time in milliseconds between each
	// release of announcements by AuthenticatedGossiper to the network.
	trickleDelay = 50

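	// postgresDsn is the DSN template used to connect to the local
	// postgres instance during itests; the single %s placeholder is
	// filled in with the database name.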
	postgresDsn = "postgres://postgres:postgres@localhost:6432/%s?sslmode=disable"

	// commitInterval specifies the maximum interval the graph database
	// will wait between attempting to flush a batch of modifications to
	// disk (db.batch-commit-interval).
	commitInterval = 10 * time.Millisecond
)

var (
	// numActiveNodes is the number of active nodes within the test network.
	numActiveNodes = 0

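	// numActiveNodesMtx guards access to numActiveNodes.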
	numActiveNodesMtx sync.Mutex
)

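// postgresDatabaseDsn returns the connection string for the given postgres
// database name, based on the postgresDsn template.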
func postgresDatabaseDsn(dbName string) string {
	return fmt.Sprintf(postgresDsn, dbName)
}

// generateListeningPorts assigns the next available ports for the p2p, rpc,
// rest and profiling services to the given node config, unless a port has
// already been set for that service.
func generateListeningPorts(cfg *NodeConfig) {
	if cfg.P2PPort == 0 {
		cfg.P2PPort = NextAvailablePort()
	}
	if cfg.RPCPort == 0 {
		cfg.RPCPort = NextAvailablePort()
	}
	if cfg.RESTPort == 0 {
		cfg.RESTPort = NextAvailablePort()
	}
	if cfg.ProfilePort == 0 {
		cfg.ProfilePort = NextAvailablePort()
	}
}

// BackendConfig is an interface that abstracts away the specific chain
// backend node implementation.
type BackendConfig interface {
	// GenArgs returns the arguments needed to be passed to LND at startup
	// for using this node as a chain backend.
	GenArgs() []string

	// ConnectMiner is called to establish a connection to the test miner.
	ConnectMiner() error

	// DisconnectMiner is called to disconnect the miner.
	DisconnectMiner() error

	// Name returns the name of the backend type.
	Name() string
}

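// NodeConfig holds the configuration used to create and start a HarnessNode.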
type NodeConfig struct {
	Name string

	// LogFilenamePrefix is used to prefix node log files. Can be used to
	// store the current test case for simpler postmortem debugging.
	LogFilenamePrefix string

	BackendCfg BackendConfig
	NetParams  *chaincfg.Params
	BaseDir    string
	ExtraArgs  []string

	DataDir        string
	LogDir         string
	TLSCertPath    string
	TLSKeyPath     string
	AdminMacPath   string
	ReadMacPath    string
	InvoiceMacPath string

	HasSeed  bool
	Password []byte

	P2PPort     int
	RPCPort     int
	RESTPort    int
	ProfilePort int

	AcceptKeySend bool
	AcceptAMP     bool

	FeeURL string

	DbBackend   DatabaseBackend
	PostgresDsn string
}

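// P2PAddr returns the host:port address the node listens on for p2p
// connections.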
func (cfg NodeConfig) P2PAddr() string {
	return fmt.Sprintf(listenerFormat, cfg.P2PPort)
}

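// RPCAddr returns the host:port address of the node's gRPC server.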
func (cfg NodeConfig) RPCAddr() string {
	return fmt.Sprintf(listenerFormat, cfg.RPCPort)
}

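// RESTAddr returns the host:port address of the node's REST proxy.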
func (cfg NodeConfig) RESTAddr() string {
	return fmt.Sprintf(listenerFormat, cfg.RESTPort)
}

// DBDir returns the holding directory path of the graph database.
func (cfg NodeConfig) DBDir() string {
	return filepath.Join(cfg.DataDir, "graph", cfg.NetParams.Name)
}

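// DBPath returns the filepath of the channel.db database within DBDir.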
func (cfg NodeConfig) DBPath() string {
	return filepath.Join(cfg.DBDir(), "channel.db")
}

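// ChanBackupPath returns the filepath of the node's channel.backup file.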
func (cfg NodeConfig) ChanBackupPath() string {
	return filepath.Join(
		cfg.DataDir, "chain", "bitcoin",
		fmt.Sprintf(
			"%v/%v", cfg.NetParams.Name,
			chanbackup.DefaultBackupFileName,
		),
	)
}

// genArgs generates a slice of command line arguments from the lightning node
// config struct.
func (cfg NodeConfig) genArgs() []string {
	var args []string

	switch cfg.NetParams {
	case &chaincfg.TestNet3Params:
		args = append(args, "--bitcoin.testnet")
	case &chaincfg.SimNetParams:
		args = append(args, "--bitcoin.simnet")
	case &chaincfg.RegressionNetParams:
		args = append(args, "--bitcoin.regtest")
	}

	backendArgs := cfg.BackendCfg.GenArgs()
	args = append(args, backendArgs...)

	nodeArgs := []string{
		"--bitcoin.active",
		"--nobootstrap",
		"--debuglevel=debug",
		"--bitcoin.defaultchanconfs=1",
		fmt.Sprintf("--db.batch-commit-interval=%v", commitInterval),
		fmt.Sprintf("--bitcoin.defaultremotedelay=%v", DefaultCSV),
		fmt.Sprintf("--rpclisten=%v", cfg.RPCAddr()),
		fmt.Sprintf("--restlisten=%v", cfg.RESTAddr()),
		fmt.Sprintf("--restcors=https://%v", cfg.RESTAddr()),
		fmt.Sprintf("--listen=%v", cfg.P2PAddr()),
		fmt.Sprintf("--externalip=%v", cfg.P2PAddr()),
		fmt.Sprintf("--logdir=%v", cfg.LogDir),
		fmt.Sprintf("--datadir=%v", cfg.DataDir),
		fmt.Sprintf("--tlscertpath=%v", cfg.TLSCertPath),
		fmt.Sprintf("--tlskeypath=%v", cfg.TLSKeyPath),
		fmt.Sprintf("--configfile=%v", cfg.DataDir),
		fmt.Sprintf("--adminmacaroonpath=%v", cfg.AdminMacPath),
		fmt.Sprintf("--readonlymacaroonpath=%v", cfg.ReadMacPath),
		fmt.Sprintf("--invoicemacaroonpath=%v", cfg.InvoiceMacPath),
		fmt.Sprintf("--trickledelay=%v", trickleDelay),
		fmt.Sprintf("--profile=%d", cfg.ProfilePort),
		"--caches.rpc-graph-cache-duration=0",
	}
	args = append(args, nodeArgs...)

	if !cfg.HasSeed {
		args = append(args, "--noseedbackup")
	}

	if cfg.ExtraArgs != nil {
		args = append(args, cfg.ExtraArgs...)
	}

	if cfg.AcceptKeySend {
		args = append(args, "--accept-keysend")
	}

	if cfg.AcceptAMP {
		args = append(args, "--accept-amp")
	}

	switch cfg.DbBackend {
	case BackendEtcd:
		args = append(args, "--db.backend=etcd")
		args = append(args, "--db.etcd.embedded")
		args = append(
			args, fmt.Sprintf(
				"--db.etcd.embedded_client_port=%v",
				NextAvailablePort(),
			),
		)
		args = append(
			args, fmt.Sprintf(
				"--db.etcd.embedded_peer_port=%v",
				NextAvailablePort(),
			),
		)
		args = append(
			args, fmt.Sprintf(
				"--db.etcd.embedded_log_file=%v",
				path.Join(cfg.LogDir, "etcd.log"),
			),
		)

	case BackendPostgres:
		args = append(args, "--db.backend=postgres")
		args = append(args, "--db.postgres.dsn="+cfg.PostgresDsn)
	}

	if cfg.FeeURL != "" {
		args = append(args, "--feeurl="+cfg.FeeURL)
	}

	return args
}

// policyUpdateMap defines a type to store channel policy updates. It has the
// format,
//	{
//	 "chanPoint1": {
//	      "advertisingNode1": [
//	             policy1, policy2, ...
//	      ],
//	      "advertisingNode2": [
//	             policy1, policy2, ...
//	      ]
//	 },
//	 "chanPoint2": ...
//	}
type policyUpdateMap map[string]map[string][]*lnrpc.RoutingPolicy

// HarnessNode represents an instance of lnd running within our test network
// harness. Each HarnessNode instance also fully embeds an RPC client in
// order to programmatically drive the node.
type HarnessNode struct {
	Cfg *NodeConfig

	// NodeID is a unique identifier for the node within a NetworkHarness.
	NodeID int

	// PubKey is the serialized compressed identity public key of the node.
	// This field will only be populated once the node itself has been
	// started via the start() method.
	PubKey    [33]byte
	PubKeyStr string

	// rpc holds a list of RPC clients.
	rpc *RPCClients

	// chanWatchRequests receives a request for watching a particular event
	// for a given channel.
	chanWatchRequests chan *chanWatchRequest

	// For each outpoint, we'll track an integer which denotes the number
	// of edges seen for that channel within the network. When this number
	// reaches 2, then it means that both edge advertisements have
	// propagated through the network.
	openChans        map[wire.OutPoint]int
	openChanWatchers map[wire.OutPoint][]chan struct{}

	closedChans       map[wire.OutPoint]struct{}
	closeChanWatchers map[wire.OutPoint][]chan struct{}

	// policyUpdates stores a slice of seen policies by each advertising
	// node and the outpoint.
	policyUpdates policyUpdateMap

	// backupDbDir is the path where a database backup is stored, if any.
	backupDbDir string

	// postgresDbName is the name of the postgres database in which lnd
	// data is stored.
	postgresDbName string

	// runCtx is a context with cancel method. It's used to signal when the
	// node needs to quit, and used as the parent context when spawning
	// children contexts for RPC requests.
	runCtx context.Context
	cancel context.CancelFunc

	wg      sync.WaitGroup
	cmd     *exec.Cmd
	logFile *os.File

	// TODO(yy): remove
	lnrpc.LightningClient
	lnrpc.WalletUnlockerClient
	invoicesrpc.InvoicesClient
	SignerClient     signrpc.SignerClient
	RouterClient     routerrpc.RouterClient
	WalletKitClient  walletrpc.WalletKitClient
	Watchtower       watchtowerrpc.WatchtowerClient
	WatchtowerClient wtclientrpc.WatchtowerClientClient
	StateClient      lnrpc.StateClient
}

// RPCClients wraps a list of RPC clients into a single struct for easier
// access.
type RPCClients struct {
	// conn is the underlying connection to the grpc endpoint of the node.
	conn *grpc.ClientConn

	LN               lnrpc.LightningClient
	WalletUnlocker   lnrpc.WalletUnlockerClient
	Invoice          invoicesrpc.InvoicesClient
	Signer           signrpc.SignerClient
	Router           routerrpc.RouterClient
	WalletKit        walletrpc.WalletKitClient
	Watchtower       watchtowerrpc.WatchtowerClient
	WatchtowerClient wtclientrpc.WatchtowerClientClient
	State            lnrpc.StateClient
}

// Assert *HarnessNode implements the lnrpc.LightningClient interface.
var _ lnrpc.LightningClient = (*HarnessNode)(nil)
var _ lnrpc.WalletUnlockerClient = (*HarnessNode)(nil)
var _ invoicesrpc.InvoicesClient = (*HarnessNode)(nil)

// nextNodeID generates a unique sequence to be used as the node's ID.
func nextNodeID() int {
	numActiveNodesMtx.Lock()
	defer numActiveNodesMtx.Unlock()
	nodeNum := numActiveNodes
	numActiveNodes++

	return nodeNum
}

// newNode creates a new test lightning node instance from the passed config.
func newNode(cfg NodeConfig) (*HarnessNode, error) {
	if cfg.BaseDir == "" {
		var err error
		cfg.BaseDir, err = ioutil.TempDir("", "lndtest-node")
		if err != nil {
			return nil, err
		}
	}
	cfg.DataDir = filepath.Join(cfg.BaseDir, "data")
	cfg.LogDir = filepath.Join(cfg.BaseDir, "log")
	cfg.TLSCertPath = filepath.Join(cfg.DataDir, "tls.cert")
	cfg.TLSKeyPath = filepath.Join(cfg.DataDir, "tls.key")

	networkDir := filepath.Join(
		cfg.DataDir, "chain", "bitcoin", cfg.NetParams.Name,
	)
	cfg.AdminMacPath = filepath.Join(networkDir, "admin.macaroon")
	cfg.ReadMacPath = filepath.Join(networkDir, "readonly.macaroon")
	cfg.InvoiceMacPath = filepath.Join(networkDir, "invoice.macaroon")

	generateListeningPorts(&cfg)

	// Run all tests with accept keysend. The keysend code is very isolated
	// and it is highly unlikely that it would affect regular itests when
	// enabled.
	cfg.AcceptKeySend = true

	// Create temporary database.
	var dbName string
	if cfg.DbBackend == BackendPostgres {
		var err error
		dbName, err = createTempPgDb()
		if err != nil {
			return nil, err
		}
		cfg.PostgresDsn = postgresDatabaseDsn(dbName)
	}

	return &HarnessNode{
		Cfg:               &cfg,
		NodeID:            nextNodeID(),
		chanWatchRequests: make(chan *chanWatchRequest),
		openChans:         make(map[wire.OutPoint]int),
		openChanWatchers:  make(map[wire.OutPoint][]chan struct{}),

		closedChans:       make(map[wire.OutPoint]struct{}),
		closeChanWatchers: make(map[wire.OutPoint][]chan struct{}),

		policyUpdates: policyUpdateMap{},

		postgresDbName: dbName,
	}, nil
}

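// createTempPgDb creates a new, randomly named database on the local postgres
// instance and returns its name.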
func createTempPgDb() (string, error) {
	// Create random database name.
	randBytes := make([]byte, 8)
	_, err := rand.Read(randBytes)
	if err != nil {
		return "", err
	}
	dbName := "itest_" + hex.EncodeToString(randBytes)

	// Create database.
	err = executePgQuery("CREATE DATABASE " + dbName)
	if err != nil {
		return "", err
	}

	return dbName, nil
}

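// executePgQuery connects to the local postgres instance and executes the
// given query against it.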
func executePgQuery(query string) error {
	pool, err := pgxpool.Connect(
		context.Background(),
		postgresDatabaseDsn("postgres"),
	)
	if err != nil {
		return fmt.Errorf("unable to connect to database: %v", err)
	}
	defer pool.Close()

	_, err = pool.Exec(context.Background(), query)
	return err
}

// NewMiner creates a new miner using the btcd backend. The baseLogDir
// specifies the miner node's log dir. When tests are finished, during clean
// up, its log files, including any compressed log files from logrotate, are
// copied next to baseLogDir, with "btcd.log" in each file name replaced by
// logFilename.
func NewMiner(baseLogDir, logFilename string, netParams *chaincfg.Params,
	handler *rpcclient.NotificationHandlers,
	btcdBinary string) (*rpctest.Harness, func() error, error) {

	args := []string{
		"--rejectnonstd",
		"--txindex",
		"--nowinservice",
		"--nobanning",
		"--debuglevel=debug",
		"--logdir=" + baseLogDir,
		"--trickleinterval=100ms",
		// Don't disconnect if a reply takes too long.
		"--nostalldetect",
	}

	miner, err := rpctest.New(netParams, handler, args, btcdBinary)
	if err != nil {
		return nil, nil, fmt.Errorf(
			"unable to create mining node: %v", err,
		)
	}

	cleanUp := func() error {
		if err := miner.TearDown(); err != nil {
			return fmt.Errorf(
				"failed to tear down miner, got error: %s", err,
			)
		}

		// After shutting down the miner, we'll make a copy of
		// the log files before deleting the temporary log dir.
		logDir := fmt.Sprintf("%s/%s", baseLogDir, netParams.Name)
		files, err := ioutil.ReadDir(logDir)
		if err != nil {
			return fmt.Errorf("unable to read log directory: %v", err)
		}

		for _, file := range files {
			logFile := fmt.Sprintf(
				"%s/%s", logDir, file.Name(),
			)
			newFilename := strings.Replace(
				file.Name(), "btcd.log", logFilename, 1,
			)
			copyPath := fmt.Sprintf(
				"%s/../%s", baseLogDir, newFilename,
			)

			err := CopyFile(filepath.Clean(copyPath), logFile)
			if err != nil {
				return fmt.Errorf("unable to copy file: %v", err)
			}
		}

		if err = os.RemoveAll(baseLogDir); err != nil {
			return fmt.Errorf(
				"cannot remove dir %s: %v", baseLogDir, err,
			)
		}
		return nil
	}

	return miner, cleanUp, nil
}

// String gives the internal state of the node which is useful for debugging.
func (hn *HarnessNode) String() string {
	type nodeCfg struct {
		LogFilenamePrefix string
		ExtraArgs         []string
		HasSeed           bool
		P2PPort           int
		RPCPort           int
		RESTPort          int
		ProfilePort       int
		AcceptKeySend     bool
		AcceptAMP         bool
		FeeURL            string
	}

	nodeState := struct {
		NodeID      int
		Name        string
		PubKey      string
		OpenChans   map[string]int
		ClosedChans map[string]struct{}
		NodeCfg     nodeCfg
	}{
		NodeID:      hn.NodeID,
		Name:        hn.Cfg.Name,
		PubKey:      hn.PubKeyStr,
		OpenChans:   make(map[string]int),
		ClosedChans: make(map[string]struct{}),
		NodeCfg: nodeCfg{
			LogFilenamePrefix: hn.Cfg.LogFilenamePrefix,
			ExtraArgs:         hn.Cfg.ExtraArgs,
			HasSeed:           hn.Cfg.HasSeed,
			P2PPort:           hn.Cfg.P2PPort,
			RPCPort:           hn.Cfg.RPCPort,
			RESTPort:          hn.Cfg.RESTPort,
			AcceptKeySend:     hn.Cfg.AcceptKeySend,
			AcceptAMP:         hn.Cfg.AcceptAMP,
			FeeURL:            hn.Cfg.FeeURL,
		},
	}

	for outpoint, count := range hn.openChans {
		nodeState.OpenChans[outpoint.String()] = count
	}
	for outpoint, count := range hn.closedChans {
		nodeState.ClosedChans[outpoint.String()] = count
	}

	bytes, err := json.MarshalIndent(nodeState, "", "\t")
	if err != nil {
		return fmt.Sprintf("\n encode node state with err: %v", err)
	}

	return fmt.Sprintf("\nnode state: %s", bytes)
}

// DBPath returns the filepath to the channeldb database file for this node.
func (hn *HarnessNode) DBPath() string {
	return hn.Cfg.DBPath()
}

// DBDir returns the path for the directory holding channeldb file(s).
func (hn *HarnessNode) DBDir() string {
	return hn.Cfg.DBDir()
}

// Name returns the name of this node set during initialization.
func (hn *HarnessNode) Name() string {
	return hn.Cfg.Name
}

// TLSCertStr returns the path where the TLS certificate is stored.
func (hn *HarnessNode) TLSCertStr() string {
	return hn.Cfg.TLSCertPath
}

// TLSKeyStr returns the path where the TLS key is stored.
func (hn *HarnessNode) TLSKeyStr() string {
	return hn.Cfg.TLSKeyPath
}

// ChanBackupPath returns the filepath to the on-disk channel.backup file for
// this node.
func (hn *HarnessNode) ChanBackupPath() string {
	return hn.Cfg.ChanBackupPath()
}

// AdminMacPath returns the filepath to the admin.macaroon file for this node.
func (hn *HarnessNode) AdminMacPath() string {
	return hn.Cfg.AdminMacPath
}

// ReadMacPath returns the filepath to the readonly.macaroon file for this
// node.
func (hn *HarnessNode) ReadMacPath() string {
	return hn.Cfg.ReadMacPath
}

// InvoiceMacPath returns the filepath to the invoice.macaroon file for this
// node.
func (hn *HarnessNode) InvoiceMacPath() string {
	return hn.Cfg.InvoiceMacPath
}

// startLnd handles the startup of lnd, creating log files, and possibly kills
// the process when needed.
func (hn *HarnessNode) startLnd(lndBinary string, lndError chan<- error) error {
	args := hn.Cfg.genArgs()
	hn.cmd = exec.Command(lndBinary, args...)

	// Redirect stderr output to buffer.
	var errb bytes.Buffer
	hn.cmd.Stderr = &errb

	// If the logoutput flag is passed, redirect output from the nodes to
	// log files.
	var (
		fileName string
		err      error
	)
	if *logOutput {
		fileName, err = addLogFile(hn)
		if err != nil {
			return err
		}
	}

	if err := hn.cmd.Start(); err != nil {
		return err
	}

	// Launch a new goroutine that bubbles up any potential fatal process
	// errors to the goroutine running the tests.
	hn.wg.Add(1)
	go func() {
		defer hn.wg.Done()

		err := hn.cmd.Wait()
		if err != nil {
			lndError <- fmt.Errorf("%v\n%v", err, errb.String())
		}

		// Make sure log file is closed and renamed if necessary.
		finalizeLogfile(hn, fileName)

		// Rename the etcd.log file if the node was running on embedded
		// etcd.
		finalizeEtcdLog(hn)
	}()

	return nil
}

// start launches a new process running lnd. Additionally, the PID of the
// launched process is saved in order to possibly kill the process forcibly
// later.
//
// This may not clean up properly if an error is returned, so the caller should
// call shutdown() regardless of the return value.
func (hn *HarnessNode) start(lndBinary string, lndError chan<- error,
	wait bool) error {

	// Init the runCtx.
	ctxt, cancel := context.WithCancel(context.Background())
	hn.runCtx = ctxt
	hn.cancel = cancel

	// Start lnd and prepare logs.
	if err := hn.startLnd(lndBinary, lndError); err != nil {
		return err
	}

	// We may want to skip waiting for the node to come up (e.g. the node
	// is waiting to become the leader).
	if !wait {
		return nil
	}

	// Since Stop uses the LightningClient to stop the node, if we fail to
	// get a connected client, we have to kill the process.
	useMacaroons := !hn.Cfg.HasSeed
	conn, err := hn.ConnectRPC(useMacaroons)
	if err != nil {
		err = fmt.Errorf("ConnectRPC err: %w", err)
		cmdErr := hn.cmd.Process.Kill()
		if cmdErr != nil {
			err = fmt.Errorf("kill process got err: %w: %v",
				cmdErr, err)
		}
		return err
	}

	if err := hn.WaitUntilStarted(conn, DefaultTimeout); err != nil {
		return err
	}

	// Init all the RPC clients.
	hn.initRPCClients(conn)

	// If the node was created with a seed, we will need to perform an
	// additional step to unlock the wallet. The connection returned will
	// only use the TLS certs, and can only perform operations necessary to
	// unlock the daemon.
	if hn.Cfg.HasSeed {
		// TODO(yy): remove
		hn.WalletUnlockerClient = lnrpc.NewWalletUnlockerClient(conn)
		return nil
	}

	return hn.initLightningClient()
}

// WaitUntilStarted waits until the wallet state flips from "WAITING_TO_START".
func (hn *HarnessNode) WaitUntilStarted(conn grpc.ClientConnInterface,
	timeout time.Duration) error {

	return hn.waitForState(conn, timeout, func(s lnrpc.WalletState) bool {
		return s != lnrpc.WalletState_WAITING_TO_START
	})
}

// WaitUntilStateReached waits until the given wallet state (or one of the
// states following it) has been reached.
func (hn *HarnessNode) WaitUntilStateReached(conn grpc.ClientConnInterface,
	timeout time.Duration, desiredState lnrpc.WalletState) error {

	return hn.waitForState(conn, timeout, func(s lnrpc.WalletState) bool {
		return s >= desiredState
	})
}

// waitForState waits until the current node state fulfills the given
// predicate.
func (hn *HarnessNode) waitForState(conn grpc.ClientConnInterface,
	timeout time.Duration,
	predicate func(state lnrpc.WalletState) bool) error {

	stateClient := lnrpc.NewStateClient(conn)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	stateStream, err := stateClient.SubscribeState(
		ctx, &lnrpc.SubscribeStateRequest{},
	)
	if err != nil {
		return err
	}

	errChan := make(chan error, 1)
	started := make(chan struct{})
	go func() {
		for {
			resp, err := stateStream.Recv()
			if err != nil {
				errChan <- err
				return
			}

			if predicate(resp.State) {
				close(started)
				return
			}
		}
	}()

	select {
	case <-started:
	case err = <-errChan:

	case <-time.After(timeout):
		return fmt.Errorf("timed out waiting for wallet state")
	}

	return err
}

// WaitUntilLeader attempts to finish the start procedure by initiating an RPC
// connection and setting up the wallet unlocker client. This is needed when
// a node that has recently been started was waiting to become the leader and
// we're at the point when we expect that it is the leader now (awaiting
// unlock).
func (hn *HarnessNode) WaitUntilLeader(timeout time.Duration) error {
	var (
		conn    *grpc.ClientConn
		connErr error
	)

	startTs := time.Now()
	if err := wait.NoError(func() error {
		conn, connErr = hn.ConnectRPC(!hn.Cfg.HasSeed)
		return connErr
	}, timeout); err != nil {
		return err
	}
	timeout -= time.Since(startTs)

	if err := hn.WaitUntilStarted(conn, timeout); err != nil {
		return err
	}

	// Init all the RPC clients.
	hn.initRPCClients(conn)

	// If the node was created with a seed, we will need to perform an
	// additional step to unlock the wallet. The connection returned will
	// only use the TLS certs, and can only perform operations necessary to
	// unlock the daemon.
	if hn.Cfg.HasSeed {
		// TODO(yy): remove
		hn.WalletUnlockerClient = lnrpc.NewWalletUnlockerClient(conn)

		return nil
	}

	return hn.initLightningClient()
}

// initClientWhenReady waits until the main gRPC server is detected as active,
// then completes the normal HarnessNode gRPC connection creation. This can be
// used when a node has just been unlocked, or has its wallet state
// initialized.
func (hn *HarnessNode) initClientWhenReady(timeout time.Duration) error {
	var (
		conn    *grpc.ClientConn
		connErr error
	)
	if err := wait.NoError(func() error {
		conn, connErr = hn.ConnectRPC(true)
		return connErr
	}, timeout); err != nil {
		return err
	}

	// Init all the RPC clients.
	hn.initRPCClients(conn)

	return hn.initLightningClient()
}

// Init initializes a harness node by passing the init request via rpc. After
// the request is submitted, this method will block until a
// macaroon-authenticated RPC connection can be established to the harness
// node. Once established, the new connection is used to initialize the
// LightningClient and subscribes the HarnessNode to topology changes.
func (hn *HarnessNode) Init(
	initReq *lnrpc.InitWalletRequest) (*lnrpc.InitWalletResponse, error) {

	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
	defer cancel()
	response, err := hn.InitWallet(ctxt, initReq)
	if err != nil {
		return nil, err
	}

	// Wait for the wallet to finish unlocking, such that we can connect to
	// it via a macaroon-authenticated rpc connection.
	var conn *grpc.ClientConn
	if err = wait.Predicate(func() bool {
		// If the node has been initialized stateless, we need to pass
		// the macaroon to the client.
		if initReq.StatelessInit {
			adminMac := &macaroon.Macaroon{}
			err := adminMac.UnmarshalBinary(response.AdminMacaroon)
			if err != nil {
				return false
			}
			conn, err = hn.ConnectRPCWithMacaroon(adminMac)
			return err == nil
		}

		// Normal initialization, we expect a macaroon to be in the
		// file system.
		conn, err = hn.ConnectRPC(true)
		return err == nil
	}, DefaultTimeout); err != nil {
		return nil, err
	}

	// Init all the RPC clients.
	hn.initRPCClients(conn)

	return response, hn.initLightningClient()
}

// InitChangePassword initializes a harness node by passing the change password
// request via RPC. After the request is submitted, this method will block until
// a macaroon-authenticated RPC connection can be established to the harness
// node. Once established, the new connection is used to initialize the
// LightningClient and subscribes the HarnessNode to topology changes.
func (hn *HarnessNode) InitChangePassword(
	chngPwReq *lnrpc.ChangePasswordRequest) (*lnrpc.ChangePasswordResponse,
	error) {

	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
	defer cancel()
	response, err := hn.ChangePassword(ctxt, chngPwReq)
	if err != nil {
		return nil, err
	}

	// Wait for the wallet to finish unlocking, such that we can connect to
	// it via a macaroon-authenticated rpc connection.
	var conn *grpc.ClientConn
	if err = wait.Predicate(func() bool {
		// If the node has been initialized stateless, we need to pass
		// the macaroon to the client.
		if chngPwReq.StatelessInit {
			adminMac := &macaroon.Macaroon{}
			err := adminMac.UnmarshalBinary(response.AdminMacaroon)
			if err != nil {
				return false
			}
			conn, err = hn.ConnectRPCWithMacaroon(adminMac)
			return err == nil
		}

		// Normal initialization, we expect a macaroon to be in the
		// file system.
		conn, err = hn.ConnectRPC(true)
		return err == nil
	}, DefaultTimeout); err != nil {
		return nil, err
	}

	// Init all the RPC clients.
	hn.initRPCClients(conn)

	return response, hn.initLightningClient()
}

// Unlock attempts to unlock the wallet of the target HarnessNode. This method
// should be called after the restart of a HarnessNode that was created with a
// seed+password. Once this method returns, the HarnessNode will be ready to
// accept normal gRPC requests and harness commands.
func (hn *HarnessNode) Unlock(unlockReq *lnrpc.UnlockWalletRequest) error {
	ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
	defer cancel()

	// We'll need to unlock the node before it's able to start up
	// properly.
	_, err := hn.rpc.WalletUnlocker.UnlockWallet(ctxt, unlockReq)
	if err != nil {
		return err
	}

	// Now that the wallet has been unlocked, we'll wait for the RPC client
	// to be ready, then establish the normal gRPC connection.
	return hn.initClientWhenReady(DefaultTimeout)
}

// waitTillServerStarted makes a subscription to the server's state change and
// blocks until the server is in state ServerActive.
func (hn *HarnessNode) waitTillServerStarted() error {
	ctxt, cancel := context.WithTimeout(hn.runCtx, NodeStartTimeout)
	defer cancel()

	client, err := hn.rpc.State.SubscribeState(
		ctxt, &lnrpc.SubscribeStateRequest{},
	)
	if err != nil {
		return fmt.Errorf("failed to subscribe to state: %w", err)
	}

	for {
		resp, err := client.Recv()
		if err != nil {
			return fmt.Errorf("failed to receive state "+
				"client stream: %w", err)
		}

		if resp.State == lnrpc.WalletState_SERVER_ACTIVE {
			return nil
		}
	}
}

// initRPCClients initializes a list of RPC clients for the node.
func (hn *HarnessNode) initRPCClients(c *grpc.ClientConn) {
	hn.rpc = &RPCClients{
		conn:             c,
		LN:               lnrpc.NewLightningClient(c),
		Invoice:          invoicesrpc.NewInvoicesClient(c),
		Router:           routerrpc.NewRouterClient(c),
		WalletKit:        walletrpc.NewWalletKitClient(c),
		WalletUnlocker:   lnrpc.NewWalletUnlockerClient(c),
		Watchtower:       watchtowerrpc.NewWatchtowerClient(c),
		WatchtowerClient: wtclientrpc.NewWatchtowerClientClient(c),
		Signer:           signrpc.NewSignerClient(c),
		State:            lnrpc.NewStateClient(c),
	}
}

// initLightningClient blocks until the lnd server is fully started and
// subscribes the harness node to graph topology updates. This method also
// spawns a lightning network watcher for this node, which watches for topology
// changes.
func (hn *HarnessNode) initLightningClient() error {
	// TODO(yy): remove
	// Construct the LightningClient that will allow us to use the
	// HarnessNode directly for normal rpc operations.
	conn := hn.rpc.conn
	hn.LightningClient = lnrpc.NewLightningClient(conn)
	hn.InvoicesClient = invoicesrpc.NewInvoicesClient(conn)
	hn.RouterClient = routerrpc.NewRouterClient(conn)
	hn.WalletKitClient = walletrpc.NewWalletKitClient(conn)
	hn.Watchtower = watchtowerrpc.NewWatchtowerClient(conn)
	hn.WatchtowerClient = wtclientrpc.NewWatchtowerClientClient(conn)
	hn.SignerClient = signrpc.NewSignerClient(conn)
	hn.StateClient = lnrpc.NewStateClient(conn)

	// Wait until the server is fully started.
	if err := hn.waitTillServerStarted(); err != nil {
		return err
	}

	// Set the harness node's pubkey to what the node claims in GetInfo.
	// Since the RPC might not be immediately active, we wrap the call in a
	// wait.NoError.
	if err := wait.NoError(hn.FetchNodeInfo, DefaultTimeout); err != nil {
		return err
	}

	// Launch the watcher that will hook into graph related topology change
	// from the PoV of this node.
	hn.wg.Add(1)
	go hn.lightningNetworkWatcher()

	return nil
}

// FetchNodeInfo queries an unlocked node to retrieve its public key.
func (hn *HarnessNode) FetchNodeInfo() error {
	// Obtain the lnid of this node for quick identification purposes.
	info, err := hn.rpc.LN.GetInfo(hn.runCtx, &lnrpc.GetInfoRequest{})
	if err != nil {
		return err
	}

	hn.PubKeyStr = info.IdentityPubkey

	pubkey, err := hex.DecodeString(info.IdentityPubkey)
	if err != nil {
		return err
	}
	copy(hn.PubKey[:], pubkey)

	return nil
}

// AddToLog adds a line of choice to the node's logfile. This is useful
// to interleave test output with output from the node.
func (hn *HarnessNode) AddToLog(format string, a ...interface{}) {
	// If this node was not set up with a log file, just return early.
	if hn.logFile == nil {
		return
	}

	desc := fmt.Sprintf("itest: %s\n", fmt.Sprintf(format, a...))
	if _, err := hn.logFile.WriteString(desc); err != nil {
		hn.PrintErr("write to log err: %v", err)
	}
}

// ReadMacaroon waits a given duration for the macaroon file to be created. If
// the file is readable within the timeout, its content is de-serialized as a
// macaroon and returned.
func (hn *HarnessNode) ReadMacaroon(macPath string, timeout time.Duration) (
	*macaroon.Macaroon, error) {

	// Wait until macaroon file is created and has valid content before
	// using it.
	var mac *macaroon.Macaroon
	err := wait.NoError(func() error {
		macBytes, err := ioutil.ReadFile(macPath)
		if err != nil {
			return fmt.Errorf("error reading macaroon file: %v", err)
		}

		newMac := &macaroon.Macaroon{}
		if err = newMac.UnmarshalBinary(macBytes); err != nil {
			return fmt.Errorf("error unmarshalling macaroon "+
				"file: %v", err)
		}
		mac = newMac

		return nil
	}, timeout)

	return mac, err
}

// ConnectRPCWithMacaroon uses the TLS certificate and given macaroon to
// create a gRPC client connection.
func (hn *HarnessNode) ConnectRPCWithMacaroon(mac *macaroon.Macaroon) (
	*grpc.ClientConn, error) {

	// Wait until TLS certificate is created and has valid content before
	// using it, up to 30 sec.
	var tlsCreds credentials.TransportCredentials
	err := wait.NoError(func() error {
		var err error
		tlsCreds, err = credentials.NewClientTLSFromFile(
			hn.Cfg.TLSCertPath, "",
		)
		return err
	}, DefaultTimeout)
	if err != nil {
		return nil, fmt.Errorf("error reading TLS cert: %v", err)
	}

	opts := []grpc.DialOption{
		grpc.WithBlock(),
		grpc.WithTransportCredentials(tlsCreds),
	}

	ctx, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
	defer cancel()

	if mac == nil {
		return grpc.DialContext(ctx, hn.Cfg.RPCAddr(), opts...)
	}
	macCred, err := macaroons.NewMacaroonCredential(mac)
	if err != nil {
		return nil, fmt.Errorf("error cloning mac: %v", err)
	}
	opts = append(opts, grpc.WithPerRPCCredentials(macCred))

	return grpc.DialContext(ctx, hn.Cfg.RPCAddr(), opts...)
}

// ConnectRPC uses the TLS certificate and admin macaroon files written by the
// lnd node to create a gRPC client connection.
func (hn *HarnessNode) ConnectRPC(useMacs bool) (*grpc.ClientConn, error) {
	// If we don't want to use macaroons, just pass nil, the next method
	// will handle it correctly.
	if !useMacs {
		return hn.ConnectRPCWithMacaroon(nil)
	}

	// If we should use a macaroon, always take the admin macaroon as a
	// default.
	mac, err := hn.ReadMacaroon(hn.Cfg.AdminMacPath, DefaultTimeout)
	if err != nil {
		return nil, err
	}
	return hn.ConnectRPCWithMacaroon(mac)
}

// SetExtraArgs assigns the ExtraArgs field for the node's configuration. The
// changes will take effect on restart.
func (hn *HarnessNode) SetExtraArgs(extraArgs []string) {
	hn.Cfg.ExtraArgs = extraArgs
}

// cleanup cleans up all the temporary files created by the node's process.
func (hn *HarnessNode) cleanup() error {
	if hn.backupDbDir != "" {
		err := os.RemoveAll(hn.backupDbDir)
		if err != nil {
			return fmt.Errorf("unable to remove backup dir: %v",
				err)
		}
	}

	return os.RemoveAll(hn.Cfg.BaseDir)
}

// stop attempts to stop the active lnd process.
func (hn *HarnessNode) stop() error {
	// Do nothing if the process is not running.
	if hn.runCtx == nil {
		return nil
	}

	// If start() failed before creating clients, we will just wait for the
	// child process to die.
	if hn.rpc != nil && hn.rpc.LN != nil {
		// Don't watch for error because sometimes the RPC connection
		// gets closed before a response is returned.
		req := lnrpc.StopRequest{}

		err := wait.NoError(func() error {
			_, err := hn.rpc.LN.StopDaemon(hn.runCtx, &req)
			switch {
			case err == nil:
				return nil

			// Try again if a recovery/rescan is in progress.
			case strings.Contains(
				err.Error(), "recovery in progress",
			):
				return err

			default:
				return nil
			}
		}, DefaultTimeout)
		if err != nil {
			return err
		}
	}

	// Stop the runCtx and wait for goroutines to finish.
	hn.cancel()

	// Wait for lnd process to exit.
	err := wait.NoError(func() error {
		if hn.cmd.ProcessState == nil {
			return fmt.Errorf("process did not exit")
		}

		if !hn.cmd.ProcessState.Exited() {
			return fmt.Errorf("process did not exit")
		}

		// Wait for goroutines to be finished.
		hn.wg.Wait()

		return nil
	}, DefaultTimeout*2)
	if err != nil {
		return err
	}

	hn.LightningClient = nil
	hn.WalletUnlockerClient = nil
	hn.Watchtower = nil
	hn.WatchtowerClient = nil

	// Close any attempts at further grpc connections.
	if hn.rpc.conn != nil {
		err := status.Code(hn.rpc.conn.Close())
		switch err {
		case codes.OK:
			return nil

		// When the context is canceled above, we might get the
		// following error as the context is no longer active.
		case codes.Canceled:
			return nil

		case codes.Unknown:
			return fmt.Errorf("unknown error attempting to stop "+
				"grpc client: %v", err)

		default:
			return fmt.Errorf("error attempting to stop "+
				"grpc client: %v", err)
		}
	}

	return nil
}

// shutdown stops the active lnd process and cleans up any temporary
// directories created along the way.
func (hn *HarnessNode) shutdown() error {
	if err := hn.stop(); err != nil {
		return err
	}
	if err := hn.cleanup(); err != nil {
		return err
	}
	return nil
}

// kill kills the lnd process.
func (hn *HarnessNode) kill() error {
	return hn.cmd.Process.Kill()
}

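// chanWatchType describes the kind of event a chanWatchRequest is interested
// in: a channel open, a channel close, or a policy update.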
type chanWatchType uint8

const (
	// watchOpenChannel specifies that this is a request to watch an open
	// channel event.
	watchOpenChannel chanWatchType = iota

	// watchCloseChannel specifies that this is a request to watch a close
	// channel event.
	watchCloseChannel

	// watchPolicyUpdate specifies that this is a request to watch a policy
	// update event.
	watchPolicyUpdate
)

// chanWatchRequest is a request to the lightningNetworkWatcher to be
|
|
|
|
// notified once it's detected within the test Lightning Network that a
|
|
|
|
// channel has been opened or closed, or a channel policy has been updated.
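//
// A request is delivered by sending it over the node's chanWatchRequests
// channel. A minimal sketch (mirroring WaitForNetworkChannelOpen below;
// names are illustrative):
//
//	hn.chanWatchRequests <- &chanWatchRequest{
//		chanPoint:     op,
//		eventChan:     make(chan struct{}),
//		chanWatchType: watchOpenChannel,
//	}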
|
|
|
|
type chanWatchRequest struct {
|
|
|
|
chanPoint wire.OutPoint
|
|
|
|
|
2021-08-08 11:01:56 +02:00
|
|
|
chanWatchType chanWatchType
|
2017-11-17 00:37:08 +01:00
|
|
|
|
|
|
|
eventChan chan struct{}
|
2021-08-08 11:08:37 +02:00
|
|
|
|
|
|
|
advertisingNode string
|
|
|
|
policy *lnrpc.RoutingPolicy
|
|
|
|
includeUnannounced bool
|
2017-11-17 00:37:08 +01:00
|
|
|
}
|
|
|
|
|
2021-09-14 12:40:02 +02:00
|
|
|
func (hn *HarnessNode) checkChanPointInGraph(chanPoint wire.OutPoint) bool {
|
lntest: fix possible race condition re asserting channel propagation
In this commit, we attempt to fix a race condition that may occur in the
current AMP and MPP tests.
It appears the following scenario is possible:
* The `mppTestContext` [is used to create 6 channels back to
back](https://github.com/lightningnetwork/lnd/blob/master/lntest/itest/lnd_amp_test.go#L43)
* The method used to create the channel ends up calling
[`openChannelAndAssert`](https://github.com/lightningnetwork/lnd/blob/edd4152682dc8f70e11da541cd3be4692db4b011/lntest/itest/lnd_mpp_test.go#L300)
which'll open the channel, mine 6 blocks, [then ensure that the
channel gets
advertised](https://github.com/lightningnetwork/lnd/blob/edd4152682dc8f70e11da541cd3be4692db4b011/lntest/itest/assertions.go#L78)
* Later on, [we wait for all nodes to hear about all channels on the
network
level](https://github.com/lightningnetwork/lnd/blob/master/lntest/itest/lnd_amp_test.go#L62)
I think the issue here is that we'll potentially already have mined 30
or so blocks before getting to the final nodes, and those nodes may have
already heard about the channel. This may then cause their
[`lightningNetworkWatcher`](https://github.com/lightningnetwork/lnd/blob/edd4152682dc8f70e11da541cd3be4692db4b011/lntest/node.go#L1213)
goroutine to not properly dispatch this, since it's assumed that the
channel hasn't been announced (or notified) when the method is called.
One solution here is to just check if the channel is already in the
node's graph or not, when we go to register the notification. If we do
this in the same state machine as the watcher, then we ensure if the
channel is already known, the client is immediately notified. One thing
that can help us debug this more in the future is adding additional
logging in some of these helper goroutines so we can more easily track
the control flow.
This commit implements this solution by checking to ensure that the
channel isn't already known in our channel graph before attempting to
wait for its notification, as we may already have missed the
notification before this registration request came in.
2021-07-15 03:35:48 +02:00
|
|
|
|
2021-09-14 12:40:02 +02:00
|
|
|
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
2021-07-15 03:35:48 +02:00
|
|
|
defer cancel()
|
2021-09-14 12:40:02 +02:00
|
|
|
|
|
|
|
chanGraph, err := hn.DescribeGraph(ctxt, &lnrpc.ChannelGraphRequest{})
|
2021-07-15 03:35:48 +02:00
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
targetChanPoint := chanPoint.String()
|
|
|
|
for _, chanEdge := range chanGraph.Edges {
|
|
|
|
candidateChanPoint := chanEdge.ChanPoint
|
|
|
|
if targetChanPoint == candidateChanPoint {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2017-11-17 00:37:08 +01:00
|
|
|
// lightningNetworkWatcher is a goroutine which is able to dispatch
|
|
|
|
// notifications once it has been observed that a target channel has been
|
|
|
|
// closed or opened within the network. In order to dispatch these
|
|
|
|
// notifications, the GraphTopologySubscription client exposed as part of the
|
|
|
|
// gRPC interface is used.
|
2021-08-08 10:57:24 +02:00
|
|
|
func (hn *HarnessNode) lightningNetworkWatcher() {
|
2017-11-03 19:52:02 +01:00
|
|
|
defer hn.wg.Done()
|
2017-11-17 00:37:08 +01:00
|
|
|
|
|
|
|
graphUpdates := make(chan *lnrpc.GraphTopologyUpdate)
|
2021-08-08 10:57:24 +02:00
|
|
|
|
|
|
|
// Start a goroutine to receive graph updates.
|
2017-11-03 19:52:02 +01:00
|
|
|
hn.wg.Add(1)
|
2017-11-17 00:37:08 +01:00
|
|
|
go func() {
|
2017-11-03 19:52:02 +01:00
|
|
|
defer hn.wg.Done()
|
2021-08-08 10:57:24 +02:00
|
|
|
err := hn.receiveTopologyClientStream(graphUpdates)
|
2017-11-17 00:37:08 +01:00
|
|
|
if err != nil {
|
2021-08-08 10:57:24 +02:00
|
|
|
hn.PrintErr("receive topology client stream "+
|
|
|
|
"got err:%v", err)
|
2017-11-17 00:37:08 +01:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
|
|
|
|
// A new graph update has just been received, so we'll examine
|
|
|
|
// the current set of registered clients to see if we can
|
|
|
|
// dispatch any requests.
|
|
|
|
case graphUpdate := <-graphUpdates:
|
2021-08-08 11:19:25 +02:00
|
|
|
hn.handleChannelEdgeUpdates(graphUpdate.ChannelUpdates)
|
2021-08-08 11:20:04 +02:00
|
|
|
hn.handleClosedChannelUpdate(graphUpdate.ClosedChans)
|
|
|
|
// TODO(yy): handle node updates too
|
2017-11-17 00:37:08 +01:00
|
|
|
|
|
|
|
// A new watch request has just arrived. We'll either be able
|
|
|
|
// to dispatch immediately, or need to add the client for
|
|
|
|
// processing later.
|
2017-11-03 19:52:02 +01:00
|
|
|
case watchRequest := <-hn.chanWatchRequests:
|
2021-08-08 11:01:56 +02:00
|
|
|
switch watchRequest.chanWatchType {
|
|
|
|
case watchOpenChannel:
|
|
|
|
// TODO(roasbeef): add update type also, checks
|
|
|
|
// for multiple of 2
|
2021-08-08 11:19:25 +02:00
|
|
|
hn.handleOpenChannelWatchRequest(watchRequest)
|
2017-11-17 00:37:08 +01:00
|
|
|
|
2021-08-08 11:01:56 +02:00
|
|
|
case watchCloseChannel:
|
|
|
|
hn.handleCloseChannelWatchRequest(watchRequest)
|
2021-08-08 11:08:37 +02:00
|
|
|
|
|
|
|
case watchPolicyUpdate:
|
|
|
|
hn.handlePolicyUpdateWatchRequest(watchRequest)
|
2021-08-08 11:01:56 +02:00
|
|
|
}
|
2017-11-17 00:37:08 +01:00
|
|
|
|
2021-09-18 06:10:01 +02:00
|
|
|
case <-hn.runCtx.Done():
|
2017-11-17 00:37:08 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// WaitForNetworkChannelOpen will block until a channel with the target
|
|
|
|
// outpoint is seen as being fully advertised within the network. A channel is
|
|
|
|
// considered "fully advertised" once both of its directional edges has been
|
|
|
|
// advertised within the test Lightning Network.
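//
// A minimal usage sketch (illustrative only; assumes a harness node `alice`,
// a testing handle `t` and an *lnrpc.ChannelPoint `chanPoint` returned from
// a previous channel open):
//
//	if err := alice.WaitForNetworkChannelOpen(chanPoint); err != nil {
//		t.Fatalf("channel not seen as open: %v", err)
//	}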
|
2021-09-14 12:40:02 +02:00
|
|
|
func (hn *HarnessNode) WaitForNetworkChannelOpen(
|
2021-08-08 11:19:25 +02:00
|
|
|
chanPoint *lnrpc.ChannelPoint) error {
|
2017-11-17 00:37:08 +01:00
|
|
|
|
2021-09-14 12:40:02 +02:00
|
|
|
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
|
|
|
defer cancel()
|
|
|
|
|
2017-11-17 00:37:08 +01:00
|
|
|
eventChan := make(chan struct{})
|
|
|
|
|
2021-08-08 11:19:25 +02:00
|
|
|
op, err := MakeOutpoint(chanPoint)
|
2018-01-11 05:59:30 +01:00
|
|
|
if err != nil {
|
2021-08-08 11:19:25 +02:00
|
|
|
return fmt.Errorf("failed to create outpoint for %v "+
|
|
|
|
"got err: %v", chanPoint, err)
|
2017-11-17 00:37:08 +01:00
|
|
|
}
|
|
|
|
|
2017-11-03 19:52:02 +01:00
|
|
|
hn.chanWatchRequests <- &chanWatchRequest{
|
2021-08-08 11:01:56 +02:00
|
|
|
chanPoint: op,
|
|
|
|
eventChan: eventChan,
|
|
|
|
chanWatchType: watchOpenChannel,
|
2017-11-17 00:37:08 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-eventChan:
|
|
|
|
return nil
|
2021-09-14 12:40:02 +02:00
|
|
|
case <-ctxt.Done():
|
2021-08-08 11:19:25 +02:00
|
|
|
return fmt.Errorf("channel:%s not opened before timeout: %s",
|
|
|
|
op, hn)
|
2017-11-17 00:37:08 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// WaitForNetworkChannelClose will block until a channel with the target
|
|
|
|
// outpoint is seen as closed within the network. A channel is considered
|
|
|
|
// closed once a transaction spending the funding outpoint is seen within a
|
|
|
|
// confirmed block.
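//
// A minimal usage sketch (illustrative only; assumes the closing transaction
// has already been mined and that `alice`, `t` and `chanPoint` are defined by
// the calling test):
//
//	if err := alice.WaitForNetworkChannelClose(chanPoint); err != nil {
//		t.Fatalf("channel not seen as closed: %v", err)
//	}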
|
2021-09-14 12:40:02 +02:00
|
|
|
func (hn *HarnessNode) WaitForNetworkChannelClose(
|
2021-08-08 11:20:04 +02:00
|
|
|
chanPoint *lnrpc.ChannelPoint) error {
|
2017-11-17 00:37:08 +01:00
|
|
|
|
2021-09-14 12:40:02 +02:00
|
|
|
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
|
|
|
defer cancel()
|
|
|
|
|
2017-11-17 00:37:08 +01:00
|
|
|
eventChan := make(chan struct{})
|
|
|
|
|
2021-08-08 11:20:04 +02:00
|
|
|
op, err := MakeOutpoint(chanPoint)
|
2017-11-17 00:37:08 +01:00
|
|
|
if err != nil {
|
2021-08-08 11:20:04 +02:00
|
|
|
return fmt.Errorf("failed to create outpoint for %v "+
|
|
|
|
"got err: %v", chanPoint, err)
|
2017-11-17 00:37:08 +01:00
|
|
|
}
|
|
|
|
|
2017-11-03 19:52:02 +01:00
|
|
|
hn.chanWatchRequests <- &chanWatchRequest{
|
2021-08-08 11:01:56 +02:00
|
|
|
chanPoint: op,
|
|
|
|
eventChan: eventChan,
|
|
|
|
chanWatchType: watchCloseChannel,
|
2017-11-17 00:37:08 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-eventChan:
|
|
|
|
return nil
|
2021-09-14 12:40:02 +02:00
|
|
|
case <-ctxt.Done():
|
2021-08-08 11:20:04 +02:00
|
|
|
return fmt.Errorf("channel:%s not closed before timeout: "+
|
|
|
|
"%s", op, hn)
|
2017-11-17 00:37:08 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-08 11:08:37 +02:00
|
|
|
// WaitForChannelPolicyUpdate will block until a channel policy with the target
|
|
|
|
// outpoint and advertisingNode is seen within the network.
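//
// A minimal usage sketch (illustrative only; `bob.PubKeyStr`,
// `expectedPolicy`, `chanPoint` and `t` are assumed to be defined by the
// calling test):
//
//	err := alice.WaitForChannelPolicyUpdate(
//		bob.PubKeyStr, expectedPolicy, chanPoint, false,
//	)
//	if err != nil {
//		t.Fatalf("policy not updated: %v", err)
//	}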
|
2021-09-14 12:40:02 +02:00
|
|
|
func (hn *HarnessNode) WaitForChannelPolicyUpdate(
|
2021-08-08 11:08:37 +02:00
|
|
|
advertisingNode string, policy *lnrpc.RoutingPolicy,
|
|
|
|
chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) error {
|
|
|
|
|
2021-09-14 12:40:02 +02:00
|
|
|
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
|
|
|
defer cancel()
|
|
|
|
|
2021-08-08 11:08:37 +02:00
|
|
|
eventChan := make(chan struct{})
|
|
|
|
|
|
|
|
op, err := MakeOutpoint(chanPoint)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to create outpoint for %v"+
|
|
|
|
"got err: %v", chanPoint, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
ticker := time.NewTicker(wait.PollInterval)
|
|
|
|
defer ticker.Stop()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
// Send a watch request every poll interval.
|
|
|
|
case <-ticker.C:
|
2021-10-01 10:25:53 +02:00
|
|
|
// Did the event chan close in the meantime? We want to
|
|
|
|
// avoid a "close of closed channel" panic since we're
|
|
|
|
// re-using the same event chan for multiple requests.
|
|
|
|
select {
|
|
|
|
case <-eventChan:
|
|
|
|
return nil
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
2021-08-08 11:08:37 +02:00
|
|
|
hn.chanWatchRequests <- &chanWatchRequest{
|
|
|
|
chanPoint: op,
|
|
|
|
eventChan: eventChan,
|
|
|
|
chanWatchType: watchPolicyUpdate,
|
|
|
|
policy: policy,
|
|
|
|
advertisingNode: advertisingNode,
|
|
|
|
includeUnannounced: includeUnannounced,
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-eventChan:
|
|
|
|
return nil
|
|
|
|
|
2021-09-14 12:40:02 +02:00
|
|
|
case <-ctxt.Done():
|
2021-08-08 11:08:37 +02:00
|
|
|
return fmt.Errorf("channel:%s policy not updated "+
|
|
|
|
"before timeout: [%s:%v] %s", op,
|
|
|
|
advertisingNode, policy, hn.String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-21 12:52:50 +01:00
|
|
|
// WaitForBlockchainSync waits for the target node to be fully synchronized with
|
|
|
|
// the blockchain. It will continually poll until an internal timeout
|
|
|
|
// (DefaultTimeout) has elapsed. In the case that the chain
|
|
|
|
// isn't synced before the timeout is up, this function will return an error.
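//
// A minimal usage sketch (illustrative only), typically invoked after mining
// blocks so the node catches up before the test proceeds:
//
//	if err := alice.WaitForBlockchainSync(); err != nil {
//		t.Fatalf("node not synced to chain: %v", err)
//	}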
|
2021-09-14 12:40:02 +02:00
|
|
|
func (hn *HarnessNode) WaitForBlockchainSync() error {
|
|
|
|
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
|
|
|
defer cancel()
|
|
|
|
|
2020-11-21 12:52:50 +01:00
|
|
|
ticker := time.NewTicker(time.Millisecond * 100)
|
|
|
|
defer ticker.Stop()
|
2017-11-17 00:37:08 +01:00
|
|
|
|
2020-11-21 12:52:50 +01:00
|
|
|
for {
|
2021-09-18 07:45:55 +02:00
|
|
|
resp, err := hn.rpc.LN.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
|
2020-11-21 12:52:50 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if resp.SyncedToChain {
|
|
|
|
return nil
|
2017-11-17 00:37:08 +01:00
|
|
|
}
|
|
|
|
|
2020-11-21 12:52:50 +01:00
|
|
|
select {
|
2021-09-14 12:40:02 +02:00
|
|
|
case <-ctxt.Done():
|
2020-11-21 12:52:50 +01:00
|
|
|
return fmt.Errorf("timeout while waiting for " +
|
|
|
|
"blockchain sync")
|
2021-09-18 06:10:01 +02:00
|
|
|
case <-hn.runCtx.Done():
|
2020-11-21 12:52:50 +01:00
|
|
|
return nil
|
|
|
|
case <-ticker.C:
|
|
|
|
}
|
2017-11-17 00:37:08 +01:00
|
|
|
}
|
|
|
|
}
|
2017-11-03 19:52:02 +01:00
|
|
|
|
2018-08-10 06:15:40 +02:00
|
|
|
// WaitForBalance waits until the node sees the expected confirmed/unconfirmed
|
|
|
|
// balance within its wallet.
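//
// A minimal usage sketch (illustrative only; expects one confirmed bitcoin
// after a funding transaction has been mined; `alice` and `t` are assumed):
//
//	if err := alice.WaitForBalance(btcutil.SatoshiPerBitcoin, true); err != nil {
//		t.Fatalf("balance not reached: %v", err)
//	}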
|
2021-09-18 06:10:01 +02:00
|
|
|
func (hn *HarnessNode) WaitForBalance(expectedBalance btcutil.Amount,
|
|
|
|
confirmed bool) error {
|
|
|
|
|
2018-08-10 06:15:40 +02:00
|
|
|
req := &lnrpc.WalletBalanceRequest{}
|
|
|
|
|
2018-09-07 01:49:23 +02:00
|
|
|
var lastBalance btcutil.Amount
|
2018-08-10 06:15:40 +02:00
|
|
|
doesBalanceMatch := func() bool {
|
2021-09-18 07:45:55 +02:00
|
|
|
balance, err := hn.rpc.LN.WalletBalance(hn.runCtx, req)
|
2018-08-10 06:15:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
if confirmed {
|
2018-09-07 01:49:23 +02:00
|
|
|
lastBalance = btcutil.Amount(balance.ConfirmedBalance)
|
2018-09-10 15:02:06 +02:00
|
|
|
return btcutil.Amount(balance.ConfirmedBalance) == expectedBalance
|
2018-08-10 06:15:40 +02:00
|
|
|
}
|
|
|
|
|
2018-09-07 01:49:23 +02:00
|
|
|
lastBalance = btcutil.Amount(balance.UnconfirmedBalance)
|
2018-09-10 15:02:06 +02:00
|
|
|
return btcutil.Amount(balance.UnconfirmedBalance) == expectedBalance
|
2018-08-10 06:15:40 +02:00
|
|
|
}
|
|
|
|
|
2020-12-08 16:27:01 +01:00
|
|
|
err := wait.Predicate(doesBalanceMatch, DefaultTimeout)
|
2018-08-10 06:15:40 +02:00
|
|
|
if err != nil {
|
2018-09-07 01:49:23 +02:00
|
|
|
return fmt.Errorf("balances not synced after deadline: "+
|
|
|
|
"expected %v, only have %v", expectedBalance, lastBalance)
|
2018-08-10 06:15:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2021-08-08 11:16:49 +02:00
|
|
|
|
|
|
|
// PrintErr prints an error to the console.
|
|
|
|
func (hn *HarnessNode) PrintErr(format string, a ...interface{}) {
|
|
|
|
fmt.Printf("itest error from [node:%s]: %s\n",
|
|
|
|
hn.Cfg.Name, fmt.Sprintf(format, a...))
|
|
|
|
}
|
2021-08-08 11:19:25 +02:00
|
|
|
|
|
|
|
// handleChannelEdgeUpdates takes a series of channel edge updates, extracts
|
|
|
|
// the outpoints, and saves them to the harness node's internal state.
|
|
|
|
func (hn *HarnessNode) handleChannelEdgeUpdates(
|
|
|
|
updates []*lnrpc.ChannelEdgeUpdate) {
|
|
|
|
|
|
|
|
// For each new channel, we'll increment the number of
|
|
|
|
// edges seen by one.
|
|
|
|
for _, newChan := range updates {
|
|
|
|
op, err := MakeOutpoint(newChan.ChanPoint)
|
|
|
|
if err != nil {
|
|
|
|
hn.PrintErr("failed to create outpoint for %v "+
|
|
|
|
"got err: %v", newChan.ChanPoint, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
hn.openChans[op]++
|
|
|
|
|
|
|
|
// For this new channel, if the number of edges seen is less
|
|
|
|
// than two, then the channel hasn't been fully announced yet.
|
|
|
|
if numEdges := hn.openChans[op]; numEdges < 2 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we'll notify all the registered watchers and
|
|
|
|
// remove the dispatched watchers.
|
|
|
|
for _, eventChan := range hn.openChanWatchers[op] {
|
|
|
|
close(eventChan)
|
|
|
|
}
|
|
|
|
delete(hn.openChanWatchers, op)
|
2021-08-08 11:08:37 +02:00
|
|
|
|
|
|
|
// Check whether there's a routing policy update. If so, save
|
|
|
|
// it to the node state.
|
|
|
|
if newChan.RoutingPolicy == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Append the policy to the slice.
|
|
|
|
node := newChan.AdvertisingNode
|
|
|
|
policies := hn.policyUpdates[op.String()]
|
|
|
|
|
|
|
|
// If the map[op] is nil, we need to initialize the map first.
|
|
|
|
if policies == nil {
|
|
|
|
policies = make(map[string][]*lnrpc.RoutingPolicy)
|
|
|
|
}
|
|
|
|
policies[node] = append(
|
|
|
|
policies[node], newChan.RoutingPolicy,
|
|
|
|
)
|
|
|
|
hn.policyUpdates[op.String()] = policies
|
2021-08-08 11:19:25 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// handleOpenChannelWatchRequest processes a watch open channel request by
|
|
|
|
// checking the number of the edges seen for a given channel point. If the
|
|
|
|
// number is no less than 2 then the channel is considered open. Otherwise, we
|
|
|
|
// will attempt to find it in its channel graph. If neither can be found, the
|
|
|
|
// request is added to a watch request list that will be handled by
|
|
|
|
// handleChannelEdgeUpdates.
|
|
|
|
func (hn *HarnessNode) handleOpenChannelWatchRequest(req *chanWatchRequest) {
|
|
|
|
targetChan := req.chanPoint
|
|
|
|
|
|
|
|
// If this is an open request, then it can be dispatched if the number
|
|
|
|
// of edges seen for the channel is at least two.
|
|
|
|
if numEdges := hn.openChans[targetChan]; numEdges >= 2 {
|
|
|
|
close(req.eventChan)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Before we add the channel to our set of open clients, we'll check to
|
|
|
|
// see if the channel is already in the channel graph of the target
|
|
|
|
// node. This lets us handle the case where a node has already seen a
|
|
|
|
// channel before a notification has been requested, causing us to miss
|
|
|
|
// it.
|
2021-09-14 12:40:02 +02:00
|
|
|
chanFound := hn.checkChanPointInGraph(targetChan)
|
2021-08-08 11:19:25 +02:00
|
|
|
if chanFound {
|
|
|
|
close(req.eventChan)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we'll add this to the list of open channel watchers for
|
|
|
|
// this out point.
|
|
|
|
hn.openChanWatchers[targetChan] = append(
|
|
|
|
hn.openChanWatchers[targetChan],
|
|
|
|
req.eventChan,
|
|
|
|
)
|
|
|
|
}
|
2021-08-08 11:20:04 +02:00
|
|
|
|
|
|
|
// handleClosedChannelUpdate takes a series of closed channel updates, extracts
|
|
|
|
// the outpoints, saves them to the harness node's internal state, and notifies all
|
|
|
|
// registered clients.
|
|
|
|
func (hn *HarnessNode) handleClosedChannelUpdate(
|
|
|
|
updates []*lnrpc.ClosedChannelUpdate) {
|
|
|
|
|
|
|
|
// For each channel closed, we'll mark that we've detected a channel
|
|
|
|
// closure while lnd was pruning the channel graph.
|
|
|
|
for _, closedChan := range updates {
|
|
|
|
op, err := MakeOutpoint(closedChan.ChanPoint)
|
|
|
|
if err != nil {
|
|
|
|
hn.PrintErr("failed to create outpoint for %v "+
|
|
|
|
"got err: %v", closedChan.ChanPoint, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
hn.closedChans[op] = struct{}{}
|
|
|
|
|
|
|
|
// As the channel has been closed, we'll notify all registered
|
|
|
|
// watchers.
|
|
|
|
for _, eventChan := range hn.closeChanWatchers[op] {
|
|
|
|
close(eventChan)
|
|
|
|
}
|
|
|
|
delete(hn.closeChanWatchers, op)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// handleCloseChannelWatchRequest processes a watch close channel request by
|
|
|
|
// checking whether the given channel point can be found in the node's internal
|
|
|
|
// state. If not, the request is added to a watch request list that will be
|
|
|
|
// handled by handleClosedChannelUpdate.
|
|
|
|
func (hn *HarnessNode) handleCloseChannelWatchRequest(req *chanWatchRequest) {
|
|
|
|
targetChan := req.chanPoint
|
|
|
|
|
|
|
|
// If this is a close request, then it can be immediately dispatched if
|
|
|
|
// we've already seen a channel closure for this channel.
|
|
|
|
if _, ok := hn.closedChans[targetChan]; ok {
|
|
|
|
close(req.eventChan)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we'll add this to the list of close channel watchers for
|
|
|
|
// this out point.
|
|
|
|
hn.closeChanWatchers[targetChan] = append(
|
|
|
|
hn.closeChanWatchers[targetChan],
|
|
|
|
req.eventChan,
|
|
|
|
)
|
|
|
|
}
|
2021-08-08 10:57:24 +02:00
|
|
|
|
|
|
|
type topologyClient lnrpc.Lightning_SubscribeChannelGraphClient
|
|
|
|
|
|
|
|
// newTopologyClient creates a topology client.
|
|
|
|
func (hn *HarnessNode) newTopologyClient(
|
|
|
|
ctx context.Context) (topologyClient, error) {
|
|
|
|
|
|
|
|
req := &lnrpc.GraphTopologySubscription{}
|
2021-09-18 07:45:55 +02:00
|
|
|
client, err := hn.rpc.LN.SubscribeChannelGraph(ctx, req)
|
2021-08-08 10:57:24 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("%s(%d): unable to create topology "+
|
|
|
|
"client: %v (%s)", hn.Name(), hn.NodeID, err,
|
|
|
|
time.Now().String())
|
|
|
|
}
|
|
|
|
|
|
|
|
return client, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// receiveTopologyClientStream initializes a topologyClient to subscribe to
|
|
|
|
// topology update events. Due to a race condition between the ChannelRouter
|
|
|
|
// starting and us making the subscription request, it's possible for our graph
|
|
|
|
// subscription to fail. In that case, we will retry the subscription until it
|
|
|
|
// succeeds or fails once DefaultTimeout has elapsed.
|
|
|
|
//
|
|
|
|
// NOTE: must be run as a goroutine.
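//
// Typical launch pattern (a sketch mirroring lightningNetworkWatcher above):
//
//	hn.wg.Add(1)
//	go func() {
//		defer hn.wg.Done()
//		if err := hn.receiveTopologyClientStream(graphUpdates); err != nil {
//			hn.PrintErr("receive topology client stream got err:%v", err)
//		}
//	}()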
|
|
|
|
func (hn *HarnessNode) receiveTopologyClientStream(
|
|
|
|
receiver chan *lnrpc.GraphTopologyUpdate) error {
|
|
|
|
|
|
|
|
// Create a topology client to receive graph updates.
|
2021-09-18 06:10:01 +02:00
|
|
|
client, err := hn.newTopologyClient(hn.runCtx)
|
2021-08-08 10:57:24 +02:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("create topologyClient failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We use the context to time out when retrying graph subscription.
|
2021-09-18 06:10:01 +02:00
|
|
|
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
2021-08-08 10:57:24 +02:00
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
for {
|
|
|
|
update, err := client.Recv()
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case err == nil:
|
|
|
|
// Good case. We will send the update to the receiver.
|
|
|
|
|
|
|
|
case strings.Contains(err.Error(), "router not started"):
|
|
|
|
// If the router hasn't been started, we will retry
|
|
|
|
// every poll interval until it has been started, or fail
|
|
|
|
// once the ctxt times out.
|
|
|
|
select {
|
|
|
|
case <-ctxt.Done():
|
|
|
|
return fmt.Errorf("graph subscription: " +
|
|
|
|
"router not started before timeout")
|
|
|
|
case <-time.After(wait.PollInterval):
|
2021-09-18 06:10:01 +02:00
|
|
|
case <-hn.runCtx.Done():
|
2021-08-08 10:57:24 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Re-create the topology client.
|
2021-09-18 06:10:01 +02:00
|
|
|
client, err = hn.newTopologyClient(hn.runCtx)
|
2021-08-08 10:57:24 +02:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("create topologyClient "+
|
|
|
|
"failed: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
continue
|
|
|
|
|
|
|
|
case strings.Contains(err.Error(), "EOF"):
|
|
|
|
// End of subscription stream. Do nothing and quit.
|
|
|
|
return nil
|
|
|
|
|
2021-09-18 06:10:01 +02:00
|
|
|
case strings.Contains(err.Error(), context.Canceled.Error()):
|
|
|
|
// End of subscription stream. Do nothing and quit.
|
|
|
|
return nil
|
|
|
|
|
2021-08-08 10:57:24 +02:00
|
|
|
default:
|
|
|
|
// An unexpected error is returned; return and leave it
|
|
|
|
// to be handled by the caller.
|
2021-09-18 07:45:55 +02:00
|
|
|
return fmt.Errorf("graph subscription err: %w", err)
|
2021-08-08 10:57:24 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Send the update or quit.
|
|
|
|
select {
|
|
|
|
case receiver <- update:
|
2021-09-18 06:10:01 +02:00
|
|
|
case <-hn.runCtx.Done():
|
2021-08-08 10:57:24 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-08-08 11:08:37 +02:00
|
|
|
|
|
|
|
// handlePolicyUpdateWatchRequest checks whether the expected policy can be
|
|
|
|
// found either in the node's internal state or its DescribeGraph response. If
|
|
|
|
// found, it will signal the request by closing the event channel. Otherwise it
|
|
|
|
// does nothing.
|
|
|
|
func (hn *HarnessNode) handlePolicyUpdateWatchRequest(req *chanWatchRequest) {
|
|
|
|
op := req.chanPoint
|
|
|
|
|
|
|
|
// Get a list of known policies for this chanPoint+advertisingNode
|
|
|
|
// combination. Start searching in the node state first.
|
|
|
|
policies, ok := hn.policyUpdates[op.String()][req.advertisingNode]
|
|
|
|
|
|
|
|
if !ok {
|
|
|
|
// If it cannot be found in the node state, try searching it
|
|
|
|
// from the node's DescribeGraph.
|
|
|
|
policyMap := hn.getChannelPolicies(req.includeUnannounced)
|
|
|
|
policies, ok = policyMap[op.String()][req.advertisingNode]
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if there's a matched policy.
|
|
|
|
for _, policy := range policies {
|
|
|
|
if CheckChannelPolicy(policy, req.policy) == nil {
|
|
|
|
close(req.eventChan)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// getChannelPolicies queries the channel graph and formats the policies into
|
|
|
|
// the format defined in type policyUpdateMap.
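//
// The expected shape of the returned map is (a sketch; policyUpdateMap is
// defined elsewhere in this package):
//
//	chanPoint (string) -> advertising node pubkey (string) -> []*lnrpc.RoutingPolicy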
|
|
|
|
func (hn *HarnessNode) getChannelPolicies(include bool) policyUpdateMap {
|
|
|
|
|
2021-09-18 06:10:01 +02:00
|
|
|
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
2021-08-08 11:08:37 +02:00
|
|
|
defer cancel()
|
|
|
|
|
2021-09-18 07:45:55 +02:00
|
|
|
graph, err := hn.rpc.LN.DescribeGraph(ctxt, &lnrpc.ChannelGraphRequest{
|
2021-08-08 11:08:37 +02:00
|
|
|
IncludeUnannounced: include,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
hn.PrintErr("DescribeGraph got err: %v", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
policyUpdates := policyUpdateMap{}
|
|
|
|
|
|
|
|
for _, e := range graph.Edges {
|
|
|
|
|
|
|
|
policies := policyUpdates[e.ChanPoint]
|
|
|
|
|
|
|
|
// If the map[op] is nil, we need to initialize the map first.
|
|
|
|
if policies == nil {
|
|
|
|
policies = make(map[string][]*lnrpc.RoutingPolicy)
|
|
|
|
}
|
|
|
|
|
|
|
|
if e.Node1Policy != nil {
|
|
|
|
policies[e.Node1Pub] = append(
|
|
|
|
policies[e.Node1Pub], e.Node1Policy,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
if e.Node2Policy != nil {
|
|
|
|
policies[e.Node2Pub] = append(
|
|
|
|
policies[e.Node2Pub], e.Node2Policy,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
policyUpdates[e.ChanPoint] = policies
|
|
|
|
}
|
|
|
|
|
|
|
|
return policyUpdates
|
|
|
|
}
|
2021-09-18 07:00:39 +02:00
|
|
|
|
|
|
|
// renameFile is a helper to rename (log) files created during integration
|
|
|
|
// tests.
|
|
|
|
func renameFile(fromFileName, toFileName string) {
|
|
|
|
err := os.Rename(fromFileName, toFileName)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("could not rename %s to %s: %v\n",
|
|
|
|
fromFileName, toFileName, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// getFinalizedLogFilePrefix returns the finalized log file name prefix.
|
|
|
|
func getFinalizedLogFilePrefix(hn *HarnessNode) string {
|
|
|
|
pubKeyHex := hex.EncodeToString(
|
|
|
|
hn.PubKey[:logPubKeyBytes],
|
|
|
|
)
|
|
|
|
|
|
|
|
return fmt.Sprintf("%s/%d-%s-%s-%s",
|
|
|
|
GetLogDir(), hn.NodeID,
|
|
|
|
hn.Cfg.LogFilenamePrefix,
|
|
|
|
hn.Cfg.Name, pubKeyHex)
|
|
|
|
}
|
|
|
|
|
|
|
|
// finalizeLogfile closes the node's log file and, when log output is
|
|
|
|
// persisted, renames it to its final name which includes the node's pubkey.
|
|
|
|
func finalizeLogfile(hn *HarnessNode, fileName string) {
|
|
|
|
if hn.logFile != nil {
|
|
|
|
hn.logFile.Close()
|
|
|
|
|
|
|
|
// If logoutput flag is not set, return early.
|
|
|
|
if !*logOutput {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
newFileName := fmt.Sprintf("%v.log",
|
|
|
|
getFinalizedLogFilePrefix(hn),
|
|
|
|
)
|
|
|
|
|
|
|
|
renameFile(fileName, newFileName)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func finalizeEtcdLog(hn *HarnessNode) {
|
|
|
|
if hn.Cfg.DbBackend != BackendEtcd {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
etcdLogFileName := fmt.Sprintf("%s/etcd.log", hn.Cfg.LogDir)
|
|
|
|
newEtcdLogFileName := fmt.Sprintf("%v-etcd.log",
|
|
|
|
getFinalizedLogFilePrefix(hn),
|
|
|
|
)
|
|
|
|
|
|
|
|
renameFile(etcdLogFileName, newEtcdLogFileName)
|
|
|
|
}
|
|
|
|
|
|
|
|
func addLogFile(hn *HarnessNode) (string, error) {
|
|
|
|
var fileName string
|
|
|
|
|
|
|
|
dir := GetLogDir()
|
|
|
|
fileName = fmt.Sprintf("%s/%d-%s-%s-%s.log", dir, hn.NodeID,
|
|
|
|
hn.Cfg.LogFilenamePrefix, hn.Cfg.Name,
|
|
|
|
hex.EncodeToString(hn.PubKey[:logPubKeyBytes]))
|
|
|
|
|
|
|
|
// If the node's PubKey is not yet initialized, create a
|
|
|
|
// temporary file name. Later, after the PubKey has been
|
|
|
|
// initialized, the file can be moved to its final name with
|
|
|
|
// the PubKey included.
|
|
|
|
if bytes.Equal(hn.PubKey[:4], []byte{0, 0, 0, 0}) {
|
|
|
|
fileName = fmt.Sprintf("%s/%d-%s-%s-tmp__.log", dir,
|
|
|
|
hn.NodeID, hn.Cfg.LogFilenamePrefix,
|
|
|
|
hn.Cfg.Name)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create file if not exists, otherwise append.
|
|
|
|
file, err := os.OpenFile(fileName,
|
|
|
|
os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
|
|
|
|
if err != nil {
|
|
|
|
return fileName, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Pass the node's stderr to both errb and the file.
|
|
|
|
w := io.MultiWriter(hn.cmd.Stderr, file)
|
|
|
|
hn.cmd.Stderr = w
|
|
|
|
|
|
|
|
// Pass the node's stdout only to the file.
|
|
|
|
hn.cmd.Stdout = file
|
|
|
|
|
|
|
|
// Let the node keep a reference to this file, such
|
|
|
|
// that we can add to it if necessary.
|
|
|
|
hn.logFile = file
|
|
|
|
|
|
|
|
return fileName, nil
|
|
|
|
}
|