Merge pull request #6822 from yyforyongyu/3-new-itest

itest: continued itest refactor and fix - II
This commit is contained in:
Oliver Gugger 2022-11-18 10:27:26 +01:00 committed by GitHub
commit 67e2e382bc
32 changed files with 3232 additions and 4028 deletions

View File

@ -401,7 +401,7 @@ jobs:
args: backend=neutrino
steps:
- name: git checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: go cache
uses: actions/cache@v1
@ -415,7 +415,7 @@ jobs:
lnd-${{ runner.os }}-go-
- name: setup go ${{ env.GO_VERSION }}
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: '${{ env.GO_VERSION }}'
@ -449,7 +449,7 @@ jobs:
GOPATH: ${{ github.workspace }}/go
steps:
- name: git checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: go cache
uses: actions/cache@v1
@ -463,7 +463,7 @@ jobs:
lnd-${{ runner.os }}-go-
- name: setup go ${{ env.GO_VERSION }}
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: '${{ env.GO_VERSION }}'
@ -472,6 +472,7 @@ jobs:
- name: Zip log files on failure
if: ${{ failure() }}
timeout-minutes: 1 # timeout after 1 minute
run: 7z a logs-itest-windows.zip lntest/itest/**/*.log
- name: Upload log files on failure

View File

@ -78,6 +78,22 @@ linters-settings:
# so no return split required.
block-size: 3
gomnd:
# List of numbers to exclude from analysis.
# The numbers should be written as string.
# Values always ignored: "1", "1.0", "0" and "0.0"
# Default: []
ignored-numbers:
- '0666'
- '0755'
# List of function patterns to exclude from analysis.
# Values always ignored: `time.Date`
# Default: []
ignored-functions:
- 'math.*'
- 'strconv.ParseInt'
linters:
enable-all: true
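For illustration, a hedged Go sketch of the kind of code these new gomnd exclusions are aimed at; the `writeConfig` helper below is hypothetical and not part of the repo:

```go
package gomndexample

import (
	"os"
	"strconv"
)

// writeConfig is a hypothetical helper used only to illustrate the gomnd
// settings above.
func writeConfig(dir, path, data string) error {
	// 0755 and 0666 are on the ignored-numbers list, so gomnd no longer
	// reports them as magic numbers.
	if err := os.MkdirAll(dir, 0755); err != nil {
		return err
	}
	if err := os.WriteFile(path, []byte(data), 0666); err != nil {
		return err
	}

	// strconv.ParseInt is on the ignored-functions list, so the base and
	// bit-size literals below are ignored as well.
	_, err := strconv.ParseInt(data, 10, 64)
	return err
}
```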

View File

@ -247,8 +247,9 @@ better testing suite for writing integration tests. A new defined structure is
implemented, please refer to
[README](https://github.com/lightningnetwork/lnd/tree/master/lntemp) for more
details. Along the way, several
PRs([6776](https://github.com/lightningnetwork/lnd/pull/6776)) have been made
to refactor the itest for code health and maintenance.
PRs([6776](https://github.com/lightningnetwork/lnd/pull/6776),
[6822](https://github.com/lightningnetwork/lnd/pull/6822)) have been made to
refactor the itest for code health and maintenance.
# Contributors (Alphabetical Order)

View File

@ -6,7 +6,9 @@ import (
"fmt"
"sync"
"testing"
"time"
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
@ -14,6 +16,7 @@ import (
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntemp/rpc"
"github.com/lightningnetwork/lnd/lntest"
@ -594,6 +597,14 @@ func (h *HarnessTest) SetFeeEstimate(fee chainfee.SatPerKWeight) {
h.feeService.SetFeeRate(fee, 1)
}
// SetFeeEstimateWithConf sets a fee rate for a specified conf target to be
// returned from the fee estimator.
func (h *HarnessTest) SetFeeEstimateWithConf(
fee chainfee.SatPerKWeight, conf uint32) {
h.feeService.SetFeeRate(fee, conf)
}
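A minimal usage sketch of the two setters; the test function name and fee values below are hypothetical, and the example assumes the lntemp package shown in this diff:

```go
func testFeeEstimateSketch(ht *lntemp.HarnessTest) {
	// Default estimate, registered for a conf target of 1.
	ht.SetFeeEstimate(12500)

	// Estimate returned when a caller asks for a 6-block conf target.
	ht.SetFeeEstimateWithConf(6250, 6)
}
```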
// validateNodeState checks that the node doesn't have any uncleaned states
// which will affect its following tests.
func (h *HarnessTest) validateNodeState(hn *node.HarnessNode) {
@ -693,12 +704,10 @@ type OpenChannelParams struct {
ScidAlias bool
}
// OpenChannelAssertPending attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event.
func (h *HarnessTest) OpenChannelAssertPending(srcNode,
destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient {
// prepareOpenChannel waits for both nodes to be synced to chain and returns an
// OpenChannelRequest.
func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode,
p OpenChannelParams) *lnrpc.OpenChannelRequest {
// Wait until srcNode and destNode have the latest chain synced.
// Otherwise, we may run into a check within the funding manager that
@ -714,8 +723,8 @@ func (h *HarnessTest) OpenChannelAssertPending(srcNode,
minConfs = 0
}
// Prepare the request and open the channel.
openReq := &lnrpc.OpenChannelRequest{
// Prepare the request.
return &lnrpc.OpenChannelRequest{
NodePubkey: destNode.PubKey[:],
LocalFundingAmount: int64(p.Amt),
PushSat: int64(p.PushAmt),
@ -730,6 +739,18 @@ func (h *HarnessTest) OpenChannelAssertPending(srcNode,
ZeroConf: p.ZeroConf,
ScidAlias: p.ScidAlias,
}
}
// openChannelAssertPending attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event.
func (h *HarnessTest) openChannelAssertPending(srcNode,
destNode *node.HarnessNode,
p OpenChannelParams) (*lnrpc.PendingUpdate, rpc.OpenChanClient) {
// Prepare the request and open the channel.
openReq := h.prepareOpenChannel(srcNode, destNode, p)
respStream := srcNode.RPC.OpenChannel(openReq)
// Consume the "channel pending" update. This waits until the node
@ -738,11 +759,35 @@ func (h *HarnessTest) OpenChannelAssertPending(srcNode,
resp := h.ReceiveOpenChannelUpdate(respStream)
// Check that the update is channel pending.
_, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
update, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
require.Truef(h, ok, "expected channel pending: update, instead got %v",
resp)
return respStream
return update.ChanPending, respStream
}
// OpenChannelAssertPending attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event. It returns the
// `PendingUpdate`.
func (h *HarnessTest) OpenChannelAssertPending(srcNode,
destNode *node.HarnessNode, p OpenChannelParams) *lnrpc.PendingUpdate {
resp, _ := h.openChannelAssertPending(srcNode, destNode, p)
return resp
}
// OpenChannelAssertStream attempts to open a channel between srcNode and
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event. It returns the open channel
// stream.
func (h *HarnessTest) OpenChannelAssertStream(srcNode,
destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient {
_, stream := h.openChannelAssertPending(srcNode, destNode, p)
return stream
}
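A hedged sketch of the two new call sites; the helper names below are hypothetical and the example assumes the lntemp and lnrpc imports:

```go
// Variant A: only the PendingUpdate is needed, e.g. to inspect the funding
// txid before confirmation.
func openPendingSketch(ht *lntemp.HarnessTest) *lnrpc.PendingUpdate {
	p := lntemp.OpenChannelParams{Amt: 1_000_000}
	return ht.OpenChannelAssertPending(ht.Alice, ht.Bob, p)
}

// Variant B: keep the stream so the open event can be consumed explicitly.
func openStreamSketch(ht *lntemp.HarnessTest) *lnrpc.ChannelPoint {
	p := lntemp.OpenChannelParams{Amt: 1_000_000}
	stream := ht.OpenChannelAssertStream(ht.Alice, ht.Bob, p)

	// Mine the funding tx plus enough blocks for the announcement, then
	// wait for the open event on the stream.
	ht.MineBlocksAndAssertNumTxes(6, 1)
	return ht.WaitForChannelOpenEvent(stream)
}
```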
// OpenChannel attempts to open a channel with the specified parameters
@ -754,7 +799,7 @@ func (h *HarnessTest) OpenChannelAssertPending(srcNode,
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
p OpenChannelParams) *lnrpc.ChannelPoint {
chanOpenUpdate := h.OpenChannelAssertPending(alice, bob, p)
chanOpenUpdate := h.OpenChannelAssertStream(alice, bob, p)
// Mine 6 blocks, then wait for Alice's node to notify us that the
// channel has been opened. The funding transaction should be found
@ -785,6 +830,24 @@ func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
return fundingChanPoint
}
// OpenChannelAssertErr opens a channel between node srcNode and destNode,
// asserts that the expected error is returned from the channel opening.
func (h *HarnessTest) OpenChannelAssertErr(srcNode, destNode *node.HarnessNode,
p OpenChannelParams, expectedErr error) {
// Prepare the request and open the channel.
openReq := h.prepareOpenChannel(srcNode, destNode, p)
respStream := srcNode.RPC.OpenChannel(openReq)
// Receive an error to be sent from the stream.
_, err := h.receiveOpenChannelUpdate(respStream)
// Use string comparison here as we haven't codified all the RPC errors
// yet.
require.Containsf(h, err.Error(), expectedErr.Error(), "unexpected "+
"error returned, want %v, got %v", expectedErr, err)
}
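A hedged sketch of asserting a failed channel open; the test name is hypothetical and the error is a placeholder, assuming the errors and lntemp imports:

```go
func testOpenChannelErrSketch(ht *lntemp.HarnessTest) {
	p := lntemp.OpenChannelParams{Amt: 1_000_000}

	// Placeholder: in a real test this would be the concrete error the
	// funding flow is expected to return.
	expectedErr := errors.New("expected funding error substring")

	ht.OpenChannelAssertErr(ht.Alice, ht.Bob, p, expectedErr)
}
```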
// CloseChannelAssertPending attempts to close the channel indicated by the
// passed channel point, initiated by the passed node. Once the CloseChannel
// rpc is called, it will consume one event and assert it's a close pending
@ -971,6 +1034,22 @@ func (h *HarnessTest) FundCoinsUnconfirmed(amt btcutil.Amount,
h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, false)
}
// FundCoinsNP2WKH attempts to send amt satoshis from the internal mining node
// to the targeted lightning node using a NP2WKH address.
func (h *HarnessTest) FundCoinsNP2WKH(amt btcutil.Amount,
target *node.HarnessNode) {
h.fundCoins(amt, target, lnrpc.AddressType_NESTED_PUBKEY_HASH, true)
}
// FundCoinsP2TR attempts to send amt satoshis from the internal mining node to
// the targeted lightning node using a P2TR address.
func (h *HarnessTest) FundCoinsP2TR(amt btcutil.Amount,
target *node.HarnessNode) {
h.fundCoins(amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true)
}
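A hedged sketch of funding a node with the different address types; the test name is hypothetical, and it assumes the harness also exposes a plain FundCoins wrapper plus the btcutil import:

```go
func testFundCoinsSketch(ht *lntemp.HarnessTest) {
	carol := ht.NewNode("carol", nil)

	ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)       // P2WKH output.
	ht.FundCoinsNP2WKH(btcutil.SatoshiPerBitcoin, carol)  // NP2WKH output.
	ht.FundCoinsP2TR(btcutil.SatoshiPerBitcoin, carol)    // P2TR output.
}
```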
// CompletePaymentRequests sends payments from a node to complete all payment
// requests. This function does not return until all payments successfully
// complete without errors.
@ -1243,3 +1322,309 @@ func (h *HarnessTest) MineBlocksAndAssertNumTxes(num uint32,
return blocks
}
// QueryChannelByChanPoint tries to find a channel matching the channel point
// and asserts. It returns the channel found.
func (h *HarnessTest) QueryChannelByChanPoint(hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint) *lnrpc.Channel {
channel, err := h.findChannel(hn, chanPoint)
require.NoError(h, err, "failed to query channel")
return channel
}
// SendPaymentAndAssertStatus sends a payment from the passed node and asserts
// the desired status is reached.
func (h *HarnessTest) SendPaymentAndAssertStatus(hn *node.HarnessNode,
req *routerrpc.SendPaymentRequest,
status lnrpc.Payment_PaymentStatus) *lnrpc.Payment {
stream := hn.RPC.SendPayment(req)
return h.AssertPaymentStatusFromStream(stream, status)
}
// SendPaymentAssertFail sends a payment from the passed node and asserts the
// payment fails with the specified failure reason.
func (h *HarnessTest) SendPaymentAssertFail(hn *node.HarnessNode,
req *routerrpc.SendPaymentRequest,
reason lnrpc.PaymentFailureReason) *lnrpc.Payment {
payment := h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_FAILED)
require.Equal(h, reason, payment.FailureReason,
"payment failureReason not matched")
return payment
}
// OpenChannelRequest is used to open a channel using the method
// OpenMultiChannelsAsync.
type OpenChannelRequest struct {
// Local is the funding node.
Local *node.HarnessNode
// Remote is the receiving node.
Remote *node.HarnessNode
// Param is the open channel params.
Param OpenChannelParams
// stream is the client created after calling OpenChannel RPC.
stream rpc.OpenChanClient
// result is a channel used to send the channel point once the funding
// has succeeded.
result chan *lnrpc.ChannelPoint
}
// OpenMultiChannelsAsync takes a list of OpenChannelRequest and opens them in
// batch. The channel points are returned in the same order as the requests
// once all of the channel opens have succeeded.
//
// NOTE: compared to opening multiple channels sequentially, this method is
// faster as it doesn't need to mine 6 blocks for each channel open. However,
// it does make debugging the logs more difficult as messages are intertwined.
func (h *HarnessTest) OpenMultiChannelsAsync(
reqs []*OpenChannelRequest) []*lnrpc.ChannelPoint {
// openChannel opens a channel based on the request.
openChannel := func(req *OpenChannelRequest) {
stream := h.OpenChannelAssertStream(
req.Local, req.Remote, req.Param,
)
req.stream = stream
}
// assertChannelOpen is a helper closure that asserts a channel is
// open.
assertChannelOpen := func(req *OpenChannelRequest) {
// Wait for the channel open event from the stream.
cp := h.WaitForChannelOpenEvent(req.stream)
// Check that both alice and bob have seen the channel
// from their channel watch request.
h.AssertTopologyChannelOpen(req.Local, cp)
h.AssertTopologyChannelOpen(req.Remote, cp)
// Finally, check that the channel can be seen in their
// ListChannels.
h.AssertChannelExists(req.Local, cp)
h.AssertChannelExists(req.Remote, cp)
req.result <- cp
}
// Go through the requests and make the OpenChannel RPC call.
for _, r := range reqs {
openChannel(r)
}
// Mine one block to confirm all the funding transactions.
h.MineBlocksAndAssertNumTxes(1, len(reqs))
// Mine 5 more blocks so all the public channels are announced to the
// network.
h.MineBlocks(numBlocksOpenChannel - 1)
// Once the blocks are mined, we fire goroutines for each of the
// requests to watch for the channel opening.
for _, r := range reqs {
r.result = make(chan *lnrpc.ChannelPoint, 1)
go assertChannelOpen(r)
}
// Finally, collect the results.
channelPoints := make([]*lnrpc.ChannelPoint, 0)
for _, r := range reqs {
select {
case cp := <-r.result:
channelPoints = append(channelPoints, cp)
case <-time.After(lntest.ChannelOpenTimeout):
require.Failf(h, "timeout", "wait channel point "+
"timeout for channel %s=>%s", r.Local.Name(),
r.Remote.Name())
}
}
// Assert that we have the expected num of channel points.
require.Len(h, channelPoints, len(reqs),
"returned channel points not match")
return channelPoints
}
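A hedged usage sketch of batching two channel opens; the test name and amounts are hypothetical, and it assumes Alice and Bob are the standby nodes and that Bob can be funded via FundCoins:

```go
func testOpenMultiChannelsSketch(ht *lntemp.HarnessTest) {
	alice, bob := ht.Alice, ht.Bob
	carol := ht.NewNode("carol", nil)
	ht.ConnectNodes(bob, carol)
	ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)

	p := lntemp.OpenChannelParams{Amt: 1_000_000}
	reqs := []*lntemp.OpenChannelRequest{
		{Local: alice, Remote: bob, Param: p},
		{Local: bob, Remote: carol, Param: p},
	}

	// Channel points come back in the same order as the requests:
	// chanPoints[0] is alice->bob, chanPoints[1] is bob->carol.
	chanPoints := ht.OpenMultiChannelsAsync(reqs)
	_ = chanPoints
}
```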
// ReceiveInvoiceUpdate waits until a message is received on the subscribe
// invoice stream or the timeout is reached.
func (h *HarnessTest) ReceiveInvoiceUpdate(
stream rpc.InvoiceUpdateClient) *lnrpc.Invoice {
chanMsg := make(chan *lnrpc.Invoice)
errChan := make(chan error)
go func() {
// Consume one message. This will block until the message is
// received.
resp, err := stream.Recv()
if err != nil {
errChan <- err
return
}
chanMsg <- resp
}()
select {
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout receiving invoice update")
case err := <-errChan:
require.Failf(h, "err from stream",
"received err from stream: %v", err)
case updateMsg := <-chanMsg:
return updateMsg
}
return nil
}
// CalculateTxFee retrieves parent transactions and reconstructs the fee paid.
func (h *HarnessTest) CalculateTxFee(tx *wire.MsgTx) btcutil.Amount {
var balance btcutil.Amount
for _, in := range tx.TxIn {
parentHash := in.PreviousOutPoint.Hash
rawTx := h.Miner.GetRawTransaction(&parentHash)
parent := rawTx.MsgTx()
balance += btcutil.Amount(
parent.TxOut[in.PreviousOutPoint.Index].Value,
)
}
for _, out := range tx.TxOut {
balance -= btcutil.Amount(out.Value)
}
return balance
}
// CalculateTxesFeeRate takes a list of transactions and estimates the fee rate
// used to sweep them.
//
// NOTE: only used in current test file.
func (h *HarnessTest) CalculateTxesFeeRate(txns []*wire.MsgTx) int64 {
const scale = 1000
var totalWeight, totalFee int64
for _, tx := range txns {
utx := btcutil.NewTx(tx)
totalWeight += blockchain.GetTransactionWeight(utx)
fee := h.CalculateTxFee(tx)
totalFee += int64(fee)
}
feeRate := totalFee * scale / totalWeight
return feeRate
}
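For illustration (hypothetical numbers): if the swept transactions pay a combined fee of 2,000 sats and have a combined weight of 2,500 weight units, the estimate is 2,000 * 1,000 / 2,500 = 800 sat/kw.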
type SweptOutput struct {
OutPoint wire.OutPoint
SweepTx *wire.MsgTx
}
// FindCommitAndAnchor looks for a commitment sweep and anchor sweep in the
// mempool. Our anchor output is identified by having multiple inputs in its
// sweep transaction, because we have to bring another input to add fees to the
// anchor. Note that the anchor swept output may be nil if the channel did not
// have anchors.
func (h *HarnessTest) FindCommitAndAnchor(sweepTxns []*wire.MsgTx,
closeTx string) (*SweptOutput, *SweptOutput) {
var commitSweep, anchorSweep *SweptOutput
for _, tx := range sweepTxns {
txHash := tx.TxHash()
sweepTx := h.Miner.GetRawTransaction(&txHash)
// We expect our commitment sweep to have a single input, and,
// our anchor sweep to have more inputs (because the wallet
// needs to add balance to the anchor amount). We find their
// sweep txids here to set up appropriate resolutions. We also
// need to find the outpoint for our resolution, which we do by
// matching the inputs to the sweep to the close transaction.
inputs := sweepTx.MsgTx().TxIn
if len(inputs) == 1 {
commitSweep = &SweptOutput{
OutPoint: inputs[0].PreviousOutPoint,
SweepTx: tx,
}
} else {
// Since we have more than one input, we run through
// them to find the one whose previous outpoint matches
// the closing txid, which means this input is spending
// the close tx. This will be our anchor output.
for _, txin := range inputs {
op := txin.PreviousOutPoint.Hash.String()
if op == closeTx {
anchorSweep = &SweptOutput{
OutPoint: txin.PreviousOutPoint,
SweepTx: tx,
}
}
}
}
}
return commitSweep, anchorSweep
}
// AssertSweepFound looks up a sweep in a node's list of broadcast sweeps and
// asserts it's found.
//
// NOTE: Does not account for node's internal state.
func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
sweep string, verbose bool) {
// List all sweeps that the node has broadcast.
sweepResp := hn.RPC.ListSweeps(verbose)
var found bool
if verbose {
found = findSweepInDetails(h, sweep, sweepResp)
} else {
found = findSweepInTxids(h, sweep, sweepResp)
}
require.Truef(h, found, "%s: sweep: %v not found", hn.Name(), sweep)
}
func findSweepInTxids(ht *HarnessTest, sweepTxid string,
sweepResp *walletrpc.ListSweepsResponse) bool {
sweepTxIDs := sweepResp.GetTransactionIds()
require.NotNil(ht, sweepTxIDs, "expected transaction ids")
require.Nil(ht, sweepResp.GetTransactionDetails())
// Check that the sweep tx we have just produced is present.
for _, tx := range sweepTxIDs.TransactionIds {
if tx == sweepTxid {
return true
}
}
return false
}
func findSweepInDetails(ht *HarnessTest, sweepTxid string,
sweepResp *walletrpc.ListSweepsResponse) bool {
sweepDetails := sweepResp.GetTransactionDetails()
require.NotNil(ht, sweepDetails, "expected transaction details")
require.Nil(ht, sweepResp.GetTransactionIds())
for _, tx := range sweepDetails.Transactions {
if tx.TxHash == sweepTxid {
return true
}
}
return false
}

View File

@ -4,6 +4,7 @@ import (
"context"
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"math"
"strings"
@ -78,6 +79,22 @@ func (h *HarnessTest) ConnectNodes(a, b *node.HarnessNode) {
h.AssertPeerConnected(a, b)
}
// ConnectNodesPerm creates a persistent connection between the two nodes and
// asserts the connection succeeds.
func (h *HarnessTest) ConnectNodesPerm(a, b *node.HarnessNode) {
bobInfo := b.RPC.GetInfo()
req := &lnrpc.ConnectPeerRequest{
Addr: &lnrpc.LightningAddress{
Pubkey: bobInfo.IdentityPubkey,
Host: b.Cfg.P2PAddr(),
},
Perm: true,
}
a.RPC.ConnectPeer(req)
h.AssertPeerConnected(a, b)
}
// DisconnectNodes disconnects the given two nodes and asserts the
// disconnection succeeds. The request is made from node a and sent to node
// b.
@ -199,6 +216,20 @@ func (h *HarnessTest) AssertNumEdges(hn *node.HarnessNode,
func (h *HarnessTest) ReceiveOpenChannelUpdate(
stream rpc.OpenChanClient) *lnrpc.OpenStatusUpdate {
update, err := h.receiveOpenChannelUpdate(stream)
require.NoError(h, err, "received err from open channel stream")
return update
}
// receiveOpenChannelUpdate waits until a message or an error is received on
// the stream or the timeout is reached.
//
// TODO(yy): use generics to unify all receiving stream update once go@1.18 is
// used.
func (h *HarnessTest) receiveOpenChannelUpdate(
stream rpc.OpenChanClient) (*lnrpc.OpenStatusUpdate, error) {
chanMsg := make(chan *lnrpc.OpenStatusUpdate)
errChan := make(chan error)
go func() {
@ -216,16 +247,14 @@ func (h *HarnessTest) ReceiveOpenChannelUpdate(
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout waiting for open channel "+
"update sent")
return nil, nil
case err := <-errChan:
require.Failf(h, "open channel stream",
"received err from open channel stream: %v", err)
return nil, err
case updateMsg := <-chanMsg:
return updateMsg
return updateMsg, nil
}
return nil
}
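The TODO above could eventually be addressed with a generic helper. A hedged sketch, assuming the package's DefaultTimeout plus the standard time and fmt imports, and that each stream client's Recv returns a pointer type and an error (as rpc.OpenChanClient does):

```go
// recvStream abstracts any stream client whose Recv returns (T, error).
type recvStream[T any] interface {
	Recv() (T, error)
}

// receiveWithTimeout consumes one message from the stream or gives up after
// DefaultTimeout, returning whichever error occurred.
func receiveWithTimeout[T any](stream recvStream[T]) (T, error) {
	chanMsg := make(chan T, 1)
	errChan := make(chan error, 1)

	go func() {
		resp, err := stream.Recv()
		if err != nil {
			errChan <- err
			return
		}
		chanMsg <- resp
	}()

	var zero T
	select {
	case <-time.After(DefaultTimeout):
		return zero, fmt.Errorf("timeout waiting for stream update")

	case err := <-errChan:
		return zero, err

	case msg := <-chanMsg:
		return msg, nil
	}
}
```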
// WaitForChannelOpenEvent waits for a notification that a channel is open by
@ -1232,21 +1261,20 @@ func (h *HarnessTest) AssertNumHTLCsAndStage(hn *node.HarnessNode,
// findPayment queries the payment from the node's ListPayments which matches
// the specified payment hash.
func (h *HarnessTest) findPayment(hn *node.HarnessNode,
preimage lntypes.Preimage) *lnrpc.Payment {
paymentHash string) *lnrpc.Payment {
req := &lnrpc.ListPaymentsRequest{IncludeIncomplete: true}
paymentsResp := hn.RPC.ListPayments(req)
payHash := preimage.Hash()
for _, p := range paymentsResp.Payments {
if p.PaymentHash != payHash.String() {
if p.PaymentHash != paymentHash {
continue
}
return p
}
require.Fail(h, "payment: %v not found", payHash)
require.Fail(h, "payment: %v not found", paymentHash)
return nil
}
@ -1262,7 +1290,7 @@ func (h *HarnessTest) AssertPaymentStatus(hn *node.HarnessNode,
var target *lnrpc.Payment
err := wait.NoError(func() error {
p := h.findPayment(hn, preimage)
p := h.findPayment(hn, preimage.Hash().String())
if status == p.Status {
target = p
return nil
@ -1295,3 +1323,328 @@ func (h *HarnessTest) AssertActiveNodesSynced() {
h.WaitForBlockchainSync(node)
}
}
// AssertPeerNotConnected asserts that the given node b is not connected to a.
func (h *HarnessTest) AssertPeerNotConnected(a, b *node.HarnessNode) {
err := wait.NoError(func() error {
// We require the RPC call to succeed and won't wait for
// it as a failure is unexpected behavior.
resp := a.RPC.ListPeers()
// If node B is seen in the ListPeers response from node A,
// then we return an error as the connection is still
// established.
for _, peer := range resp.Peers {
if peer.PubKey == b.PubKeyStr {
return fmt.Errorf("peers %s and %s still "+
"connected", a.Name(), b.Name())
}
}
return nil
}, DefaultTimeout)
require.NoError(h, err, "timeout checking peers not connected")
}
// AssertNotConnected asserts that two peers are not connected.
func (h *HarnessTest) AssertNotConnected(a, b *node.HarnessNode) {
h.AssertPeerNotConnected(a, b)
h.AssertPeerNotConnected(b, a)
}
// AssertConnected asserts that two peers are connected.
func (h *HarnessTest) AssertConnected(a, b *node.HarnessNode) {
h.AssertPeerConnected(a, b)
h.AssertPeerConnected(b, a)
}
// AssertAmountPaid checks that the ListChannels command of the provided
// node lists the total amount sent and received as expected for the
// provided channel.
func (h *HarnessTest) AssertAmountPaid(channelName string, hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint, amountSent, amountReceived int64) {
checkAmountPaid := func() error {
// Find the targeted channel.
channel, err := h.findChannel(hn, chanPoint)
if err != nil {
return fmt.Errorf("assert amount failed: %w", err)
}
if channel.TotalSatoshisSent != amountSent {
return fmt.Errorf("%v: incorrect amount"+
" sent: %v != %v", channelName,
channel.TotalSatoshisSent,
amountSent)
}
if channel.TotalSatoshisReceived !=
amountReceived {
return fmt.Errorf("%v: incorrect amount"+
" received: %v != %v",
channelName,
channel.TotalSatoshisReceived,
amountReceived)
}
return nil
}
// As the HTLC inclusion in the commitment transaction might be
// postponed, we will check the balance a couple of times and
// return the error only if we still see the wrong balance after
// some period of time.
err := wait.NoError(checkAmountPaid, DefaultTimeout)
require.NoError(h, err, "timeout while checking amount paid")
}
// AssertLastHTLCError checks that the last sent HTLC of the last payment sent
// by the given node failed with the expected failure code.
func (h *HarnessTest) AssertLastHTLCError(hn *node.HarnessNode,
code lnrpc.Failure_FailureCode) {
// Use -1 to specify the last HTLC.
h.assertHTLCError(hn, code, -1)
}
// AssertFirstHTLCError checks that the first HTLC of the last payment sent
// by the given node failed with the expected failure code.
func (h *HarnessTest) AssertFirstHTLCError(hn *node.HarnessNode,
code lnrpc.Failure_FailureCode) {
// Use 0 to specify the first HTLC.
h.assertHTLCError(hn, code, 0)
}
// assertHTLCError checks that the HTLC at the specified index of the last
// payment sent by the given node failed with the expected failure code.
func (h *HarnessTest) assertHTLCError(hn *node.HarnessNode,
code lnrpc.Failure_FailureCode, index int) {
req := &lnrpc.ListPaymentsRequest{
IncludeIncomplete: true,
}
err := wait.NoError(func() error {
paymentsResp := hn.RPC.ListPayments(req)
payments := paymentsResp.Payments
if len(payments) == 0 {
return fmt.Errorf("no payments found")
}
payment := payments[len(payments)-1]
htlcs := payment.Htlcs
if len(htlcs) == 0 {
return fmt.Errorf("no htlcs found")
}
// If the index is greater than 0, check we have enough htlcs.
if index > 0 && len(htlcs) <= index {
return fmt.Errorf("not enough htlcs")
}
// If index is less than or equal to 0, we will read the last
// htlc.
if index <= 0 {
index = len(htlcs) - 1
}
htlc := htlcs[index]
// The htlc must have a status of failed.
if htlc.Status != lnrpc.HTLCAttempt_FAILED {
return fmt.Errorf("htlc should be failed")
}
// The failure field must not be empty.
if htlc.Failure == nil {
return fmt.Errorf("expected htlc failure")
}
// Exit if the expected code is found.
if htlc.Failure.Code == code {
return nil
}
return fmt.Errorf("unexpected failure code")
}, DefaultTimeout)
require.NoError(h, err, "timeout checking HTLC error")
}
// AssertZombieChannel asserts that a given channel found using the chanID is
// marked as zombie.
func (h *HarnessTest) AssertZombieChannel(hn *node.HarnessNode, chanID uint64) {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
err := wait.NoError(func() error {
_, err := hn.RPC.LN.GetChanInfo(
ctxt, &lnrpc.ChanInfoRequest{ChanId: chanID},
)
if err == nil {
return fmt.Errorf("expected error but got nil")
}
if !strings.Contains(err.Error(), "marked as zombie") {
return fmt.Errorf("expected error to contain '%s' but "+
"was '%v'", "marked as zombie", err)
}
return nil
}, DefaultTimeout)
require.NoError(h, err, "timeout while checking zombie channel")
}
// AssertTxAtHeight gets all of the transactions that a node's wallet has a
// record of at the target height, and finds and returns the tx with the target
// txid, failing if it is not found.
func (h *HarnessTest) AssertTxAtHeight(hn *node.HarnessNode, height int32,
txid *chainhash.Hash) *lnrpc.Transaction {
req := &lnrpc.GetTransactionsRequest{
StartHeight: height,
EndHeight: height,
}
txns := hn.RPC.GetTransactions(req)
for _, tx := range txns.Transactions {
if tx.TxHash == txid.String() {
return tx
}
}
require.Failf(h, "fail to find tx", "tx:%v not found at height:%v",
txid, height)
return nil
}
// getChannelPolicies queries the channel graph and retrieves the current edge
// policies for the provided channel point.
func (h *HarnessTest) getChannelPolicies(hn *node.HarnessNode,
advertisingNode string,
cp *lnrpc.ChannelPoint) (*lnrpc.RoutingPolicy, error) {
req := &lnrpc.ChannelGraphRequest{IncludeUnannounced: true}
chanGraph := hn.RPC.DescribeGraph(req)
cpStr := channelPointStr(cp)
for _, e := range chanGraph.Edges {
if e.ChanPoint != cpStr {
continue
}
if e.Node1Pub == advertisingNode {
return e.Node1Policy, nil
}
return e.Node2Policy, nil
}
// If we've iterated over all the known edges and we weren't
// able to find this specific one, then we'll fail.
return nil, fmt.Errorf("did not find edge with advertisingNode: %s"+
", channel point: %s", advertisingNode, cpStr)
}
// AssertChannelPolicy asserts that the passed node's known channel policy for
// the passed chanPoint is consistent with the expected policy values.
func (h *HarnessTest) AssertChannelPolicy(hn *node.HarnessNode,
advertisingNode string, expectedPolicy *lnrpc.RoutingPolicy,
chanPoint *lnrpc.ChannelPoint) {
policy, err := h.getChannelPolicies(hn, advertisingNode, chanPoint)
require.NoErrorf(h, err, "%s: failed to find policy", hn.Name())
err = node.CheckChannelPolicy(policy, expectedPolicy)
require.NoErrorf(h, err, "%s: check policy failed", hn.Name())
}
// AssertNumPolicyUpdates asserts that a given number of channel policy updates
// has been seen in the specified node.
func (h *HarnessTest) AssertNumPolicyUpdates(hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint,
advertisingNode *node.HarnessNode, num int) {
op := h.OutPointFromChannelPoint(chanPoint)
var policies []*node.PolicyUpdateInfo
err := wait.NoError(func() error {
policyMap := hn.Watcher.GetPolicyUpdates(op)
nodePolicy, ok := policyMap[advertisingNode.PubKeyStr]
if ok {
policies = nodePolicy
}
if len(policies) == num {
return nil
}
p, err := json.MarshalIndent(policies, "", "\t")
require.NoError(h, err, "encode policy err")
return fmt.Errorf("expected to find %d policy updates, "+
"instead got: %d, chanPoint: %v, "+
"advertisingNode: %s:%s, policy: %s", num,
len(policies), op, advertisingNode.Name(),
advertisingNode.PubKeyStr, p)
}, DefaultTimeout)
require.NoError(h, err, "%s: timeout waiting for num of policy updates",
hn.Name())
}
// AssertNumPayments asserts that the number of payments made within the test
// scope is as expected, including the incomplete ones.
func (h *HarnessTest) AssertNumPayments(hn *node.HarnessNode,
num int) []*lnrpc.Payment {
// Get the number of payments we already have from the previous test.
have := hn.State.Payment.Total
req := &lnrpc.ListPaymentsRequest{
IncludeIncomplete: true,
IndexOffset: hn.State.Payment.LastIndexOffset,
}
var payments []*lnrpc.Payment
err := wait.NoError(func() error {
resp := hn.RPC.ListPayments(req)
payments = resp.Payments
if len(payments) == num {
return nil
}
return errNumNotMatched(hn.Name(), "num of payments",
num, len(payments), have+len(payments), have)
}, DefaultTimeout)
require.NoError(h, err, "timeout checking num of payments")
return payments
}
// AssertNumNodeAnns asserts that a given number of node announcements has been
// seen in the specified node.
func (h *HarnessTest) AssertNumNodeAnns(hn *node.HarnessNode,
pubkey string, num int) []*lnrpc.NodeUpdate {
// Wait for the node watcher to see the expected number of node
// announcements from the given pubkey.
anns, err := hn.Watcher.WaitForNumNodeUpdates(pubkey, num)
require.NoError(h, err, "failed to assert num of channel updates")
return anns
}
// AssertNumChannelUpdates asserts that a given number of channel updates has
// been seen in the specified node's network topology.
func (h *HarnessTest) AssertNumChannelUpdates(hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint, num int) {
op := h.OutPointFromChannelPoint(chanPoint)
err := hn.Watcher.WaitForNumChannelUpdates(op, num)
require.NoError(h, err, "failed to assert num of channel updates")
}

View File

@ -376,3 +376,10 @@ func (h *HarnessMiner) GetNumTxsFromMempool(n int) []*wire.MsgTx {
return txes
}
// NewMinerAddress creates a new address for the miner and asserts.
func (h *HarnessMiner) NewMinerAddress() btcutil.Address {
addr, err := h.NewAddress()
require.NoError(h, err, "failed to create new miner address")
return addr
}

View File

@ -156,7 +156,7 @@ func (nm *nodeManager) restartNode(ctxt context.Context, node *node.HarnessNode,
}
if len(chanBackups) != 0 {
unlockReq.ChannelBackups = chanBackups[0]
unlockReq.RecoveryWindow = 1000
unlockReq.RecoveryWindow = 100
}
err = wait.NoError(func() error {

View File

@ -166,6 +166,7 @@ func (cfg *BaseNodeConfig) GenArgs() []string {
"--debuglevel=debug",
"--bitcoin.defaultchanconfs=1",
"--accept-keysend",
"--keep-failed-payment-attempts",
fmt.Sprintf("--db.batch-commit-interval=%v", commitInterval),
fmt.Sprintf("--bitcoin.defaultremotedelay=%v",
lntest.DefaultCSV),

View File

@ -103,20 +103,6 @@ type invoiceCount struct {
LastIndexOffset uint64
}
// balanceCount provides a summary over balances related to channels.
type balanceCount struct {
LocalBalance *lnrpc.Amount
RemoteBalance *lnrpc.Amount
UnsettledLocalBalance *lnrpc.Amount
UnsettledRemoteBalance *lnrpc.Amount
PendingOpenLocalBalance *lnrpc.Amount
PendingOpenRemoteBalance *lnrpc.Amount
// Deprecated fields.
Balance int64
PendingOpenBalance int64
}
// walletBalance provides a summary over balances related to the node's wallet.
type walletBalance struct {
TotalBalance int64
@ -139,9 +125,6 @@ type State struct {
// CloseChannel gives the summary of close channel related counts.
CloseChannel closedChannelCount
// Balance gives the summary of the channel balance.
Balance balanceCount
// Wallet gives the summary of the wallet balance.
Wallet walletBalance
@ -315,18 +298,6 @@ func (s *State) updateEdgeStats() {
s.Edge.Public = len(resp.Edges)
}
// updateChannelBalance creates stats for the node's channel balance.
func (s *State) updateChannelBalance() {
resp := s.rpc.ChannelBalance()
s.Balance.LocalBalance = resp.LocalBalance
s.Balance.RemoteBalance = resp.RemoteBalance
s.Balance.UnsettledLocalBalance = resp.UnsettledLocalBalance
s.Balance.UnsettledRemoteBalance = resp.UnsettledRemoteBalance
s.Balance.PendingOpenLocalBalance = resp.PendingOpenLocalBalance
s.Balance.PendingOpenRemoteBalance = resp.PendingOpenRemoteBalance
}
// updateWalletBalance creates stats for the node's wallet balance.
func (s *State) updateWalletBalance() {
resp := s.rpc.WalletBalance()
@ -345,7 +316,6 @@ func (s *State) updateState() {
s.updateInvoiceStats()
s.updateUTXOStats()
s.updateEdgeStats()
s.updateChannelBalance()
s.updateWalletBalance()
}

View File

@ -124,10 +124,12 @@ func (nw *nodeWatcher) WaitForNumChannelUpdates(op wire.OutPoint,
// WaitForNumNodeUpdates will block until a given number of node updates has
// been seen in the node's network topology.
func (nw *nodeWatcher) WaitForNumNodeUpdates(pubkey string,
expected int) error {
expected int) ([]*lnrpc.NodeUpdate, error) {
updates := make([]*lnrpc.NodeUpdate, 0)
checkNumUpdates := func() error {
num := len(nw.GetNodeUpdates(pubkey))
updates = nw.GetNodeUpdates(pubkey)
num := len(updates)
if num >= expected {
return nil
}
@ -136,7 +138,9 @@ func (nw *nodeWatcher) WaitForNumNodeUpdates(pubkey string,
"want %d, got %d", expected, num)
}
return wait.NoError(checkNumUpdates, DefaultTimeout)
err := wait.NoError(checkNumUpdates, DefaultTimeout)
return updates, err
}
// WaitForChannelOpen will block until a channel with the target outpoint is
@ -559,7 +563,7 @@ func (nw *nodeWatcher) handlePolicyUpdateWatchRequest(req *chanWatchRequest) {
// Check if the latest policy is matched.
policy := policies[len(policies)-1]
if checkChannelPolicy(policy.RoutingPolicy, req.policy) == nil {
if CheckChannelPolicy(policy.RoutingPolicy, req.policy) == nil {
close(req.eventChan)
return
}
@ -653,8 +657,8 @@ func (nw *nodeWatcher) getChannelPolicies(include bool) policyUpdateMap {
return policyUpdates
}
// checkChannelPolicy checks that the policy matches the expected one.
func checkChannelPolicy(policy, expectedPolicy *lnrpc.RoutingPolicy) error {
// CheckChannelPolicy checks that the policy matches the expected one.
func CheckChannelPolicy(policy, expectedPolicy *lnrpc.RoutingPolicy) error {
if policy.FeeBaseMsat != expectedPolicy.FeeBaseMsat {
return fmt.Errorf("expected base fee %v, got %v",
expectedPolicy.FeeBaseMsat, policy.FeeBaseMsat)

View File

@ -2,6 +2,7 @@ package rpc
import (
"context"
"strings"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/stretchr/testify/require"
@ -98,6 +99,18 @@ func (h *HarnessRPC) ConnectPeer(
return resp
}
// ConnectPeerAssertErr makes a RPC call to ConnectPeer and asserts an error
// returned.
func (h *HarnessRPC) ConnectPeerAssertErr(req *lnrpc.ConnectPeerRequest) error {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.LN.ConnectPeer(ctxt, req)
require.Error(h, err, "expected an error from ConnectPeer")
return err
}
// ListChannels list the channels for the given node and asserts it's
// successful.
func (h *HarnessRPC) ListChannels(
@ -120,6 +133,29 @@ func (h *HarnessRPC) PendingChannels() *lnrpc.PendingChannelsResponse {
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
resp, err := h.LN.PendingChannels(ctxt, pendingChansRequest)
// TODO(yy): We may get a `unable to find arbitrator` error from the
// rpc point, due to a timing issue in rpcserver,
// 1. `r.server.chanStateDB.FetchClosedChannels` fetches
// the pending force close channel.
// 2. `r.arbitratorPopulateForceCloseResp` relies on the
// channel arbitrator to get the report, and,
// 3. the arbitrator may be deleted due to the force close
// channel being resolved.
// Somewhere along the line a lock is missing to keep the data
// consistent.
//
// Return if there's no error.
if err == nil {
return resp
}
// Otherwise, give it a second shot if it's the arbitrator error.
if strings.Contains(err.Error(), "unable to find arbitrator") {
resp, err = h.LN.PendingChannels(ctxt, pendingChansRequest)
}
// It's very unlikely we'd get the arbitrator not found error again.
h.NoError(err, "PendingChannels")
return resp
@ -319,3 +355,215 @@ func (h *HarnessRPC) ChannelAcceptor() (AcceptorClient, context.CancelFunc) {
return resp, cancel
}
// SendCoins sends a given amount of money to the specified address from the
// passed node.
func (h *HarnessRPC) SendCoins(
req *lnrpc.SendCoinsRequest) *lnrpc.SendCoinsResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.LN.SendCoins(ctxt, req)
h.NoError(err, "SendCoins")
return resp
}
// SendCoinsAssertErr sends a given amount of money to the specified address
// from the passed node and asserts an error is returned.
func (h *HarnessRPC) SendCoinsAssertErr(req *lnrpc.SendCoinsRequest) {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.LN.SendCoins(ctxt, req)
require.Error(h, err, "node %s didn't not return an error", h.Name)
}
// GetTransactions makes a RPC call to GetTransactions and asserts.
func (h *HarnessRPC) GetTransactions(
req *lnrpc.GetTransactionsRequest) *lnrpc.TransactionDetails {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
if req == nil {
req = &lnrpc.GetTransactionsRequest{}
}
resp, err := h.LN.GetTransactions(ctxt, req)
h.NoError(err, "GetTransactions")
return resp
}
// SignMessage makes a RPC call to node's SignMessage and asserts.
func (h *HarnessRPC) SignMessage(msg []byte) *lnrpc.SignMessageResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &lnrpc.SignMessageRequest{Msg: msg}
resp, err := h.LN.SignMessage(ctxt, req)
h.NoError(err, "SignMessage")
return resp
}
// VerifyMessage makes a RPC call to node's VerifyMessage and asserts.
func (h *HarnessRPC) VerifyMessage(msg []byte,
sig string) *lnrpc.VerifyMessageResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &lnrpc.VerifyMessageRequest{Msg: msg, Signature: sig}
resp, err := h.LN.VerifyMessage(ctxt, req)
h.NoError(err, "VerifyMessage")
return resp
}
// GetRecoveryInfo uses the specified node to make a RPC call to
// GetRecoveryInfo and asserts.
func (h *HarnessRPC) GetRecoveryInfo(
req *lnrpc.GetRecoveryInfoRequest) *lnrpc.GetRecoveryInfoResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
if req == nil {
req = &lnrpc.GetRecoveryInfoRequest{}
}
resp, err := h.LN.GetRecoveryInfo(ctxt, req)
h.NoError(err, "GetRecoveryInfo")
return resp
}
// BatchOpenChannel makes a RPC call to BatchOpenChannel and asserts.
func (h *HarnessRPC) BatchOpenChannel(
req *lnrpc.BatchOpenChannelRequest) *lnrpc.BatchOpenChannelResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.LN.BatchOpenChannel(ctxt, req)
h.NoError(err, "BatchOpenChannel")
return resp
}
// BatchOpenChannelAssertErr makes a RPC call to BatchOpenChannel and asserts
// there's an error returned.
func (h *HarnessRPC) BatchOpenChannelAssertErr(
req *lnrpc.BatchOpenChannelRequest) error {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.LN.BatchOpenChannel(ctxt, req)
require.Error(h, err, "expecte batch open channel fail")
return err
}
// QueryRoutes makes a RPC call to QueryRoutes and asserts.
func (h *HarnessRPC) QueryRoutes(
req *lnrpc.QueryRoutesRequest) *lnrpc.QueryRoutesResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
routes, err := h.LN.QueryRoutes(ctxt, req)
h.NoError(err, "QueryRoutes")
return routes
}
// SendToRoute makes a RPC call to SendToRoute and asserts.
func (h *HarnessRPC) SendToRoute() lnrpc.Lightning_SendToRouteClient {
// SendToRoute needs to have the context alive for the entire test case
// as the returned client will be used for send and receive payment
// stream. Thus we use runCtx here instead of a timeout context.
client, err := h.LN.SendToRoute(h.runCtx)
h.NoError(err, "SendToRoute")
return client
}
// SendToRouteSync makes a RPC call to SendToRouteSync and asserts.
func (h *HarnessRPC) SendToRouteSync(
req *lnrpc.SendToRouteRequest) *lnrpc.SendResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.LN.SendToRouteSync(ctxt, req)
h.NoError(err, "SendToRouteSync")
return resp
}
// UpdateChannelPolicy makes a RPC call to UpdateChannelPolicy and asserts.
func (h *HarnessRPC) UpdateChannelPolicy(
req *lnrpc.PolicyUpdateRequest) *lnrpc.PolicyUpdateResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.LN.UpdateChannelPolicy(ctxt, req)
h.NoError(err, "UpdateChannelPolicy")
return resp
}
type InvoiceUpdateClient lnrpc.Lightning_SubscribeInvoicesClient
// SubscribeInvoices creates a subscription client for invoice events and
// asserts its creation.
//
// NOTE: make sure to subscribe to invoices as early as possible as it takes
// some time for lnd to create the subscription client. If an invoice is
// added right after the subscription, it may be missed. However, if AddIndex
// or SettleIndex is used in the request, it will be fine as a backlog will
// always be sent.
func (h *HarnessRPC) SubscribeInvoices(
req *lnrpc.InvoiceSubscription) InvoiceUpdateClient {
// SubscribeInvoices needs to have the context alive for the
// entire test case as the returned client will be used for send and
// receive events stream. Thus we use runCtx here instead of a timeout
// context.
client, err := h.LN.SubscribeInvoices(h.runCtx, req)
h.NoError(err, "SubscribeInvoices")
return client
}
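A hedged usage sketch following that note; the test name is hypothetical and it assumes the harness exposes an AddInvoice wrapper and the require import. Subscribing before adding the invoice ensures no update can be missed:

```go
func testInvoiceSubscriptionSketch(ht *lntemp.HarnessTest) {
	alice := ht.Alice

	// Subscribe first so every subsequent invoice event is delivered;
	// alternatively AddIndex/SettleIndex could request a backlog.
	stream := alice.RPC.SubscribeInvoices(&lnrpc.InvoiceSubscription{})

	// Add the invoice only after the subscription exists.
	alice.RPC.AddInvoice(&lnrpc.Invoice{Value: 1000})

	// The first update on the stream is the open (unsettled) invoice.
	inv := ht.ReceiveInvoiceUpdate(stream)
	require.Equal(ht, lnrpc.Invoice_OPEN, inv.State)
}
```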
type BackupSubscriber lnrpc.Lightning_SubscribeChannelBackupsClient
// SubscribeChannelBackups creates a client to listen to channel backup stream.
func (h *HarnessRPC) SubscribeChannelBackups() BackupSubscriber {
// Use runCtx here instead of timeout context to keep the stream client
// alive.
backupStream, err := h.LN.SubscribeChannelBackups(
h.runCtx, &lnrpc.ChannelBackupSubscription{},
)
h.NoError(err, "SubscribeChannelBackups")
return backupStream
}
// VerifyChanBackup makes a RPC call to node's VerifyChanBackup and asserts.
func (h *HarnessRPC) VerifyChanBackup(
ss *lnrpc.ChanBackupSnapshot) *lnrpc.VerifyChanBackupResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.LN.VerifyChanBackup(ctxt, ss)
h.NoError(err, "VerifyChanBackup")
return resp
}

View File

@ -1,5 +1,39 @@
package rpc
import (
"context"
"github.com/lightningnetwork/lnd/lnrpc/peersrpc"
"github.com/stretchr/testify/require"
)
// =====================
// PeerClient related RPCs.
// =====================
type (
AnnReq *peersrpc.NodeAnnouncementUpdateRequest
AnnResp *peersrpc.NodeAnnouncementUpdateResponse
)
// UpdateNodeAnnouncement makes an UpdateNodeAnnouncement RPC call to the
// peersrpc client and asserts.
func (h *HarnessRPC) UpdateNodeAnnouncement(req AnnReq) AnnResp {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Peer.UpdateNodeAnnouncement(ctxt, req)
h.NoError(err, "UpdateNodeAnnouncement")
return resp
}
// UpdateNodeAnnouncementErr makes an UpdateNodeAnnouncement RPC call to the
// peersrpc client and asserts an error is returned.
func (h *HarnessRPC) UpdateNodeAnnouncementErr(req AnnReq) {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.Peer.UpdateNodeAnnouncement(ctxt, req)
require.Error(h, err, "expect an error from update announcement")
}

View File

@ -5,6 +5,7 @@ import (
"github.com/lightningnetwork/lnd/lnrpc/signrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/stretchr/testify/require"
)
// =====================
@ -73,3 +74,78 @@ func (h *HarnessRPC) FinalizePsbt(
return resp
}
// LabelTransactionAssertErr makes a RPC call to the node's LabelTransaction
// and asserts an error is returned. It then returns the error.
func (h *HarnessRPC) LabelTransactionAssertErr(
req *walletrpc.LabelTransactionRequest) error {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.WalletKit.LabelTransaction(ctxt, req)
require.Error(h, err, "expected error returned")
return err
}
// LabelTransaction makes a RPC call to the node's LabelTransaction
// and asserts no error is returned.
func (h *HarnessRPC) LabelTransaction(req *walletrpc.LabelTransactionRequest) {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.WalletKit.LabelTransaction(ctxt, req)
h.NoError(err, "LabelTransaction")
}
// DeriveNextKey makes a RPC call to the DeriveNextKey and asserts.
func (h *HarnessRPC) DeriveNextKey(
req *walletrpc.KeyReq) *signrpc.KeyDescriptor {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
key, err := h.WalletKit.DeriveNextKey(ctxt, req)
h.NoError(err, "DeriveNextKey")
return key
}
// ListAddresses makes a RPC call to the ListAddresses and asserts.
func (h *HarnessRPC) ListAddresses(
req *walletrpc.ListAddressesRequest) *walletrpc.ListAddressesResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
key, err := h.WalletKit.ListAddresses(ctxt, req)
h.NoError(err, "ListAddresses")
return key
}
// ListSweeps makes a ListSweeps RPC call to the node's WalletKit client.
func (h *HarnessRPC) ListSweeps(verbose bool) *walletrpc.ListSweepsResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &walletrpc.ListSweepsRequest{Verbose: verbose}
resp, err := h.WalletKit.ListSweeps(ctxt, req)
h.NoError(err, "ListSweeps")
return resp
}
// PendingSweeps makes a RPC call to the node's WalletKitClient and asserts.
func (h *HarnessRPC) PendingSweeps() *walletrpc.PendingSweepsResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &walletrpc.PendingSweepsRequest{}
resp, err := h.WalletKit.PendingSweeps(ctxt, req)
h.NoError(err, "PendingSweeps")
return resp
}

View File

@ -5,7 +5,11 @@ import (
"io"
"math"
"os"
"strconv"
"strings"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
)
@ -53,3 +57,68 @@ func errNumNotMatched(name string, subject string,
return fmt.Errorf("%s: assert %s failed: want %d, got: %d, total: "+
"%d, previously had: %d", name, subject, want, got, total, old)
}
// ParseDerivationPath parses a path in the form of m/x'/y'/z'/a/b into a slice
// of [x, y, z, a, b], meaning that the apostrophe is ignored and 2^31 is _not_
// added to the numbers.
func ParseDerivationPath(path string) ([]uint32, error) {
path = strings.TrimSpace(path)
if len(path) == 0 {
return nil, fmt.Errorf("path cannot be empty")
}
if !strings.HasPrefix(path, "m/") {
return nil, fmt.Errorf("path must start with m/")
}
// Just the root key, no path was provided. This is valid but not useful
// in most cases.
rest := strings.ReplaceAll(path, "m/", "")
if rest == "" {
return []uint32{}, nil
}
parts := strings.Split(rest, "/")
indices := make([]uint32, len(parts))
for i := 0; i < len(parts); i++ {
part := parts[i]
if strings.Contains(parts[i], "'") {
part = strings.TrimRight(parts[i], "'")
}
parsed, err := strconv.ParseInt(part, 10, 32)
if err != nil {
return nil, fmt.Errorf("could not parse part \"%s\": "+
"%v", part, err)
}
indices[i] = uint32(parsed)
}
return indices, nil
}
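An illustrative fragment; the path below is a hypothetical BIP-86 style path used only to show the parsing behavior:

```go
// Illustrative only: parse a hypothetical BIP-86 style path.
indices, err := ParseDerivationPath("m/86'/0'/0'/0/1")
if err != nil {
	// Handle the malformed-path error here.
}
// indices is []uint32{86, 0, 0, 0, 1}: the apostrophes are dropped and
// the hardened offset (2^31) is not added back.
_ = indices
```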
// ChanPointFromPendingUpdate constructs a channel point from a lnrpc pending
// update.
func ChanPointFromPendingUpdate(pu *lnrpc.PendingUpdate) *lnrpc.ChannelPoint {
chanPoint := &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
FundingTxidBytes: pu.Txid,
},
OutputIndex: pu.OutputIndex,
}
return chanPoint
}
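A hedged sketch combining this helper with OpenChannelAssertPending to obtain a channel point before the funding transaction confirms; the function name is hypothetical and assumes it lives in the lntemp package:

```go
func pendingChanPointSketch(ht *HarnessTest) *lnrpc.ChannelPoint {
	p := OpenChannelParams{Amt: 1_000_000}
	pending := ht.OpenChannelAssertPending(ht.Alice, ht.Bob, p)

	// Build a channel point from the pending update, usable before the
	// funding transaction is mined.
	return ChanPointFromPendingUpdate(pending)
}
```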
// channelPointStr returns the string representation of the channel's
// funding outpoint.
func channelPointStr(chanPoint *lnrpc.ChannelPoint) string {
fundingTxID, err := lnrpc.GetChanPointFundingTxid(chanPoint)
if err != nil {
return ""
}
cp := wire.OutPoint{
Hash: *fundingTxID,
Index: chanPoint.OutputIndex,
}
return cp.String()
}

View File

@ -16,9 +16,7 @@ import (
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/peersrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
@ -816,100 +814,6 @@ func checkPendingHtlcStageAndMaturity(
return nil
}
// assertReports checks that the count of resolutions we have present per
// type matches a set of expected resolutions.
func assertReports(t *harnessTest, node *lntest.HarnessNode,
channelPoint wire.OutPoint, expected map[string]*lnrpc.Resolution) {
// Get our node's closed channels.
ctxb := context.Background()
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
closed, err := node.ClosedChannels(
ctxt, &lnrpc.ClosedChannelsRequest{},
)
require.NoError(t.t, err)
var resolutions []*lnrpc.Resolution
for _, close := range closed.Channels {
if close.ChannelPoint == channelPoint.String() {
resolutions = close.Resolutions
break
}
}
require.NotNil(t.t, resolutions)
require.Equal(t.t, len(expected), len(resolutions))
for _, res := range resolutions {
outPointStr := fmt.Sprintf("%v:%v", res.Outpoint.TxidStr,
res.Outpoint.OutputIndex)
expected, ok := expected[outPointStr]
require.True(t.t, ok)
require.Equal(t.t, expected, res)
}
}
// assertSweepFound looks up a sweep in a nodes list of broadcast sweeps.
func assertSweepFound(t *testing.T, node *lntest.HarnessNode,
sweep string, verbose bool) {
// List all sweeps that alice's node had broadcast.
ctxb := context.Background()
ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
sweepResp, err := node.WalletKitClient.ListSweeps(
ctx, &walletrpc.ListSweepsRequest{
Verbose: verbose,
},
)
require.NoError(t, err)
var found bool
if verbose {
found = findSweepInDetails(t, sweep, sweepResp)
} else {
found = findSweepInTxids(t, sweep, sweepResp)
}
require.True(t, found, "sweep: %v not found", sweep)
}
func findSweepInTxids(t *testing.T, sweepTxid string,
sweepResp *walletrpc.ListSweepsResponse) bool {
sweepTxIDs := sweepResp.GetTransactionIds()
require.NotNil(t, sweepTxIDs, "expected transaction ids")
require.Nil(t, sweepResp.GetTransactionDetails())
// Check that the sweep tx we have just produced is present.
for _, tx := range sweepTxIDs.TransactionIds {
if tx == sweepTxid {
return true
}
}
return false
}
func findSweepInDetails(t *testing.T, sweepTxid string,
sweepResp *walletrpc.ListSweepsResponse) bool {
sweepDetails := sweepResp.GetTransactionDetails()
require.NotNil(t, sweepDetails, "expected transaction details")
require.Nil(t, sweepResp.GetTransactionIds())
for _, tx := range sweepDetails.Transactions {
if tx.TxHash == sweepTxid {
return true
}
}
return false
}
// assertAmountSent generates a closure which queries listchannels for sndr and
// rcvr, and asserts that sndr sent amt satoshis, and that rcvr received amt
// satoshis.
@ -977,34 +881,6 @@ func assertLastHTLCError(t *harnessTest, node *lntest.HarnessNode,
require.Equal(t.t, code, htlc.Failure.Code, "unexpected failure code")
}
func assertChannelConstraintsEqual(
t *harnessTest, want, got *lnrpc.ChannelConstraints) {
t.t.Helper()
require.Equal(t.t, want.CsvDelay, got.CsvDelay, "CsvDelay mismatched")
require.Equal(
t.t, want.ChanReserveSat, got.ChanReserveSat,
"ChanReserveSat mismatched",
)
require.Equal(
t.t, want.DustLimitSat, got.DustLimitSat,
"DustLimitSat mismatched",
)
require.Equal(
t.t, want.MaxPendingAmtMsat, got.MaxPendingAmtMsat,
"MaxPendingAmtMsat mismatched",
)
require.Equal(
t.t, want.MinHtlcMsat, got.MinHtlcMsat,
"MinHtlcMsat mismatched",
)
require.Equal(
t.t, want.MaxAcceptedHtlcs, got.MaxAcceptedHtlcs,
"MaxAcceptedHtlcs mismatched",
)
}
// assertAmountPaid checks that the ListChannels command of the provided
// node list the total amount sent and received as expected for the
// provided channel.
@ -1199,28 +1075,6 @@ func assertNodeNumChannels(t *harnessTest, node *lntest.HarnessNode,
)
}
func assertSyncType(t *harnessTest, node *lntest.HarnessNode,
peer string, syncType lnrpc.Peer_SyncType) {
t.t.Helper()
ctxb := context.Background()
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := node.ListPeers(ctxt, &lnrpc.ListPeersRequest{})
require.NoError(t.t, err)
for _, rpcPeer := range resp.Peers {
if rpcPeer.PubKey != peer {
continue
}
require.Equal(t.t, syncType, rpcPeer.SyncType)
return
}
t.t.Fatalf("unable to find peer: %s", peer)
}
// assertActiveHtlcs makes sure all the passed nodes have the _exact_ HTLCs
// matching payHashes on _all_ their channels.
func assertActiveHtlcs(nodes []*lntest.HarnessNode, payHashes ...[]byte) error {
@ -1614,27 +1468,3 @@ func assertNodeAnnouncement(t *harnessTest, n1, n2 *lnrpc.NodeUpdate) {
}
}
}
// assertUpdateNodeAnnouncementResponse is a helper function to assert
// the response expected values.
func assertUpdateNodeAnnouncementResponse(t *harnessTest,
response *peersrpc.NodeAnnouncementUpdateResponse,
expectedOps map[string]int) {
require.Equal(
t.t, len(response.Ops), len(expectedOps),
"unexpected number of Ops updating dave's node announcement",
)
ops := make(map[string]int, len(response.Ops))
for _, op := range response.Ops {
ops[op.Entity] = len(op.Actions)
}
for k, v := range expectedOps {
if v != ops[k] {
t.Fatalf("unexpected number of actions for operation "+
"%s: got %d wanted %d", k, ops[k], v)
}
}
}

View File

@ -71,4 +71,152 @@ var allTestCasesTemp = []*lntemp.TestCase{
Name: "data loss protection",
TestFunc: testDataLossProtection,
},
{
Name: "sweep coins",
TestFunc: testSweepAllCoins,
},
{
Name: "disconnecting target peer",
TestFunc: testDisconnectingTargetPeer,
},
{
Name: "sphinx replay persistence",
TestFunc: testSphinxReplayPersistence,
},
{
Name: "list channels",
TestFunc: testListChannels,
},
{
Name: "max pending channel",
TestFunc: testMaxPendingChannels,
},
{
Name: "garbage collect link nodes",
TestFunc: testGarbageCollectLinkNodes,
},
{
Name: "reject onward htlc",
TestFunc: testRejectHTLC,
},
{
Name: "node sign verify",
TestFunc: testNodeSignVerify,
},
{
Name: "list addresses",
TestFunc: testListAddresses,
},
{
Name: "abandonchannel",
TestFunc: testAbandonChannel,
},
{
Name: "recovery info",
TestFunc: testGetRecoveryInfo,
},
{
Name: "onchain fund recovery",
TestFunc: testOnchainFundRecovery,
},
{
Name: "basic funding flow with all input types",
TestFunc: testChannelFundingInputTypes,
},
{
Name: "unconfirmed channel funding",
TestFunc: testUnconfirmedChannelFunding,
},
{
Name: "funding flow persistence",
TestFunc: testChannelFundingPersistence,
},
{
Name: "batch channel funding",
TestFunc: testBatchChanFunding,
},
{
Name: "update channel policy",
TestFunc: testUpdateChannelPolicy,
},
{
Name: "send update disable channel",
TestFunc: testSendUpdateDisableChannel,
},
{
Name: "private channel update policy",
TestFunc: testUpdateChannelPolicyForPrivateChannel,
},
{
Name: "update channel policy fee rate accuracy",
TestFunc: testUpdateChannelPolicyFeeRateAccuracy,
},
{
Name: "connection timeout",
TestFunc: testNetworkConnectionTimeout,
},
{
Name: "reconnect after ip change",
TestFunc: testReconnectAfterIPChange,
},
{
Name: "addpeer config",
TestFunc: testAddPeerConfig,
},
{
Name: "unannounced channels",
TestFunc: testUnannouncedChannels,
},
{
Name: "graph topology notifications",
TestFunc: testGraphTopologyNotifications,
},
{
Name: "node announcement",
TestFunc: testNodeAnnouncement,
},
{
Name: "update node announcement rpc",
TestFunc: testUpdateNodeAnnouncement,
},
{
Name: "list outgoing payments",
TestFunc: testListPayments,
},
{
Name: "immediate payment after channel opened",
TestFunc: testPaymentFollowingChannelOpen,
},
{
Name: "invoice update subscription",
TestFunc: testInvoiceSubscriptions,
},
{
Name: "streaming channel backup update",
TestFunc: testChannelBackupUpdates,
},
{
Name: "export channel backup",
TestFunc: testExportChannelBackup,
},
{
Name: "channel balance",
TestFunc: testChannelBalance,
},
{
Name: "channel unsettled balance",
TestFunc: testChannelUnsettledBalance,
},
{
Name: "commitment deadline",
TestFunc: testCommitmentTransactionDeadline,
},
{
Name: "channel force closure",
TestFunc: testChannelForceClosure,
},
{
Name: "failing link",
TestFunc: testFailingChannel,
},
}
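The slice above is a table-driven registry: each entry pairs a human-readable Name with a TestFunc that accepts the new *lntemp.HarnessTest. A minimal, purely illustrative sketch of how such a registry is typically consumed — the driver below is not part of this PR, and it assumes ht.Subtest derives a per-subtest harness as seen elsewhere in this diff — might look like:
// runRegisteredCases is an illustrative driver (not the actual runner in
// lnd) showing the table-driven pattern: every registered case becomes a
// named Go subtest that receives its own harness instance.
func runRegisteredCases(t *testing.T, ht *lntemp.HarnessTest) {
	for _, tc := range allTestCasesTemp {
		tc := tc // Capture the loop variable for the closure below.
		t.Run(tc.Name, func(st *testing.T) {
			// Derive a fresh harness scoped to this subtest
			// before invoking the registered test function.
			tc.TestFunc(ht.Subtest(st))
		})
	}
}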

View File

@ -20,7 +20,6 @@ import (
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
@ -40,6 +39,10 @@ type (
password []byte, mnemonic []string) nodeRestorer
)
// revocationWindow specifies the revocation window used when restoring a
// node.
const revocationWindow = 100
// chanRestoreScenario represents a test case used by testing the channel
// restore methods.
type chanRestoreScenario struct {
@ -310,7 +313,8 @@ func testChannelBackupRestoreBasic(ht *lntemp.HarnessTest) {
return func() *node.HarnessNode {
return st.RestoreNodeWithSeed(
"dave", nil, password, mnemonic,
"", 1000, backupSnapshot,
"", revocationWindow,
backupSnapshot,
copyPorts(oldNode),
)
}
@ -342,7 +346,7 @@ func testChannelBackupRestoreBasic(ht *lntemp.HarnessTest) {
return func() *node.HarnessNode {
newNode := st.RestoreNodeWithSeed(
"dave", nil, password, mnemonic,
"", 1000, nil,
"", revocationWindow, nil,
copyPorts(oldNode),
)
@ -378,7 +382,7 @@ func testChannelBackupRestoreBasic(ht *lntemp.HarnessTest) {
return func() *node.HarnessNode {
newNode := st.RestoreNodeWithSeed(
"dave", nil, password, mnemonic,
"", 1000, nil,
"", revocationWindow, nil,
copyPorts(oldNode),
)
@ -666,11 +670,16 @@ func testChannelBackupRestoreLegacy(ht *lntemp.HarnessTest) {
func testChannelBackupRestoreForceClose(ht *lntemp.HarnessTest) {
// Restore a channel that was force closed by dave just before going
// offline.
ht.Run("from backup file anchors", func(t *testing.T) {
success := ht.Run("from backup file anchors", func(t *testing.T) {
st := ht.Subtest(t)
runChanRestoreScenarioForceClose(st, false)
})
// Only run the second test if the first passed.
if !success {
return
}
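The gating above works because (*testing.T).Run reports whether the subtest passed. A standalone sketch of the same idiom, independent of the lnd harness:
// runGatedSubtests illustrates the pattern used above: the dependent
// variant is skipped entirely when the base variant fails.
func runGatedSubtests(t *testing.T) {
	ok := t.Run("base variant", func(t *testing.T) {
		// The base scenario would run here.
	})
	if !ok {
		// Don't run the dependent variant against a broken base case.
		return
	}
	t.Run("dependent variant", func(t *testing.T) {
		// The dependent scenario would run here.
	})
}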
// Restore a zero-conf anchors channel that was force closed by dave
// just before going offline.
ht.Run("from backup file anchors w/ zero-conf", func(t *testing.T) {
@ -782,12 +791,12 @@ func runChanRestoreScenarioForceClose(ht *lntemp.HarnessTest, zeroConf bool) {
// testChannelBackupUpdates tests that both the streaming channel update RPC,
// and the on-disk channel.backup are updated each time a channel is
// opened/closed.
func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testChannelBackupUpdates(ht *lntemp.HarnessTest) {
alice := ht.Alice
// First, we'll make a temp directory that we'll use to store our
// backup file, so we can check in on it during the test easily.
backupDir := t.t.TempDir()
backupDir := ht.T.TempDir()
// First, we'll create a new node, Carol. We'll also create a temporary
// file that Carol will use to store her channel backups.
@ -795,17 +804,11 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
backupDir, chanbackup.DefaultBackupFileName,
)
carolArgs := fmt.Sprintf("--backupfilepath=%v", backupFilePath)
carol := net.NewNode(t.t, "carol", []string{carolArgs})
defer shutdownAndAssert(net, t, carol)
carol := ht.NewNode("carol", []string{carolArgs})
// Next, we'll register for streaming notifications for changes to the
// backup file.
backupStream, err := carol.SubscribeChannelBackups(
ctxb, &lnrpc.ChannelBackupSubscription{},
)
if err != nil {
t.Fatalf("unable to create backup stream: %v", err)
}
backupStream := carol.RPC.SubscribeChannelBackups()
// We'll use this goroutine to proxy any updates to a channel we can
// easily use below.
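The proxy goroutine itself sits outside this hunk; a sketch of the shape it typically takes — assuming the stream's Recv method and the backupUpdates/streamErr channels used further below, with an arbitrary buffer size — is:
// Illustrative only: forward snapshots from the subscription stream into
// a buffered channel, and surface any stream error on a separate channel.
backupUpdates := make(chan *lnrpc.ChanBackupSnapshot, 4)
streamErr := make(chan error, 1)
go func() {
	for {
		snapshot, err := backupStream.Recv()
		if err != nil {
			streamErr <- err
			return
		}
		backupUpdates <- snapshot
	}
}()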
@ -838,18 +841,16 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
// With Carol up, we'll now connect her to Alice, and open a channel
// between them.
net.ConnectNodes(t.t, carol, net.Alice)
ht.ConnectNodes(carol, alice)
// Next, we'll open two channels between Alice and Carol back to back.
var chanPoints []*lnrpc.ChannelPoint
numChans := 2
chanAmt := btcutil.Amount(1000000)
for i := 0; i < numChans; i++ {
chanPoint := openChannelAndAssert(
t, net, net.Alice, carol,
lntest.OpenChannelParams{Amt: chanAmt},
chanPoint := ht.OpenChannel(
alice, carol, lntemp.OpenChannelParams{Amt: chanAmt},
)
chanPoints = append(chanPoints, chanPoint)
}
@ -860,12 +861,14 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
for i := 0; i < numNtfns; i++ {
select {
case err := <-streamErr:
t.Fatalf("error with backup stream: %v", err)
require.Failf(ht, "stream err",
"error with backup stream: %v", err)
case currentBackup = <-backupUpdates:
case <-time.After(time.Second * 5):
t.Fatalf("didn't receive channel backup "+
require.Failf(ht, "timeout", "didn't "+
"receive channel backup "+
"notification %v", i+1)
}
}
@ -885,32 +888,29 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
// nonce, we can't compare them directly, so instead
// we'll compare the length which is a proxy for the
// number of channels that the multi-backup contains.
rawBackup := currentBackup.MultiChanBackup.MultiChanBackup
if len(rawBackup) != len(packedBackup) {
backup := currentBackup.MultiChanBackup.MultiChanBackup
if len(backup) != len(packedBackup) {
return fmt.Errorf("backup files don't match: "+
"expected %x got %x", rawBackup, packedBackup)
"expected %x got %x", backup,
packedBackup)
}
// Additionally, we'll assert that both backups
// returned are valid.
for i, backup := range [][]byte{rawBackup, packedBackup} {
for _, backup := range [][]byte{backup, packedBackup} {
snapshot := &lnrpc.ChanBackupSnapshot{
MultiChanBackup: &lnrpc.MultiChanBackup{
MultiChanBackup: backup,
},
}
_, err := carol.VerifyChanBackup(ctxb, snapshot)
if err != nil {
return fmt.Errorf("unable to verify "+
"backup #%d: %v", i, err)
}
carol.RPC.VerifyChanBackup(snapshot)
}
return nil
}, defaultTimeout)
if err != nil {
t.Fatalf("backup state invalid: %v", err)
}
require.NoError(ht, err, "timeout while checking "+
"backup state: %v", err)
}
// As these two channels were just opened, we should've got two times
@ -931,11 +931,11 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
chanPoint := chanPoints[i]
closeChannelAndAssert(t, net, net.Alice, chanPoint, forceClose)
// If we force closed the channel, then we'll mine enough
// blocks to ensure all outputs have been swept.
if forceClose {
ht.ForceCloseChannel(alice, chanPoint)
// A local force closed channel will trigger a
// notification once the commitment TX confirms on
// chain. But that won't remove the channel from the
@ -943,13 +943,12 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
// locked contract was fully resolved on chain.
assertBackupNtfns(1)
cleanupForceClose(t, net, net.Alice, chanPoint)
// Now that the channel's been fully resolved, we expect
// another notification.
// Now that the channel's been fully resolved, we
// expect another notification.
assertBackupNtfns(1)
assertBackupFileState()
} else {
ht.CloseChannel(alice, chanPoint)
// We should get a single notification after closing,
// and the on-disk state should match this latest
// notification.
@ -962,73 +961,49 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
// testExportChannelBackup tests that we're able to properly export either a
// targeted channel's backup, or export backups of all the current open
// channels.
func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testExportChannelBackup(ht *lntemp.HarnessTest) {
// First, we'll create our primary test node: Carol. We'll use Carol to
// open channels and also export backups that we'll examine throughout
// the test.
carol := net.NewNode(t.t, "carol", nil)
defer shutdownAndAssert(net, t, carol)
carol := ht.NewNode("carol", nil)
// With Carol up, we'll now connect her to Alice, and open a channel
// between them.
net.ConnectNodes(t.t, carol, net.Alice)
alice := ht.Alice
ht.ConnectNodes(carol, alice)
// Next, we'll open two channels between Alice and Carol back to back.
var chanPoints []*lnrpc.ChannelPoint
numChans := 2
chanAmt := btcutil.Amount(1000000)
for i := 0; i < numChans; i++ {
chanPoint := openChannelAndAssert(
t, net, net.Alice, carol,
lntest.OpenChannelParams{Amt: chanAmt},
chanPoint := ht.OpenChannel(
alice, carol, lntemp.OpenChannelParams{Amt: chanAmt},
)
chanPoints = append(chanPoints, chanPoint)
}
// Now that the channels are open, we should be able to fetch the
// backups of each of the channels.
for _, chanPoint := range chanPoints {
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
req := &lnrpc.ExportChannelBackupRequest{
ChanPoint: chanPoint,
}
chanBackup, err := carol.ExportChannelBackup(ctxt, req)
if err != nil {
t.Fatalf("unable to fetch backup for channel %v: %v",
chanPoint, err)
}
chanBackup := carol.RPC.ExportChanBackup(chanPoint)
// The returned backup should be fully populated. Since it's
// encrypted, we can't assert any more than that atm.
if len(chanBackup.ChanBackup) == 0 {
t.Fatalf("obtained empty backup for channel: %v", chanPoint)
}
require.NotEmptyf(ht, chanBackup.ChanBackup,
"obtained empty backup for channel: %v", chanPoint)
// The specified chanPoint in the response should match our
// requested chanPoint.
if chanBackup.ChanPoint.String() != chanPoint.String() {
t.Fatalf("chanPoint mismatched: expected %v, got %v",
chanPoint.String(),
chanBackup.ChanPoint.String())
}
require.Equal(ht, chanBackup.ChanPoint.String(),
chanPoint.String())
}
// Before we proceed, we'll make two utility methods we'll use below
// for our primary assertions.
assertNumSingleBackups := func(numSingles int) {
err := wait.NoError(func() error {
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
req := &lnrpc.ChanBackupExportRequest{}
chanSnapshot, err := carol.ExportAllChannelBackups(
ctxt, req,
)
if err != nil {
return fmt.Errorf("unable to export channel "+
"backup: %v", err)
}
chanSnapshot := carol.RPC.ExportAllChanBackups()
if chanSnapshot.SingleChanBackups == nil {
return fmt.Errorf("single chan backups not " +
@ -1043,29 +1018,23 @@ func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) {
return nil
}, defaultTimeout)
if err != nil {
t.Fatalf(err.Error())
}
require.NoError(ht, err, "timeout checking num single backup")
}
assertMultiBackupFound := func() func(bool, map[wire.OutPoint]struct{}) {
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
req := &lnrpc.ChanBackupExportRequest{}
chanSnapshot, err := carol.ExportAllChannelBackups(ctxt, req)
if err != nil {
t.Fatalf("unable to export channel backup: %v", err)
}
chanSnapshot := carol.RPC.ExportAllChanBackups()
return func(found bool, chanPoints map[wire.OutPoint]struct{}) {
switch {
case found && chanSnapshot.MultiChanBackup == nil:
t.Fatalf("multi-backup not present")
require.Fail(ht, "multi-backup not present")
case !found && chanSnapshot.MultiChanBackup != nil &&
(len(chanSnapshot.MultiChanBackup.MultiChanBackup) !=
chanbackup.NilMultiSizePacked):
t.Fatalf("found multi-backup when non should " +
"be found")
require.Fail(ht, "found multi-backup when "+
"non should be found")
}
if !found {
@ -1073,23 +1042,20 @@ func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) {
}
backedUpChans := chanSnapshot.MultiChanBackup.ChanPoints
if len(chanPoints) != len(backedUpChans) {
t.Fatalf("expected %v chans got %v", len(chanPoints),
len(backedUpChans))
}
require.Len(ht, backedUpChans, len(chanPoints))
for _, chanPoint := range backedUpChans {
wirePoint := rpcPointToWirePoint(t, chanPoint)
if _, ok := chanPoints[wirePoint]; !ok {
t.Fatalf("unexpected backup: %v", wirePoint)
}
wp := ht.OutPointFromChannelPoint(chanPoint)
_, ok := chanPoints[wp]
require.True(ht, ok, "unexpected "+
"backup: %v", wp)
}
}
}
chans := make(map[wire.OutPoint]struct{})
for _, chanPoint := range chanPoints {
chans[rpcPointToWirePoint(t, chanPoint)] = struct{}{}
chans[ht.OutPointFromChannelPoint(chanPoint)] = struct{}{}
}
// We should have exactly two single channel backups contained, and we
@ -1101,11 +1067,11 @@ func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) {
// shouldn't be able to find that channel as a backup still. We should
// also have one less single written to disk.
for i, chanPoint := range chanPoints {
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
ht.CloseChannel(alice, chanPoint)
assertNumSingleBackups(len(chanPoints) - i - 1)
delete(chans, rpcPointToWirePoint(t, chanPoint))
delete(chans, ht.OutPointFromChannelPoint(chanPoint))
assertMultiBackupFound()(true, chans)
}
@ -1388,8 +1354,8 @@ func chanRestoreViaRPC(ht *lntemp.HarnessTest, password []byte,
return func() *node.HarnessNode {
newNode := ht.RestoreNodeWithSeed(
"dave", nil, password, mnemonic, "", 1000, nil,
copyPorts(oldNode),
"dave", nil, password, mnemonic, "", revocationWindow,
nil, copyPorts(oldNode),
)
req := &lnrpc.RestoreChanBackupRequest{Backup: backup}
newNode.RPC.RestoreChanBackups(req)
@ -1409,15 +1375,6 @@ func copyPorts(oldNode *node.HarnessNode) node.Option {
}
}
func rpcPointToWirePoint(t *harnessTest,
chanPoint *lnrpc.ChannelPoint) wire.OutPoint {
op, err := lntest.MakeOutpoint(chanPoint)
require.NoError(t.t, err, "unable to get txid")
return op
}
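The helper above is removed in favour of ht.OutPointFromChannelPoint. A minimal sketch of what such a conversion does, built only from lnrpc.GetChanPointFundingTxid as used elsewhere in this file (the function name below is hypothetical):
// outPointFromChanPoint is an illustrative stand-in: it extracts the
// funding txid from the RPC channel point and pairs it with the output
// index to form a wire.OutPoint.
func outPointFromChanPoint(t *testing.T,
	cp *lnrpc.ChannelPoint) wire.OutPoint {

	txid, err := lnrpc.GetChanPointFundingTxid(cp)
	require.NoError(t, err, "unable to get funding txid")

	return wire.OutPoint{
		Hash:  *txid,
		Index: cp.OutputIndex,
	}
}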
// assertTimeLockSwept checks that when dave's outputs mature, he claims
// them. This function will advance 2 blocks such that all the pending
// closing transactions will be swept in the end.

View File

@ -1,15 +1,14 @@
package itest
import (
"context"
"fmt"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/funding"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
@ -17,14 +16,14 @@ import (
// testChannelBalance creates a new channel between Alice and Bob, then
// checks that the channel balance equals the amount specified during
// channel creation.
func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) {
func testChannelBalance(ht *lntemp.HarnessTest) {
// Open a channel with 0.16 BTC between Alice and Bob, ensuring the
// channel has been opened properly.
amount := funding.MaxBtcFundingAmount
// Creates a helper closure to be used below which asserts the proper
// response to a channel balance RPC.
checkChannelBalance := func(node *lntest.HarnessNode,
checkChannelBalance := func(node *node.HarnessNode,
local, remote btcutil.Amount) {
expectedResponse := &lnrpc.ChannelBalanceResponse{
@ -45,46 +44,28 @@ func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) {
// Deprecated fields.
Balance: int64(local),
}
assertChannelBalanceResp(t, node, expectedResponse)
ht.AssertChannelBalanceResp(node, expectedResponse)
}
// Before beginning, make sure alice and bob are connected.
net.EnsureConnected(t.t, net.Alice, net.Bob)
alice, bob := ht.Alice, ht.Bob
ht.EnsureConnected(alice, bob)
chanPoint := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
Amt: amount,
},
chanPoint := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: amount},
)
// Wait for both Alice and Bob to recognize this new channel.
err := net.Alice.WaitForNetworkChannelOpen(chanPoint)
if err != nil {
t.Fatalf("alice didn't advertise channel before "+
"timeout: %v", err)
}
err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
if err != nil {
t.Fatalf("bob didn't advertise channel before "+
"timeout: %v", err)
}
cType, err := channelCommitType(net.Alice, chanPoint)
if err != nil {
t.Fatalf("unable to get channel type: %v", err)
}
cType := ht.GetChannelCommitType(alice, chanPoint)
// As this is a single funder channel, Alice's balance should be
// exactly the funding amount minus the commitment fee, since no state
// transitions have taken place yet.
checkChannelBalance(net.Alice, amount-calcStaticFee(cType, 0), 0)
checkChannelBalance(alice, amount-calcStaticFee(cType, 0), 0)
// Ensure Bob currently has no available balance within the channel.
checkChannelBalance(net.Bob, 0, amount-calcStaticFee(cType, 0))
checkChannelBalance(bob, 0, amount-calcStaticFee(cType, 0))
// Finally close the channel between Alice and Bob, asserting that the
// channel has been properly closed on-chain.
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
ht.CloseChannel(alice, chanPoint)
}
// testChannelUnsettledBalance will test that the UnsettledBalance field
@ -92,13 +73,12 @@ func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) {
// Alice will send Htlcs to Carol while she is in hodl mode. This will result
// in a buildup of pending HTLCs. We expect the channel's unsettled
// balance to equal the sum of all the pending HTLCs.
func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) {
func testChannelUnsettledBalance(ht *lntemp.HarnessTest) {
const chanAmt = btcutil.Amount(1000000)
ctxb := context.Background()
// Creates a helper closure to be used below which asserts the proper
// response to a channel balance RPC.
checkChannelBalance := func(node *lntest.HarnessNode,
checkChannelBalance := func(node *node.HarnessNode,
local, remote, unsettledLocal, unsettledRemote btcutil.Amount) {
expectedResponse := &lnrpc.ChannelBalanceResponse{
@ -131,44 +111,25 @@ func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) {
// Deprecated fields.
Balance: int64(local),
}
assertChannelBalanceResp(t, node, expectedResponse)
ht.AssertChannelBalanceResp(node, expectedResponse)
}
// Create carol in hodl mode.
carol := net.NewNode(t.t, "Carol", []string{"--hodl.exit-settle"})
defer shutdownAndAssert(net, t, carol)
carol := ht.NewNode("Carol", []string{"--hodl.exit-settle"})
// Connect Alice to Carol.
net.ConnectNodes(t.t, net.Alice, carol)
alice := ht.Alice
ht.ConnectNodes(alice, carol)
// Open a channel between Alice and Carol.
chanPointAlice := openChannelAndAssert(
t, net, net.Alice, carol,
lntest.OpenChannelParams{
Amt: chanAmt,
},
chanPointAlice := ht.OpenChannel(
alice, carol, lntemp.OpenChannelParams{Amt: chanAmt},
)
// Wait for Alice and Carol to receive the channel edge from the
// funding manager.
err := net.Alice.WaitForNetworkChannelOpen(chanPointAlice)
if err != nil {
t.Fatalf("alice didn't see the alice->carol channel before "+
"timeout: %v", err)
}
err = carol.WaitForNetworkChannelOpen(chanPointAlice)
if err != nil {
t.Fatalf("alice didn't see the alice->carol channel before "+
"timeout: %v", err)
}
cType, err := channelCommitType(net.Alice, chanPointAlice)
require.NoError(t.t, err, "unable to get channel type")
cType := ht.GetChannelCommitType(alice, chanPointAlice)
// Check alice's channel balance, which should have zero remote and zero
// pending balance.
checkChannelBalance(net.Alice, chanAmt-calcStaticFee(cType, 0), 0, 0, 0)
checkChannelBalance(alice, chanAmt-calcStaticFee(cType, 0), 0, 0, 0)
// Check carol's channel balance, which should have zero local and zero
// pending balance.
@ -181,79 +142,61 @@ func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) {
)
// Simultaneously send numInvoices payments from Alice to Carol.
carolPubKey := carol.PubKey[:]
errChan := make(chan error)
for i := 0; i < numInvoices; i++ {
go func() {
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
_, err := net.Alice.RouterClient.SendPaymentV2(ctxt,
&routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(payAmt),
PaymentHash: makeFakePayHash(t),
FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
})
if err != nil {
errChan <- err
req := &routerrpc.SendPaymentRequest{
Dest: carol.PubKey[:],
Amt: int64(payAmt),
PaymentHash: ht.Random32Bytes(),
FinalCltvDelta: finalCltvDelta,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
alice.RPC.SendPayment(req)
}()
}
// There should be a number of PendingHtlcs equal
// to the amount of Invoices sent.
ht.AssertNumActiveHtlcs(alice, numInvoices)
ht.AssertNumActiveHtlcs(carol, numInvoices)
// Set the amount expected for the Unsettled Balance for this channel.
expectedBalance := numInvoices * payAmt
// Test that the UnsettledBalance for both Alice and Carol
// is equal to the amount of invoices * payAmt.
var unsettledErr error
nodes := []*lntest.HarnessNode{net.Alice, carol}
err = wait.Predicate(func() bool {
// There should be a number of PendingHtlcs equal
// to the amount of Invoices sent.
unsettledErr = assertNumActiveHtlcs(nodes, numInvoices)
if unsettledErr != nil {
return false
checkUnsettledBalance := func() error {
// Get channel info for the Alice.
chanInfo := ht.QueryChannelByChanPoint(alice, chanPointAlice)
// Check that UnsettledBalance is what we expect.
if int(chanInfo.UnsettledBalance) != expectedBalance {
return fmt.Errorf("unsettled balance failed "+
"expected: %v, received: %v", expectedBalance,
chanInfo.UnsettledBalance)
}
// Set the amount expected for the Unsettled Balance for
// this channel.
expectedBalance := numInvoices * payAmt
// Get channel info for the Carol.
chanInfo = ht.QueryChannelByChanPoint(carol, chanPointAlice)
// Check each nodes UnsettledBalance field.
for _, node := range nodes {
// Get channel info for the node.
chanInfo, err := getChanInfo(node)
if err != nil {
unsettledErr = err
return false
}
// Check that UnsettledBalance is what we expect.
if int(chanInfo.UnsettledBalance) != expectedBalance {
unsettledErr = fmt.Errorf("unsettled balance failed "+
"expected: %v, received: %v", expectedBalance,
chanInfo.UnsettledBalance)
return false
}
// Check that UnsettledBalance is what we expect.
if int(chanInfo.UnsettledBalance) != expectedBalance {
return fmt.Errorf("unsettled balance failed "+
"expected: %v, received: %v", expectedBalance,
chanInfo.UnsettledBalance)
}
return true
}, defaultTimeout)
if err != nil {
t.Fatalf("unsettled balace error: %v", unsettledErr)
}
// Check for payment errors.
select {
case err := <-errChan:
t.Fatalf("payment error: %v", err)
default:
return nil
}
require.NoError(ht, wait.NoError(checkUnsettledBalance, defaultTimeout),
"timeout while checking unsettled balance")
// Check alice's channel balance, which should have a remote unsettled
// balance that equals the amount of invoices * payAmt. The remote
// balance remains zero.
aliceLocal := chanAmt - calcStaticFee(cType, 0) - numInvoices*payAmt
checkChannelBalance(net.Alice, aliceLocal, 0, 0, numInvoices*payAmt)
checkChannelBalance(alice, aliceLocal, 0, 0, numInvoices*payAmt)
// Check carol's channel balance, which should have a local unsettled
// balance that equals the amount of invoices * payAmt. The local
@ -261,8 +204,5 @@ func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) {
checkChannelBalance(carol, 0, aliceLocal, numInvoices*payAmt, 0)
// Force and assert the channel closure.
closeChannelAndAssert(t, net, net.Alice, chanPointAlice, true)
// Cleanup by mining the force close and sweep transaction.
cleanupForceClose(t, net, net.Alice, chanPointAlice)
ht.ForceCloseChannel(alice, chanPointAlice)
}
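checkUnsettledBalance above is retried via wait.NoError until it stops returning an error or the timeout elapses. A self-contained sketch of that polling idiom, assuming the wait.NoError(func() error, time.Duration) error signature used throughout this diff (the helper name below is hypothetical):
// pollUntilEqual illustrates the retry idiom: the closure keeps returning
// a descriptive error until the observed value matches, and wait.NoError
// re-invokes it until the timeout expires.
func pollUntilEqual(observe func() int64, want int64,
	timeout time.Duration) error {

	return wait.NoError(func() error {
		if got := observe(); got != want {
			return fmt.Errorf("unsettled balance mismatch: "+
				"want %d, got %d", want, got)
		}

		return nil
	}, timeout)
}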

File diff suppressed because it is too large Load Diff

View File

@ -1,13 +1,9 @@
package itest
import (
"bytes"
"context"
"fmt"
"io"
"strings"
"testing"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/chainreg"
@ -17,7 +13,6 @@ import (
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
@ -222,134 +217,65 @@ func testUpdateChanStatus(ht *lntemp.HarnessTest) {
// testUnannouncedChannels checks that unannounced channels are not returned
// by the describeGraph RPC request unless explicitly asked for.
func testUnannouncedChannels(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testUnannouncedChannels(ht *lntemp.HarnessTest) {
amount := funding.MaxBtcFundingAmount
alice, bob := ht.Alice, ht.Bob
// Open a channel between Alice and Bob, ensuring the
// channel has been opened properly.
chanOpenUpdate := openChannelStream(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
Amt: amount,
},
chanOpenUpdate := ht.OpenChannelAssertStream(
alice, bob, lntemp.OpenChannelParams{Amt: amount},
)
// Mine 2 blocks, and check that the channel is opened but not yet
// announced to the network.
mineBlocks(t, net, 2, 1)
ht.MineBlocksAndAssertNumTxes(2, 1)
// One block is enough to make the channel ready for use, since the
// nodes have defaultNumConfs=1 set.
fundingChanPoint, err := net.WaitForChannelOpen(chanOpenUpdate)
if err != nil {
t.Fatalf("error while waiting for channel open: %v", err)
}
fundingChanPoint := ht.WaitForChannelOpenEvent(chanOpenUpdate)
// Alice should have 1 edge in her graph.
req := &lnrpc.ChannelGraphRequest{
IncludeUnannounced: true,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
chanGraph, err := net.Alice.DescribeGraph(ctxt, req)
if err != nil {
t.Fatalf("unable to query alice's graph: %v", err)
}
numEdges := len(chanGraph.Edges)
if numEdges != 1 {
t.Fatalf("expected to find 1 edge in the graph, found %d", numEdges)
}
ht.AssertNumEdges(alice, 1, true)
// Channels should not be announced yet, hence Alice should have no
// announced edges in her graph.
req.IncludeUnannounced = false
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanGraph, err = net.Alice.DescribeGraph(ctxt, req)
if err != nil {
t.Fatalf("unable to query alice's graph: %v", err)
}
numEdges = len(chanGraph.Edges)
if numEdges != 0 {
t.Fatalf("expected to find 0 announced edges in the graph, found %d",
numEdges)
}
ht.AssertNumEdges(alice, 0, false)
// Mine 4 more blocks, and check that the channel is now announced.
mineBlocks(t, net, 4, 0)
ht.MineBlocks(4)
// Give the network a chance to learn that auth proof is confirmed.
var predErr error
err = wait.Predicate(func() bool {
// The channel should now be announced. Check that Alice has 1
// announced edge.
req.IncludeUnannounced = false
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanGraph, err = net.Alice.DescribeGraph(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query alice's graph: %v", err)
return false
}
numEdges = len(chanGraph.Edges)
if numEdges != 1 {
predErr = fmt.Errorf("expected to find 1 announced edge in "+
"the graph, found %d", numEdges)
return false
}
return true
}, defaultTimeout)
if err != nil {
t.Fatalf("%v", predErr)
}
// The channel should now be announced. Check that Alice has 1 announced
// edge.
req.IncludeUnannounced = false
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanGraph, err = net.Alice.DescribeGraph(ctxt, req)
if err != nil {
t.Fatalf("unable to query alice's graph: %v", err)
}
numEdges = len(chanGraph.Edges)
if numEdges != 1 {
t.Fatalf("expected to find 1 announced edge in the graph, found %d",
numEdges)
}
ht.AssertNumEdges(alice, 1, false)
// Close the channel used during the test.
closeChannelAndAssert(t, net, net.Alice, fundingChanPoint, false)
ht.CloseChannel(alice, fundingChanPoint)
}
func testGraphTopologyNotifications(net *lntest.NetworkHarness, t *harnessTest) {
t.t.Run("pinned", func(t *testing.T) {
ht := newHarnessTest(t, net)
testGraphTopologyNtfns(net, ht, true)
func testGraphTopologyNotifications(ht *lntemp.HarnessTest) {
ht.Run("pinned", func(t *testing.T) {
subT := ht.Subtest(t)
testGraphTopologyNtfns(subT, true)
})
t.t.Run("unpinned", func(t *testing.T) {
ht := newHarnessTest(t, net)
testGraphTopologyNtfns(net, ht, false)
ht.Run("unpinned", func(t *testing.T) {
subT := ht.Subtest(t)
testGraphTopologyNtfns(subT, false)
})
}
func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned bool) {
ctxb := context.Background()
func testGraphTopologyNtfns(ht *lntemp.HarnessTest, pinned bool) {
const chanAmt = funding.MaxBtcFundingAmount
// Spin up Bob first, since we will need to grab his pubkey when
// starting Alice to test pinned syncing.
bob := net.NewNode(t.t, "bob", nil)
defer shutdownAndAssert(net, t, bob)
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
bobInfo, err := bob.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
require.NoError(t.t, err)
bob := ht.Bob
bobInfo := bob.RPC.GetInfo()
bobPubkey := bobInfo.IdentityPubkey
// Restart Bob as he may have leftover announcements from previous
// tests, causing the graph to be unsynced.
ht.RestartNodeWithExtraArgs(bob, nil)
// For unpinned syncing, start Alice as usual. Otherwise grab Bob's
// pubkey to include in his pinned syncer set.
var aliceArgs []string
@ -360,169 +286,64 @@ func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned b
}
}
alice := net.NewNode(t.t, "alice", aliceArgs)
defer shutdownAndAssert(net, t, alice)
alice := ht.Alice
ht.RestartNodeWithExtraArgs(alice, aliceArgs)
// Connect Alice and Bob.
net.EnsureConnected(t.t, alice, bob)
// Alice stimmy.
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice)
// Bob stimmy.
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, bob)
ht.EnsureConnected(alice, bob)
// Assert that Bob has the correct sync type before proceeding.
if pinned {
assertSyncType(t, alice, bobPubkey, lnrpc.Peer_PINNED_SYNC)
assertSyncType(ht, alice, bobPubkey, lnrpc.Peer_PINNED_SYNC)
} else {
assertSyncType(t, alice, bobPubkey, lnrpc.Peer_ACTIVE_SYNC)
assertSyncType(ht, alice, bobPubkey, lnrpc.Peer_ACTIVE_SYNC)
}
// Regardless of syncer type, ensure that both peers report having
// completed their initial sync before continuing to make a channel.
waitForGraphSync(t, alice)
// Let Alice subscribe to graph notifications.
graphSub := subscribeGraphNotifications(ctxb, t, alice)
defer close(graphSub.quit)
ht.WaitForGraphSync(alice)
// Open a new channel between Alice and Bob.
chanPoint := openChannelAndAssert(
t, net, alice, bob,
lntest.OpenChannelParams{
Amt: chanAmt,
},
chanPoint := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: chanAmt},
)
// The channel opening above should have triggered a few notifications
// sent to the notification client. We'll expect two channel updates,
// and two node announcements.
var numChannelUpds int
var numNodeAnns int
for numChannelUpds < 2 && numNodeAnns < 2 {
select {
// Ensure that a new update for both created edges is properly
// dispatched to our registered client.
case graphUpdate := <-graphSub.updateChan:
// Process all channel updates presented in this update
// message.
for _, chanUpdate := range graphUpdate.ChannelUpdates {
switch chanUpdate.AdvertisingNode {
case alice.PubKeyStr:
case bob.PubKeyStr:
default:
t.Fatalf("unknown advertising node: %v",
chanUpdate.AdvertisingNode)
}
switch chanUpdate.ConnectingNode {
case alice.PubKeyStr:
case bob.PubKeyStr:
default:
t.Fatalf("unknown connecting node: %v",
chanUpdate.ConnectingNode)
}
ht.AssertNumChannelUpdates(alice, chanPoint, 2)
ht.AssertNumNodeAnns(alice, alice.PubKeyStr, 1)
ht.AssertNumNodeAnns(alice, bob.PubKeyStr, 1)
if chanUpdate.Capacity != int64(chanAmt) {
t.Fatalf("channel capacities mismatch:"+
" expected %v, got %v", chanAmt,
btcutil.Amount(chanUpdate.Capacity))
}
numChannelUpds++
}
_, blockHeight := ht.Miner.GetBestBlock()
for _, nodeUpdate := range graphUpdate.NodeUpdates {
switch nodeUpdate.IdentityKey {
case alice.PubKeyStr:
case bob.PubKeyStr:
default:
t.Fatalf("unknown node: %v",
nodeUpdate.IdentityKey)
}
numNodeAnns++
}
case err := <-graphSub.errChan:
t.Fatalf("unable to recv graph update: %v", err)
case <-time.After(time.Second * 10):
t.Fatalf("timeout waiting for graph notifications, "+
"only received %d/2 chanupds and %d/2 nodeanns",
numChannelUpds, numNodeAnns)
}
}
_, blockHeight, err := net.Miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}
// Now we'll test that updates are properly sent after channels are closed
// within the network.
closeChannelAndAssert(t, net, alice, chanPoint, false)
// Now we'll test that updates are properly sent after channels are
// closed within the network.
ht.CloseChannel(alice, chanPoint)
// Now that the channel has been closed, we should receive a
// notification indicating so.
out:
for {
select {
case graphUpdate := <-graphSub.updateChan:
if len(graphUpdate.ClosedChans) != 1 {
continue
}
closedChan := ht.AssertTopologyChannelClosed(alice, chanPoint)
closedChan := graphUpdate.ClosedChans[0]
if closedChan.ClosedHeight != uint32(blockHeight+1) {
t.Fatalf("close heights of channel mismatch: "+
"expected %v, got %v", blockHeight+1,
closedChan.ClosedHeight)
}
chanPointTxid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
closedChanTxid, err := lnrpc.GetChanPointFundingTxid(
closedChan.ChanPoint,
)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
if !bytes.Equal(closedChanTxid[:], chanPointTxid[:]) {
t.Fatalf("channel point hash mismatch: "+
"expected %v, got %v", chanPointTxid,
closedChanTxid)
}
if closedChan.ChanPoint.OutputIndex != chanPoint.OutputIndex {
t.Fatalf("output index mismatch: expected %v, "+
"got %v", chanPoint.OutputIndex,
closedChan.ChanPoint)
}
require.Equal(ht, uint32(blockHeight+1), closedChan.ClosedHeight,
"close heights of channel mismatch")
break out
case err := <-graphSub.errChan:
t.Fatalf("unable to recv graph update: %v", err)
case <-time.After(time.Second * 10):
t.Fatalf("notification for channel closure not " +
"sent")
}
}
fundingTxid := ht.OutPointFromChannelPoint(chanPoint)
closeTxid := ht.OutPointFromChannelPoint(closedChan.ChanPoint)
require.EqualValues(ht, fundingTxid, closeTxid,
"channel point hash mismatch")
// For the final portion of the test, we'll ensure that once a new node
// appears in the network, the proper notification is dispatched. Note
// that a node that does not have any channels open is ignored, so first
// we disconnect Alice and Bob, open a channel between Bob and Carol,
// and finally connect Alice to Bob again.
if err := net.DisconnectNodes(alice, bob); err != nil {
t.Fatalf("unable to disconnect alice and bob: %v", err)
}
carol := net.NewNode(t.t, "Carol", nil)
defer shutdownAndAssert(net, t, carol)
ht.DisconnectNodes(alice, bob)
net.ConnectNodes(t.t, bob, carol)
chanPoint = openChannelAndAssert(
t, net, bob, carol,
lntest.OpenChannelParams{
Amt: chanAmt,
},
carol := ht.NewNode("Carol", nil)
ht.ConnectNodes(bob, carol)
chanPoint = ht.OpenChannel(
bob, carol, lntemp.OpenChannelParams{Amt: chanAmt},
)
// Reconnect Alice and Bob. This should result in the nodes syncing up
@ -531,76 +352,29 @@ out:
// and Carol. Note that we will also receive a node announcement from
// Bob, since a node will update its node announcement after a new
// channel is opened.
net.EnsureConnected(t.t, alice, bob)
ht.EnsureConnected(alice, bob)
// We should receive an update advertising the newly connected node,
// Bob's new node announcement, and the channel between Bob and Carol.
numNodeAnns = 0
numChannelUpds = 0
for numChannelUpds < 2 && numNodeAnns < 1 {
select {
case graphUpdate := <-graphSub.updateChan:
for _, nodeUpdate := range graphUpdate.NodeUpdates {
switch nodeUpdate.IdentityKey {
case carol.PubKeyStr:
case bob.PubKeyStr:
default:
t.Fatalf("unknown node update pubey: %v",
nodeUpdate.IdentityKey)
}
numNodeAnns++
}
for _, chanUpdate := range graphUpdate.ChannelUpdates {
switch chanUpdate.AdvertisingNode {
case carol.PubKeyStr:
case bob.PubKeyStr:
default:
t.Fatalf("unknown advertising node: %v",
chanUpdate.AdvertisingNode)
}
switch chanUpdate.ConnectingNode {
case carol.PubKeyStr:
case bob.PubKeyStr:
default:
t.Fatalf("unknown connecting node: %v",
chanUpdate.ConnectingNode)
}
if chanUpdate.Capacity != int64(chanAmt) {
t.Fatalf("channel capacities mismatch:"+
" expected %v, got %v", chanAmt,
btcutil.Amount(chanUpdate.Capacity))
}
numChannelUpds++
}
case err := <-graphSub.errChan:
t.Fatalf("unable to recv graph update: %v", err)
case <-time.After(time.Second * 10):
t.Fatalf("timeout waiting for graph notifications, "+
"only received %d/2 chanupds and %d/2 nodeanns",
numChannelUpds, numNodeAnns)
}
}
ht.AssertNumChannelUpdates(alice, chanPoint, 2)
ht.AssertNumNodeAnns(alice, bob.PubKeyStr, 1)
// Close the channel between Bob and Carol.
closeChannelAndAssert(t, net, bob, chanPoint, false)
ht.CloseChannel(bob, chanPoint)
}
// testNodeAnnouncement ensures that when a node is started with one or more
// external IP addresses specified on the command line, those addresses are
// announced to the network and reported in the network graph.
func testNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
aliceSub := subscribeGraphNotifications(ctxb, t, net.Alice)
defer close(aliceSub.quit)
func testNodeAnnouncement(ht *lntemp.HarnessTest) {
alice, bob := ht.Alice, ht.Bob
advertisedAddrs := []string{
"192.168.1.1:8333",
"[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8337",
"bkb6azqggsaiskzi.onion:9735",
"fomvuglh6h6vcag73xo5t5gv56ombih3zr2xvplkpbfd7wrog4swjwid.onion:1234",
"fomvuglh6h6vcag73xo5t5gv56ombih3zr2xvplkpbfd7wrog4swj" +
"wid.onion:1234",
}
var lndArgs []string
@ -608,29 +382,17 @@ func testNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
lndArgs = append(lndArgs, "--externalip="+addr)
}
dave := net.NewNode(t.t, "Dave", lndArgs)
defer shutdownAndAssert(net, t, dave)
dave := ht.NewNode("Dave", lndArgs)
// We must let Dave have an open channel before he can send a node
// announcement, so we open a channel with Bob.
net.ConnectNodes(t.t, net.Bob, dave)
// Alice shouldn't receive any new updates yet since the channel has yet
// to be opened.
select {
case <-aliceSub.updateChan:
t.Fatalf("received unexpected update from dave")
case <-time.After(time.Second):
}
ht.ConnectNodes(bob, dave)
// We'll then go ahead and open a channel between Bob and Dave. This
// ensures that Alice receives the node announcement from Bob as part of
// the announcement broadcast.
chanPoint := openChannelAndAssert(
t, net, net.Bob, dave,
lntest.OpenChannelParams{
Amt: 1000000,
},
chanPoint := ht.OpenChannel(
bob, dave, lntemp.OpenChannelParams{Amt: 1000000},
)
assertAddrs := func(addrsFound []string, targetAddrs ...string) {
@ -640,151 +402,27 @@ func testNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
}
for _, addr := range targetAddrs {
if _, ok := addrs[addr]; !ok {
t.Fatalf("address %v not found in node "+
"announcement", addr)
}
_, ok := addrs[addr]
require.True(ht, ok, "address %v not found in node "+
"announcement", addr)
}
}
waitForAddrsInUpdate := func(graphSub graphSubscription,
nodePubKey string, targetAddrs ...string) {
for {
select {
case graphUpdate := <-graphSub.updateChan:
for _, update := range graphUpdate.NodeUpdates {
if update.IdentityKey == nodePubKey {
assertAddrs(
update.Addresses, // nolint:staticcheck
targetAddrs...,
)
return
}
}
case err := <-graphSub.errChan:
t.Fatalf("unable to recv graph update: %v", err)
case <-time.After(defaultTimeout):
t.Fatalf("did not receive node ann update")
}
}
}
// We'll then wait for Alice to receive Dave's node announcement
// including the expected advertised addresses from Bob since they
// should already be connected.
waitForAddrsInUpdate(
aliceSub, dave.PubKeyStr, advertisedAddrs...,
)
allUpdates := ht.AssertNumNodeAnns(alice, dave.PubKeyStr, 1)
nodeUpdate := allUpdates[len(allUpdates)-1]
assertAddrs(nodeUpdate.Addresses, advertisedAddrs...)
// Close the channel between Bob and Dave.
closeChannelAndAssert(t, net, net.Bob, chanPoint, false)
}
// graphSubscription houses the proxied update and error chans for a node's
// graph subscriptions.
type graphSubscription struct {
updateChan chan *lnrpc.GraphTopologyUpdate
errChan chan error
quit chan struct{}
}
// subscribeGraphNotifications subscribes to channel graph updates and launches
// a goroutine that forwards these to the returned channel.
func subscribeGraphNotifications(ctxb context.Context, t *harnessTest,
node *lntest.HarnessNode) graphSubscription {
// We'll first start by establishing a notification client which will
// send us notifications upon detected changes in the channel graph.
req := &lnrpc.GraphTopologySubscription{}
ctx, cancelFunc := context.WithCancel(ctxb)
topologyClient, err := node.SubscribeChannelGraph(ctx, req)
require.NoError(t.t, err, "unable to create topology client")
// We'll launch a goroutine that will be responsible for proxying all
// notifications recv'd from the client into the channel below.
errChan := make(chan error, 1)
quit := make(chan struct{})
graphUpdates := make(chan *lnrpc.GraphTopologyUpdate, 20)
go func() {
for {
defer cancelFunc()
select {
case <-quit:
return
default:
graphUpdate, err := topologyClient.Recv()
select {
case <-quit:
return
default:
}
if err == io.EOF {
return
} else if err != nil {
select {
case errChan <- err:
case <-quit:
}
return
}
select {
case graphUpdates <- graphUpdate:
case <-quit:
return
}
}
}
}()
return graphSubscription{
updateChan: graphUpdates,
errChan: errChan,
quit: quit,
}
}
// waitForNodeAnnUpdates monitors the nodeAnnUpdates until we get one for
// the expected node and asserts that has the expected information.
func waitForNodeAnnUpdates(graphSub graphSubscription, nodePubKey string,
expectedUpdate *lnrpc.NodeUpdate, t *harnessTest) {
for {
select {
case graphUpdate := <-graphSub.updateChan:
for _, update := range graphUpdate.NodeUpdates {
if update.IdentityKey == nodePubKey {
assertNodeAnnouncement(
t, update, expectedUpdate,
)
return
}
}
case err := <-graphSub.errChan:
t.Fatalf("unable to recv graph update: %v", err)
case <-time.After(defaultTimeout):
t.Fatalf("did not receive node ann update")
}
}
ht.CloseChannel(bob, chanPoint)
}
// testUpdateNodeAnnouncement ensures that the RPC endpoint validates
// the requests correctly and that the new node announcement is broadcast
// with the right information after updating our node.
func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
// context timeout for the whole test.
ctxt, cancel := context.WithTimeout(
context.Background(), defaultTimeout,
)
defer cancel()
// Launch notification clients for alice, such that we can
// get notified when there are updates in the graph.
aliceSub := subscribeGraphNotifications(ctxt, t, net.Alice)
defer close(aliceSub.quit)
func testUpdateNodeAnnouncement(ht *lntemp.HarnessTest) {
alice, bob := ht.Alice, ht.Bob
var lndArgs []string
@ -793,20 +431,42 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
"192.168.1.1:8333",
"[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8337",
"bkb6azqggsaiskzi.onion:9735",
"fomvuglh6h6vcag73xo5t5gv56ombih3zr2xvplkpbfd7wrog4swjwid.onion:1234",
"fomvuglh6h6vcag73xo5t5gv56ombih3zr2xvplkpbfd7wrog4swj" +
"wid.onion:1234",
}
for _, addr := range extraAddrs {
lndArgs = append(lndArgs, "--externalip="+addr)
}
dave := net.NewNode(t.t, "Dave", lndArgs)
defer shutdownAndAssert(net, t, dave)
dave := ht.NewNode("Dave", lndArgs)
// Get dave default information so we can compare
// it lately with the brodcasted updates.
nodeInfoReq := &lnrpc.GetInfoRequest{}
resp, err := dave.GetInfo(ctxt, nodeInfoReq)
require.NoError(t.t, err, "unable to get dave's information")
// assertNodeAnn is a helper closure that checks a given node update
// from Dave is seen by Alice.
assertNodeAnn := func(expected *lnrpc.NodeUpdate) {
err := wait.NoError(func() error {
// Get a list of node updates seen by Alice.
updates := alice.Watcher.GetNodeUpdates(dave.PubKeyStr)
// Check at least one of the updates matches the given
// node update.
for _, update := range updates {
err := compareNodeAnns(update, expected)
// Found a match, return nil.
if err == nil {
return nil
}
}
// We've checked all the updates and no match was found.
return fmt.Errorf("alice didn't see the update: %v",
expected)
}, defaultTimeout)
require.NoError(ht, err, "assertNodeAnn failed")
}
// Get dave's default information so we can compare it later with the
// broadcast updates.
resp := dave.RPC.GetInfo()
defaultAddrs := make([]*lnrpc.NodeAddress, 0, len(resp.Uris))
for _, uri := range resp.GetUris() {
values := strings.Split(uri, "@")
@ -823,9 +483,8 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
// update this one for another one unset by default at random.
featureBit := lnrpc.FeatureBit_WUMBO_CHANNELS_REQ
featureIdx := uint32(featureBit)
if _, ok := resp.Features[featureIdx]; ok {
t.Fatalf("unexpected feature bit enabled by default")
}
_, ok := resp.Features[featureIdx]
require.False(ht, ok, "unexpected feature bit enabled by default")
defaultDaveNodeAnn := &lnrpc.NodeUpdate{
Alias: resp.Alias,
@ -835,44 +494,33 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
// Dave must have an open channel before he can send a node
// announcement, so we open a channel with Bob.
net.ConnectNodes(t.t, net.Bob, dave)
ht.ConnectNodes(bob, dave)
// Go ahead and open a channel between Bob and Dave. This
// ensures that Alice receives the node announcement from Bob as part of
// the announcement broadcast.
chanPoint := openChannelAndAssert(
t, net, net.Bob, dave,
lntest.OpenChannelParams{
chanPoint := ht.OpenChannel(
bob, dave, lntemp.OpenChannelParams{
Amt: 1000000,
},
)
require.NoError(t.t, err, "unexpected error opening a channel")
// Wait for Alice to receive dave's node announcement with the default
// values.
waitForNodeAnnUpdates(
aliceSub, dave.PubKeyStr, defaultDaveNodeAnn, t,
)
assertNodeAnn(defaultDaveNodeAnn)
// We cannot differentiate between requests with Alias = "" and requests
// that do not provide that field. If a user sets Alias = "" in the request
// the field will simply be ignored. The request must fail because no
// modifiers are applied.
invalidNodeAnnReq := &peersrpc.NodeAnnouncementUpdateRequest{
Alias: "",
}
_, err = dave.UpdateNodeAnnouncement(ctxt, invalidNodeAnnReq)
require.Error(t.t, err, "requests without modifiers should field")
// We cannot differentiate between requests with Alias = "" and
// requests that do not provide that field. If a user sets Alias = ""
// in the request the field will simply be ignored. The request must
// fail because no modifiers are applied.
invalidNodeAnnReq := &peersrpc.NodeAnnouncementUpdateRequest{Alias: ""}
dave.RPC.UpdateNodeAnnouncementErr(invalidNodeAnnReq)
// Alias too long.
invalidNodeAnnReq = &peersrpc.NodeAnnouncementUpdateRequest{
Alias: strings.Repeat("a", 50),
}
_, err = dave.UpdateNodeAnnouncement(ctxt, invalidNodeAnnReq)
require.Error(t.t, err, "failed to validate an invalid alias for an "+
"update node announcement request")
dave.RPC.UpdateNodeAnnouncementErr(invalidNodeAnnReq)
// Update Node.
newAlias := "new-alias"
@ -912,8 +560,7 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
FeatureUpdates: updateFeatureActions,
}
response, err := dave.UpdateNodeAnnouncement(ctxt, nodeAnnReq)
require.NoError(t.t, err, "unable to update dave's node announcement")
response := dave.RPC.UpdateNodeAnnouncement(nodeAnnReq)
expectedOps := map[string]int{
"features": 1,
@ -921,7 +568,7 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
"alias": 1,
"addresses": 3,
}
assertUpdateNodeAnnouncementResponse(t, response, expectedOps)
assertUpdateNodeAnnouncementResponse(ht, response, expectedOps)
newNodeAddresses := []*lnrpc.NodeAddress{}
// We removed the first address.
@ -942,28 +589,18 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
// We'll then wait for Alice to receive dave's node announcement
// with the new values.
waitForNodeAnnUpdates(
aliceSub, dave.PubKeyStr, newDaveNodeAnn, t,
)
assertNodeAnn(newDaveNodeAnn)
// Check that the feature bit was set correctly.
resp, err = dave.GetInfo(ctxt, nodeInfoReq)
require.NoError(t.t, err, "unable to get dave's information")
if _, ok := resp.Features[featureIdx]; !ok {
t.Fatalf("failed to set feature bit")
}
resp = dave.RPC.GetInfo()
_, ok = resp.Features[featureIdx]
require.True(ht, ok, "failed to set feature bit")
// Check that we cannot set a feature bit that is already set.
nodeAnnReq = &peersrpc.NodeAnnouncementUpdateRequest{
FeatureUpdates: updateFeatureActions,
}
_, err = dave.UpdateNodeAnnouncement(ctxt, nodeAnnReq)
require.Error(
t.t, err, "missing expected error: cannot set a feature bit "+
"that is already set",
)
dave.RPC.UpdateNodeAnnouncementErr(nodeAnnReq)
// Check that we can unset feature bits.
updateFeatureActions = []*peersrpc.UpdateFeatureAction{
@ -976,33 +613,104 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
nodeAnnReq = &peersrpc.NodeAnnouncementUpdateRequest{
FeatureUpdates: updateFeatureActions,
}
response, err = dave.UpdateNodeAnnouncement(ctxt, nodeAnnReq)
require.NoError(t.t, err, "unable to update dave's node announcement")
response = dave.RPC.UpdateNodeAnnouncement(nodeAnnReq)
expectedOps = map[string]int{
"features": 1,
}
assertUpdateNodeAnnouncementResponse(t, response, expectedOps)
assertUpdateNodeAnnouncementResponse(ht, response, expectedOps)
resp, err = dave.GetInfo(ctxt, nodeInfoReq)
require.NoError(t.t, err, "unable to get dave's information")
if _, ok := resp.Features[featureIdx]; ok {
t.Fatalf("failed to unset feature bit")
}
resp = dave.RPC.GetInfo()
_, ok = resp.Features[featureIdx]
require.False(ht, ok, "failed to unset feature bit")
// Check that we cannot unset a feature bit that is already unset.
nodeAnnReq = &peersrpc.NodeAnnouncementUpdateRequest{
FeatureUpdates: updateFeatureActions,
}
_, err = dave.UpdateNodeAnnouncement(ctxt, nodeAnnReq)
require.Error(
t.t, err, "missing expected error: cannot unset a feature bit "+
"that is already unset",
)
dave.RPC.UpdateNodeAnnouncementErr(nodeAnnReq)
// Close the channel between Bob and Dave.
closeChannelAndAssert(t, net, net.Bob, chanPoint, false)
ht.CloseChannel(bob, chanPoint)
}
// assertSyncType asserts that the peer has an expected syncType.
//
// NOTE: only used for tests in this file.
func assertSyncType(ht *lntemp.HarnessTest, hn *node.HarnessNode,
peer string, syncType lnrpc.Peer_SyncType) {
resp := hn.RPC.ListPeers()
for _, rpcPeer := range resp.Peers {
if rpcPeer.PubKey != peer {
continue
}
require.Equal(ht, syncType, rpcPeer.SyncType)
return
}
ht.Fatalf("unable to find peer: %s", peer)
}
// compareNodeAnns checks that two node announcements match, returning an
// error if they don't.
//
// NOTE: only used for tests in this file.
func compareNodeAnns(n1, n2 *lnrpc.NodeUpdate) error {
// Alias should match.
if n1.Alias != n2.Alias {
return fmt.Errorf("alias not match")
}
// Color should match.
if n1.Color != n2.Color {
return fmt.Errorf("color not match")
}
// NodeAddresses should match.
if len(n1.NodeAddresses) != len(n2.NodeAddresses) {
return fmt.Errorf("node addresses don't match")
}
addrs := make(map[string]struct{}, len(n1.NodeAddresses))
for _, nodeAddr := range n1.NodeAddresses {
addrs[nodeAddr.Addr] = struct{}{}
}
for _, nodeAddr := range n2.NodeAddresses {
if _, ok := addrs[nodeAddr.Addr]; !ok {
return fmt.Errorf("address %v not found in node "+
"announcement", nodeAddr.Addr)
}
}
return nil
}
// assertUpdateNodeAnnouncementResponse is a helper function to assert
// the expected values in the response.
//
// NOTE: only used for tests in this file.
func assertUpdateNodeAnnouncementResponse(ht *lntemp.HarnessTest,
response *peersrpc.NodeAnnouncementUpdateResponse,
expectedOps map[string]int) {
require.Equal(
ht, len(response.Ops), len(expectedOps),
"unexpected number of Ops updating dave's node announcement",
)
ops := make(map[string]int, len(response.Ops))
for _, op := range response.Ops {
ops[op.Entity] = len(op.Actions)
}
for k, v := range expectedOps {
if v != ops[k] {
ht.Fatalf("unexpected number of actions for operation "+
"%s: got %d wanted %d", k, ops[k], v)
}
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,6 @@
package itest
import (
"context"
"fmt"
"testing"
"time"
@ -16,7 +15,6 @@ import (
"github.com/lightningnetwork/lnd/lnrpc/signrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
@ -218,31 +216,27 @@ func basicChannelFundingTest(ht *lntemp.HarnessTest,
// testUnconfirmedChannelFunding tests that our unconfirmed change outputs can
// be used to fund channels.
func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
func testUnconfirmedChannelFunding(ht *lntemp.HarnessTest) {
const (
chanAmt = funding.MaxBtcFundingAmount
pushAmt = btcutil.Amount(100000)
)
// We'll start off by creating a node for Carol.
carol := net.NewNode(t.t, "Carol", nil)
defer shutdownAndAssert(net, t, carol)
carol := ht.NewNode("Carol", nil)
// We'll send her some confirmed funds.
net.SendCoinsUnconfirmed(t.t, chanAmt*2, carol)
alice := ht.Alice
// Make sure the unconfirmed tx is seen in the mempool.
_, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err, "failed to find tx in miner mempool")
// We'll send her some unconfirmed funds.
ht.FundCoinsUnconfirmed(2*chanAmt, carol)
// Now, we'll connect her to Alice so that they can open a channel
// together. The funding flow should select Carol's unconfirmed output
// as she doesn't have any other funds since it's a new node.
net.ConnectNodes(t.t, carol, net.Alice)
ht.ConnectNodes(carol, alice)
chanOpenUpdate := openChannelStream(
t, net, carol, net.Alice,
lntest.OpenChannelParams{
chanOpenUpdate := ht.OpenChannelAssertStream(
carol, alice, lntemp.OpenChannelParams{
Amt: chanAmt,
PushAmt: pushAmt,
SpendUnconfirmed: true,
@ -251,7 +245,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
// Creates a helper closure to be used below which asserts the proper
// response to a channel balance RPC.
checkChannelBalance := func(node *lntest.HarnessNode,
checkChannelBalance := func(node *node.HarnessNode,
local, remote, pendingLocal, pendingRemote btcutil.Amount) {
expectedResponse := &lnrpc.ChannelBalanceResponse{
@ -285,7 +279,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
Balance: int64(local),
PendingOpenBalance: int64(pendingLocal),
}
assertChannelBalanceResp(t, node, expectedResponse)
ht.AssertChannelBalanceResp(node, expectedResponse)
}
// As the channel is pending open, it's expected Carol has both zero
@ -300,99 +294,107 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
// For Alice, her local/remote balances should be zero, and the
// local/remote balances are the mirror of Carol's.
checkChannelBalance(net.Alice, 0, 0, pushAmt, carolLocalBalance)
checkChannelBalance(alice, 0, 0, pushAmt, carolLocalBalance)
// Confirm the channel and wait for it to be recognized by both
// parties. Two transactions should be mined, the unconfirmed spend and
// the funding tx.
mineBlocks(t, net, 6, 2)
chanPoint, err := net.WaitForChannelOpen(chanOpenUpdate)
require.NoError(t.t, err, "error while waitinng for channel open")
// parties. For neutrino backend, the funding transaction should be
// mined. Otherwise, two transactions should be mined, the unconfirmed
// spend and the funding tx.
if ht.IsNeutrinoBackend() {
ht.MineBlocksAndAssertNumTxes(6, 1)
} else {
ht.MineBlocksAndAssertNumTxes(6, 2)
}
chanPoint := ht.WaitForChannelOpenEvent(chanOpenUpdate)
// With the channel open, we'll check the balances on each side of the
// channel as a sanity check to ensure things worked out as intended.
checkChannelBalance(carol, carolLocalBalance, pushAmt, 0, 0)
checkChannelBalance(net.Alice, pushAmt, carolLocalBalance, 0, 0)
checkChannelBalance(alice, pushAmt, carolLocalBalance, 0, 0)
// TODO(yy): remove the sleep once the following bug is fixed.
//
// We may get the error `unable to gracefully close channel while peer
// is offline (try force closing it instead): channel link not found`.
// This happens because the channel link hasn't been added yet but we
// now proceed to closing the channel. We may need to revisit how the
// channel open event is created and make sure the event is only sent
// after all relevant states have been updated.
time.Sleep(2 * time.Second)
// Now that we're done with the test, the channel can be closed.
closeChannelAndAssert(t, net, carol, chanPoint, false)
ht.CloseChannel(carol, chanPoint)
}
// testChannelFundingInputTypes tests that any supported input type can
// be used to fund channels.
func testChannelFundingInputTypes(net *lntest.NetworkHarness, t *harnessTest) {
func testChannelFundingInputTypes(ht *lntemp.HarnessTest) {
const (
chanAmt = funding.MaxBtcFundingAmount
burnAddr = "bcrt1qxsnqpdc842lu8c0xlllgvejt6rhy49u6fmpgyz"
)
addrTypes := []lnrpc.AddressType{
lnrpc.AddressType_WITNESS_PUBKEY_HASH,
lnrpc.AddressType_NESTED_PUBKEY_HASH,
lnrpc.AddressType_TAPROOT_PUBKEY,
fundWithTypes := []func(amt btcutil.Amount, target *node.HarnessNode){
ht.FundCoins, ht.FundCoinsNP2WKH, ht.FundCoinsP2TR,
}
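	// Each of the funding helpers above sends Carol coins of a
	// different input type: the default P2WKH, NP2WKH and P2TR
	// respectively.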
alice := ht.Alice
// We'll start off by creating a node for Carol.
carol := net.NewNode(t.t, "Carol", nil)
defer shutdownAndAssert(net, t, carol)
carol := ht.NewNode("Carol", nil)
// Now, we'll connect her to Alice so that they can open a
// channel together.
net.ConnectNodes(t.t, carol, net.Alice)
ht.ConnectNodes(carol, alice)
for _, addrType := range addrTypes {
// Creates a helper closure to be used below which asserts the
// proper response to a channel balance RPC.
checkChannelBalance := func(node *node.HarnessNode, local,
remote, pendingLocal, pendingRemote btcutil.Amount) {
expectedResponse := &lnrpc.ChannelBalanceResponse{
LocalBalance: &lnrpc.Amount{
Sat: uint64(local),
Msat: uint64(lnwire.NewMSatFromSatoshis(local)),
},
RemoteBalance: &lnrpc.Amount{
Sat: uint64(remote),
Msat: uint64(lnwire.NewMSatFromSatoshis(
remote,
)),
},
PendingOpenLocalBalance: &lnrpc.Amount{
Sat: uint64(pendingLocal),
Msat: uint64(lnwire.NewMSatFromSatoshis(
pendingLocal,
)),
},
PendingOpenRemoteBalance: &lnrpc.Amount{
Sat: uint64(pendingRemote),
Msat: uint64(lnwire.NewMSatFromSatoshis(
pendingRemote,
)),
},
UnsettledLocalBalance: &lnrpc.Amount{},
UnsettledRemoteBalance: &lnrpc.Amount{},
// Deprecated fields.
Balance: int64(local),
PendingOpenBalance: int64(pendingLocal),
}
ht.AssertChannelBalanceResp(node, expectedResponse)
}
for _, funder := range fundWithTypes {
// We'll send her some confirmed funds.
err := net.SendCoinsOfType(chanAmt*2, carol, addrType, true)
require.NoErrorf(
t.t, err, "unable to send coins for carol and addr "+
"type %v", addrType,
)
funder(chanAmt*2, carol)
chanOpenUpdate := openChannelStream(
t, net, carol, net.Alice, lntest.OpenChannelParams{
chanOpenUpdate := ht.OpenChannelAssertStream(
carol, alice, lntemp.OpenChannelParams{
Amt: chanAmt,
},
)
// Creates a helper closure to be used below which asserts the
// proper response to a channel balance RPC.
checkChannelBalance := func(node *lntest.HarnessNode,
local, remote, pendingLocal,
pendingRemote btcutil.Amount) {
expectedResponse := &lnrpc.ChannelBalanceResponse{
LocalBalance: &lnrpc.Amount{
Sat: uint64(local),
Msat: uint64(lnwire.NewMSatFromSatoshis(
local,
)),
},
RemoteBalance: &lnrpc.Amount{
Sat: uint64(remote),
Msat: uint64(lnwire.NewMSatFromSatoshis(
remote,
)),
},
PendingOpenLocalBalance: &lnrpc.Amount{
Sat: uint64(pendingLocal),
Msat: uint64(lnwire.NewMSatFromSatoshis(
pendingLocal,
)),
},
PendingOpenRemoteBalance: &lnrpc.Amount{
Sat: uint64(pendingRemote),
Msat: uint64(lnwire.NewMSatFromSatoshis(
pendingRemote,
)),
},
UnsettledLocalBalance: &lnrpc.Amount{},
UnsettledRemoteBalance: &lnrpc.Amount{},
// Deprecated fields.
Balance: int64(local),
PendingOpenBalance: int64(pendingLocal),
}
assertChannelBalanceResp(t, node, expectedResponse)
}
// As the channel is pending open, it's expected Carol has both
// zero local and remote balances, and pending local/remote
// should not be zero.
@ -405,52 +407,50 @@ func testChannelFundingInputTypes(net *lntest.NetworkHarness, t *harnessTest) {
// For Alice, her local/remote balances should be zero, and her
// pending local/remote balances are the mirror of Carol's.
checkChannelBalance(net.Alice, 0, 0, 0, carolLocalBalance)
checkChannelBalance(alice, 0, 0, 0, carolLocalBalance)
// Confirm the channel and wait for it to be recognized by both
// parties. Only the funding transaction should be mined since Carol's
// coins are already confirmed.
mineBlocks(t, net, 6, 1)
chanPoint, err := net.WaitForChannelOpen(chanOpenUpdate)
require.NoError(
t.t, err, "error while waiting for channel open",
)
ht.MineBlocksAndAssertNumTxes(1, 1)
chanPoint := ht.WaitForChannelOpenEvent(chanOpenUpdate)
// With the channel open, we'll check the balances on each side
// of the channel as a sanity check to ensure things worked out
// as intended.
checkChannelBalance(carol, carolLocalBalance, 0, 0, 0)
checkChannelBalance(net.Alice, 0, carolLocalBalance, 0, 0)
checkChannelBalance(alice, 0, carolLocalBalance, 0, 0)
// TODO(yy): remove the sleep once the following bug is fixed.
//
// We may get the error `unable to gracefully close channel
// while peer is offline (try force closing it instead):
// channel link not found`. This happens because the channel
// link hasn't been added yet but we now proceed to closing the
// channel. We may need to revisit how the channel open event
// is created and make sure the event is only sent after all
// relevant states have been updated.
time.Sleep(2 * time.Second)
// Now that we're done with the test, the channel can be closed.
closeChannelAndAssert(t, net, carol, chanPoint, false)
ht.CloseChannel(carol, chanPoint)
// Empty out the wallet so there aren't any lingering coins.
sendAllCoinsConfirm(net, carol, t, burnAddr)
sendAllCoinsConfirm(ht, carol, burnAddr)
}
}
// sendAllCoinsConfirm sends all coins of the node's wallet to the given address
// and awaits one confirmation.
func sendAllCoinsConfirm(net *lntest.NetworkHarness, node *lntest.HarnessNode,
t *harnessTest, addr string) {
ctxb := context.Background()
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
func sendAllCoinsConfirm(ht *lntemp.HarnessTest, node *node.HarnessNode,
addr string) {
sweepReq := &lnrpc.SendCoinsRequest{
Addr: addr,
SendAll: true,
}
_, err := node.SendCoins(ctxt, sweepReq)
require.NoError(t.t, err)
// Make sure the unconfirmed tx is seen in the mempool.
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err, "failed to find tx in miner mempool")
mineBlocks(t, net, 1, 1)
node.RPC.SendCoins(sweepReq)
ht.MineBlocksAndAssertNumTxes(1, 1)
}
// testExternalFundingChanPoint tests that we're able to carry out a normal
@ -570,7 +570,7 @@ func testExternalFundingChanPoint(ht *lntemp.HarnessTest) {
// representation of channels if the system is restarted or disconnected.
// testFundingPersistence mirrors testBasicChannelFunding, but adds restarts
// and checks for the state of channels with unconfirmed funding transactions.
func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
func testChannelFundingPersistence(ht *lntemp.HarnessTest) {
chanAmt := funding.MaxBtcFundingAmount
pushAmt := btcutil.Amount(0)
@ -578,161 +578,120 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
// confirmation before it's open, with the current set of defaults,
// we'll need to create a new node instance.
const numConfs = 5
carolArgs := []string{fmt.Sprintf("--bitcoin.defaultchanconfs=%v", numConfs)}
carol := net.NewNode(t.t, "Carol", carolArgs)
carolArgs := []string{
fmt.Sprintf("--bitcoin.defaultchanconfs=%v", numConfs),
}
carol := ht.NewNode("Carol", carolArgs)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
net.ConnectNodes(t.t, net.Alice, carol)
alice := ht.Alice
ht.ConnectNodes(alice, carol)
// Create a new channel that requires 5 confs before it's considered
// open, then broadcast the funding transaction
pendingUpdate, err := net.OpenPendingChannel(
net.Alice, carol, chanAmt, pushAmt,
)
if err != nil {
t.Fatalf("unable to open channel: %v", err)
param := lntemp.OpenChannelParams{
Amt: chanAmt,
PushAmt: pushAmt,
}
update := ht.OpenChannelAssertPending(alice, carol, param)
// At this point, the channel's funding transaction will have been
// broadcast, but not confirmed. Alice and Carol's nodes should reflect
// this when queried via RPC.
assertNumOpenChannelsPending(t, net.Alice, carol, 1)
ht.AssertNumPendingOpenChannels(alice, 1)
ht.AssertNumPendingOpenChannels(carol, 1)
// Restart both nodes to test that the appropriate state has been
// persisted and that both nodes recover gracefully.
if err := net.RestartNode(net.Alice, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
if err := net.RestartNode(carol, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
ht.RestartNode(alice)
ht.RestartNode(carol)
fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid)
if err != nil {
t.Fatalf("unable to convert funding txid into chainhash.Hash:"+
" %v", err)
}
fundingTxStr := fundingTxID.String()
fundingTxID, err := chainhash.NewHash(update.Txid)
require.NoError(ht, err, "unable to convert funding txid "+
"into chainhash.Hash")
// Mine a block, then wait for Alice's node to notify us that the
// channel has been opened. The funding transaction should be found
// within the newly mined block.
block := mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, fundingTxID)
block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
ht.Miner.AssertTxInBlock(block, fundingTxID)
// Get the height that our transaction confirmed at.
_, height, err := net.Miner.Client.GetBestBlock()
require.NoError(t.t, err, "could not get best block")
_, height := ht.Miner.GetBestBlock()
// Restart both nodes to test that the appropriate state has been
// persisted and that both nodes recover gracefully.
if err := net.RestartNode(net.Alice, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
if err := net.RestartNode(carol, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
ht.RestartNode(alice)
ht.RestartNode(carol)
// The following block ensures that after both nodes have restarted,
// they have reconnected before the execution of the next test.
net.EnsureConnected(t.t, net.Alice, carol)
ht.EnsureConnected(alice, carol)
// Next, mine enough blocks such that the channel will open with a
// single additional block mined.
if _, err := net.Miner.Client.Generate(3); err != nil {
t.Fatalf("unable to mine blocks: %v", err)
}
ht.MineBlocks(3)
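	// The funding transaction already has one confirmation from the
	// block mined above, so at this point it has four of the five
	// required confirmations.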
// Assert that our wallet has our opening transaction with a label
// that does not have a channel ID set yet, because we have not
// reached our required confirmations.
tx := findTxAtHeight(t, height, fundingTxStr, net.Alice)
tx := ht.AssertTxAtHeight(alice, height, fundingTxID)
// At this stage, we expect the transaction to be labelled, but not with
// our channel ID because our transaction has not yet confirmed.
label := labels.MakeLabel(labels.LabelTypeChannelOpen, nil)
require.Equal(t.t, label, tx.Label, "open channel label wrong")
require.Equal(ht, label, tx.Label, "open channel label wrong")
// Both nodes should still show a single channel as pending.
time.Sleep(time.Second * 1)
assertNumOpenChannelsPending(t, net.Alice, carol, 1)
ht.AssertNumPendingOpenChannels(alice, 1)
ht.AssertNumPendingOpenChannels(carol, 1)
// Finally, mine the last block which should mark the channel as open.
if _, err := net.Miner.Client.Generate(1); err != nil {
t.Fatalf("unable to mine blocks: %v", err)
}
ht.MineBlocks(1)
// At this point, the channel should be fully opened and there should
// be no pending channels remaining for either node.
time.Sleep(time.Second * 1)
assertNumOpenChannelsPending(t, net.Alice, carol, 0)
ht.AssertNumPendingOpenChannels(alice, 0)
ht.AssertNumPendingOpenChannels(carol, 0)
// The channel should be listed in the peer information returned by
// both peers.
outPoint := wire.OutPoint{
Hash: *fundingTxID,
Index: pendingUpdate.OutputIndex,
}
chanPoint := lntemp.ChanPointFromPendingUpdate(update)
// Re-lookup our transaction in the block that it confirmed in.
tx = findTxAtHeight(t, height, fundingTxStr, net.Alice)
tx = ht.AssertTxAtHeight(alice, height, fundingTxID)
// Check both nodes to ensure that the channel is ready for operation.
chanAlice := ht.AssertChannelExists(alice, chanPoint)
ht.AssertChannelExists(carol, chanPoint)
// Create an additional check for our channel assertion that will
// check that our label is as expected.
check := func(channel *lnrpc.Channel) {
shortChanID := lnwire.NewShortChanIDFromInt(
channel.ChanId,
)
label := labels.MakeLabel(
labels.LabelTypeChannelOpen, &shortChanID,
)
require.Equal(t.t, label, tx.Label,
"open channel label not updated")
}
// Check both nodes to ensure that the channel is ready for operation.
err = net.AssertChannelExists(net.Alice, &outPoint, check)
if err != nil {
t.Fatalf("unable to assert channel existence: %v", err)
}
if err := net.AssertChannelExists(carol, &outPoint); err != nil {
t.Fatalf("unable to assert channel existence: %v", err)
}
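	// With the channel confirmed, the funding transaction's label
	// should now have been updated to include the channel's short
	// channel ID.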
shortChanID := lnwire.NewShortChanIDFromInt(chanAlice.ChanId)
label = labels.MakeLabel(labels.LabelTypeChannelOpen, &shortChanID)
require.Equal(ht, label, tx.Label, "open channel label not updated")
// Finally, immediately close the channel. This function will also
// block until the channel is closed and will additionally assert the
// relevant channel closing post conditions.
chanPoint := &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
FundingTxidBytes: pendingUpdate.Txid,
},
OutputIndex: pendingUpdate.OutputIndex,
}
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
ht.CloseChannel(alice, chanPoint)
}
// testBatchChanFunding makes sure multiple channels can be opened in one batch
// transaction in an atomic way.
func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testBatchChanFunding(ht *lntemp.HarnessTest) {
// First, we'll create two new nodes that we'll use to open channels
// to during this test. Carol has a high minimum funding amount that
// we'll use to trigger an error during the batch channel open.
carol := net.NewNode(t.t, "carol", []string{"--minchansize=200000"})
defer shutdownAndAssert(net, t, carol)
carol := ht.NewNode("carol", []string{"--minchansize=200000"})
dave := ht.NewNode("dave", nil)
dave := net.NewNode(t.t, "dave", nil)
defer shutdownAndAssert(net, t, dave)
alice, bob := ht.Alice, ht.Bob
// Before we start the test, we'll ensure Alice is connected to Carol
// and Dave so she can open channels to both of them (and Bob).
net.EnsureConnected(t.t, net.Alice, net.Bob)
net.EnsureConnected(t.t, net.Alice, carol)
net.EnsureConnected(t.t, net.Alice, dave)
ht.EnsureConnected(alice, bob)
ht.EnsureConnected(alice, carol)
ht.EnsureConnected(alice, dave)
// Let's create our batch TX request. This first one should fail as we
// open a channel to Carol that is too small for her min chan size.
@ -740,7 +699,7 @@ func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) {
SatPerVbyte: 12,
MinConfs: 1,
Channels: []*lnrpc.BatchOpenChannel{{
NodePubkey: net.Bob.PubKey[:],
NodePubkey: bob.PubKey[:],
LocalFundingAmount: 100_000,
}, {
NodePubkey: carol.PubKey[:],
@ -751,22 +710,16 @@ func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) {
}},
}
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
_, err := net.Alice.BatchOpenChannel(ctxt, batchReq)
require.Error(t.t, err)
require.Contains(t.t, err.Error(), "initial negotiation failed")
err := alice.RPC.BatchOpenChannelAssertErr(batchReq)
require.Contains(ht, err.Error(), "initial negotiation failed")
// Let's fix the minimum amount for Carol now and try again.
// Let's fix the funding amount for the channel to Carol and try again.
batchReq.Channels[1].LocalFundingAmount = 200_000
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
batchResp, err := net.Alice.BatchOpenChannel(ctxt, batchReq)
require.NoError(t.t, err)
require.Len(t.t, batchResp.PendingChannels, 3)
batchResp := alice.RPC.BatchOpenChannel(batchReq)
require.Len(ht, batchResp.PendingChannels, 3)
txHash, err := chainhash.NewHash(batchResp.PendingChannels[0].Txid)
require.NoError(t.t, err)
require.NoError(ht, err)
chanPoint1 := &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
@ -787,23 +740,16 @@ func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) {
OutputIndex: batchResp.PendingChannels[2].OutputIndex,
}
block := mineBlocks(t, net, 6, 1)[0]
assertTxInBlock(t, block, txHash)
err = net.Alice.WaitForNetworkChannelOpen(chanPoint1)
require.NoError(t.t, err)
err = net.Alice.WaitForNetworkChannelOpen(chanPoint2)
require.NoError(t.t, err)
err = net.Alice.WaitForNetworkChannelOpen(chanPoint3)
require.NoError(t.t, err)
block := ht.MineBlocksAndAssertNumTxes(6, 1)[0]
ht.Miner.AssertTxInBlock(block, txHash)
ht.AssertTopologyChannelOpen(alice, chanPoint1)
ht.AssertTopologyChannelOpen(alice, chanPoint2)
ht.AssertTopologyChannelOpen(alice, chanPoint3)
// With the channel open, ensure that it is counted towards Carol's
// With the channels open, ensure that they are counted towards Alice's
// total channel balance.
balReq := &lnrpc.ChannelBalanceRequest{}
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
balRes, err := net.Alice.ChannelBalance(ctxt, balReq)
require.NoError(t.t, err)
require.NotEqual(t.t, int64(0), balRes.LocalBalance.Sat)
balRes := alice.RPC.ChannelBalance()
require.NotEqual(ht, int64(0), balRes.LocalBalance.Sat)
// Next, to make sure the channel functions as normal, we'll make some
// payments within the channel.
@ -812,23 +758,16 @@ func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) {
Memo: "new chans",
Value: int64(payAmt),
}
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
resp, err := carol.AddInvoice(ctxt, invoice)
require.NoError(t.t, err)
err = completePaymentRequests(
net.Alice, net.Alice.RouterClient,
[]string{resp.PaymentRequest}, true,
)
require.NoError(t.t, err)
resp := carol.RPC.AddInvoice(invoice)
ht.CompletePaymentRequests(alice, []string{resp.PaymentRequest})
// To conclude, we'll close the newly created channels between Alice
// and Bob, Carol, and Dave. This function will also block until the
// channels are closed and will additionally assert the relevant
// channel closing post conditions.
closeChannelAndAssert(t, net, net.Alice, chanPoint1, false)
closeChannelAndAssert(t, net, net.Alice, chanPoint2, false)
closeChannelAndAssert(t, net, net.Alice, chanPoint3, false)
ht.CloseChannel(alice, chanPoint1)
ht.CloseChannel(alice, chanPoint2)
ht.CloseChannel(alice, chanPoint3)
}
// deriveFundingShim creates a channel funding shim by deriving the necessary

File diff suppressed because it is too large

View File

@ -1,30 +1,27 @@
package itest
import (
"context"
"fmt"
network "net"
"strings"
"time"
"net"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
// testNetworkConnectionTimeout checks that the connectiontimeout is taking
// effect. It creates a node with a small connection timeout value, and connects
// it to a non-routable IP address.
func testNetworkConnectionTimeout(net *lntest.NetworkHarness, t *harnessTest) {
// effect. It creates a node with a small connection timeout value, and
// connects it to a non-routable IP address.
func testNetworkConnectionTimeout(ht *lntemp.HarnessTest) {
var (
ctxt, _ = context.WithTimeout(
context.Background(), defaultTimeout,
)
// testPub is a random public key for testing only.
testPub = "0332bda7da70fefe4b6ab92f53b3c4f4ee7999" +
"f312284a8e89c8670bb3f67dbee2"
// testHost is a non-routable IP address. It's used to cause a
// connection timeout.
testHost = "10.255.255.255"
@ -32,8 +29,7 @@ func testNetworkConnectionTimeout(net *lntest.NetworkHarness, t *harnessTest) {
// First, test the global timeout settings.
// Create Carol with a connection timeout of 1 millisecond.
carol := net.NewNode(t.t, "Carol", []string{"--connectiontimeout=1ms"})
defer shutdownAndAssert(net, t, carol)
carol := ht.NewNode("Carol", []string{"--connectiontimeout=1ms"})
// Try to connect Carol to a non-routable IP address, which should give
// us a timeout error.
@ -43,12 +39,27 @@ func testNetworkConnectionTimeout(net *lntest.NetworkHarness, t *harnessTest) {
Host: testHost,
},
}
assertTimeoutError(ctxt, t, carol, req)
// assertTimeoutError asserts that a connection timeout error is
// raised. A context with a default timeout is used to make the
// request. If our customized connection timeout is less than the
// default, the request context won't time out; instead, a network
// connection timeout will be returned.
assertTimeoutError := func(hn *node.HarnessNode,
req *lnrpc.ConnectPeerRequest) {
err := hn.RPC.ConnectPeerAssertErr(req)
// Check that the network returns a timeout error.
require.Containsf(ht, err.Error(), "i/o timeout",
"expected to get a timeout error, instead got: %v", err)
}
assertTimeoutError(carol, req)
// Second, test timeout on the connect peer request.
// Create Dave with the default timeout setting.
dave := net.NewNode(t.t, "Dave", nil)
defer shutdownAndAssert(net, t, dave)
dave := ht.NewNode("Dave", nil)
// Try to connect Dave to a non-routable IP address, using a timeout
// value of 1ms, which should give us a timeout error immediately.
@ -59,12 +70,12 @@ func testNetworkConnectionTimeout(net *lntest.NetworkHarness, t *harnessTest) {
},
Timeout: 1,
}
assertTimeoutError(ctxt, t, dave, req)
assertTimeoutError(dave, req)
}
// testReconnectAfterIPChange verifies that if a persistent inbound node changes
// its listening address then its peer will still be able to reconnect to it.
func testReconnectAfterIPChange(net *lntest.NetworkHarness, t *harnessTest) {
func testReconnectAfterIPChange(ht *lntemp.HarnessTest) {
// In this test, the following network will be set up. A single
// dash line represents a peer connection and a double dash line
// represents a channel.
@ -90,115 +101,81 @@ func testReconnectAfterIPChange(net *lntest.NetworkHarness, t *harnessTest) {
// reconnect.
// Create a new node, Charlie.
charlie := net.NewNode(t.t, "Charlie", nil)
defer shutdownAndAssert(net, t, charlie)
charlie := ht.NewNode("Charlie", nil)
// We derive two ports for Dave, and we initialise his node with
// these ports advertised as `--externalip` arguments.
ip1 := lntest.NextAvailablePort()
// We derive an extra port for Dave, and we initialise his node with
// the port advertised as an `--externalip` argument.
ip2 := lntest.NextAvailablePort()
// Create a new node, Dave, which will initialize a P2P port for him.
daveArgs := []string{fmt.Sprintf("--externalip=127.0.0.1:%d", ip2)}
dave := ht.NewNode("Dave", daveArgs)
// We now have two ports, the initial P2P port from creating the node,
// and the `externalip` specified above.
advertisedAddrs := []string{
fmt.Sprintf("127.0.0.1:%d", ip1),
fmt.Sprintf("127.0.0.1:%d", dave.Cfg.P2PPort),
fmt.Sprintf("127.0.0.1:%d", ip2),
}
var daveArgs []string
for _, addr := range advertisedAddrs {
daveArgs = append(daveArgs, "--externalip="+addr)
}
// withP2PPort is a helper closure used to set the P2P port that a node
// should use.
var withP2PPort = func(port int) lntest.NodeOption {
return func(cfg *lntest.BaseNodeConfig) {
cfg.P2PPort = port
}
}
// Create a new node, Dave, and ensure that his initial P2P port is
// ip1 derived above.
dave := net.NewNode(t.t, "Dave", daveArgs, withP2PPort(ip1))
defer shutdownAndAssert(net, t, dave)
// Subscribe to graph notifications from Charlie so that we can tell
// when he receives Dave's NodeAnnouncements.
ctxb := context.Background()
charlieSub := subscribeGraphNotifications(ctxb, t, charlie)
defer close(charlieSub.quit)
// Connect Alice to Dave and Charlie.
net.ConnectNodes(t.t, net.Alice, dave)
net.ConnectNodes(t.t, net.Alice, charlie)
alice := ht.Alice
ht.ConnectNodes(alice, dave)
ht.ConnectNodes(alice, charlie)
// We'll then go ahead and open a channel between Alice and Dave. This
// ensures that Charlie receives the node announcement from Alice as
// part of the announcement broadcast.
chanPoint := openChannelAndAssert(
t, net, net.Alice, dave, lntest.OpenChannelParams{
Amt: 1000000,
},
chanPoint := ht.OpenChannel(
alice, dave, lntemp.OpenChannelParams{Amt: 1000000},
)
defer closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
// waitForNodeAnnouncement is a closure used to wait for a node
// announcement from a node with the given public key that also
// advertises a particular set of addresses.
waitForNodeAnnouncement := func(graphSub graphSubscription,
nodePubKey string, addrs []string) {
waitForNodeAnnouncement := func(nodePubKey string, addrs []string) {
err := wait.NoError(func() error {
// Expect to have at least 1 node announcement now.
updates := ht.AssertNumNodeAnns(charlie, nodePubKey, 1)
for {
select {
case graphUpdate := <-graphSub.updateChan:
nextUpdate:
for _, update := range graphUpdate.NodeUpdates {
if update.IdentityKey != nodePubKey {
continue
}
// Get latest node update from the node.
update := updates[len(updates)-1]
addrMap := make(map[string]bool)
for _, addr := range update.NodeAddresses {
addrMap[addr.GetAddr()] = true
}
for _, addr := range addrs {
if !addrMap[addr] {
continue nextUpdate
}
}
return
}
case err := <-graphSub.errChan:
t.Fatalf("unable to recv graph update: %v", err)
case <-time.After(defaultTimeout):
t.Fatalf("did not receive node ann update")
addrMap := make(map[string]bool)
for _, addr := range update.NodeAddresses {
addrMap[addr.GetAddr()] = true
}
}
// Check that our wanted addresses can be found from
// the node update.
for _, addr := range addrs {
if !addrMap[addr] {
return fmt.Errorf("address %s not "+
"found", addr)
}
}
return nil
}, defaultTimeout)
require.NoError(ht, err, "timeout checking node ann")
}
// Wait for Charlie to receive Dave's initial NodeAnnouncement.
waitForNodeAnnouncement(charlieSub, dave.PubKeyStr, advertisedAddrs)
waitForNodeAnnouncement(dave.PubKeyStr, advertisedAddrs)
// Now create a persistent connection between Charlie and Bob with no
// channels. Charlie is the outbound node and Bob is the inbound node.
net.ConnectNodesPerm(t.t, charlie, dave)
// Assert that Dave and Charlie are connected
assertConnected(t, dave, charlie)
// Now create a persistent connection between Charlie and Dave with no
// channels. Charlie is the outbound node and Dave is the inbound node.
ht.ConnectNodesPerm(charlie, dave)
// Change Dave's P2P port to that of the second address he advertised
// and restart his node.
dave.Cfg.P2PPort = ip2
err := net.RestartNode(dave, nil)
require.NoError(t.t, err)
ht.RestartNode(dave)
// assert that Dave and Charlie reconnect successfully after Dave
// changes to his second advertised address.
assertConnected(t, dave, charlie)
ht.AssertConnected(dave, charlie)
// Next we test the case where Dave changes his listening address to one
// that was not listed in his original advertised addresses. The desired
@ -213,113 +190,51 @@ func testReconnectAfterIPChange(net *lntest.NetworkHarness, t *harnessTest) {
"--externalip=127.0.0.1:%d", dave.Cfg.P2PPort,
),
}
err = net.RestartNode(dave, nil)
require.NoError(t.t, err)
ht.RestartNode(dave)
// Show that Charlie does receive Dave's new listening address in
// a Node Announcement.
waitForNodeAnnouncement(
charlieSub, dave.PubKeyStr,
dave.PubKeyStr,
[]string{fmt.Sprintf("127.0.0.1:%d", dave.Cfg.P2PPort)},
)
// assert that Dave and Charlie do reconnect after Dave changes his P2P
// address to one not listed in Dave's original advertised list of
// addresses.
assertConnected(t, dave, charlie)
}
ht.AssertConnected(dave, charlie)
// assertTimeoutError asserts that a connection timeout error is raised. A
// context with a default timeout is used to make the request. If our customized
// connection timeout is less than the default, we won't see the request context
// times out, instead a network connection timeout will be returned.
func assertTimeoutError(ctxt context.Context, t *harnessTest,
node *lntest.HarnessNode, req *lnrpc.ConnectPeerRequest) {
t.t.Helper()
err := connect(ctxt, node, req)
// a DeadlineExceeded error will appear in the context if the above
// ctxtTimeout value is reached.
require.NoError(t.t, ctxt.Err(), "context time out")
// Check that the network returns a timeout error.
require.Containsf(
t.t, err.Error(), "i/o timeout",
"expected to get a timeout error, instead got: %v", err,
)
}
func connect(ctxt context.Context, node *lntest.HarnessNode,
req *lnrpc.ConnectPeerRequest) error {
syncTimeout := time.After(15 * time.Second)
ticker := time.NewTicker(time.Millisecond * 100)
defer ticker.Stop()
for {
select {
case <-ticker.C:
_, err := node.ConnectPeer(ctxt, req)
// If there's no error, return nil
if err == nil {
return err
}
// If the error is no ErrServerNotActive, return it.
// Otherwise, we will retry until timeout.
if !strings.Contains(err.Error(),
lnd.ErrServerNotActive.Error()) {
return err
}
case <-syncTimeout:
return fmt.Errorf("chain backend did not " +
"finish syncing")
}
}
return nil
// Finally, close the channel.
ht.CloseChannel(alice, chanPoint)
}
// testAddPeerConfig tests that the "--addpeer" config flag successfully adds
// a new peer.
func testAddPeerConfig(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
alice := net.Alice
info, err := alice.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
require.NoError(t.t, err)
func testAddPeerConfig(ht *lntemp.HarnessTest) {
alice := ht.Alice
info := alice.RPC.GetInfo()
alicePeerAddress := info.Uris[0]
// Create a new node (Carol) with Alice as a peer.
args := []string{
fmt.Sprintf("--addpeer=%v", alicePeerAddress),
}
carol := net.NewNode(t.t, "Carol", args)
defer shutdownAndAssert(net, t, carol)
args := []string{fmt.Sprintf("--addpeer=%v", alicePeerAddress)}
carol := ht.NewNode("Carol", args)
assertConnected(t, alice, carol)
ht.EnsureConnected(alice, carol)
// If we list Carol's peers, Alice should already be
// listed as one, since we specified her using the
// addpeer flag.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
listPeersRequest := &lnrpc.ListPeersRequest{}
listPeersResp, err := carol.ListPeers(ctxt, listPeersRequest)
require.NoError(t.t, err)
listPeersResp := carol.RPC.ListPeers()
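	// Parse the address we passed via --addpeer, using the default
	// Lightning port 9735, so that its public key can be compared
	// against the first entry in Carol's peer list.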
parsedPeerAddr, err := lncfg.ParseLNAddressString(
alicePeerAddress, "9735", network.ResolveTCPAddr,
alicePeerAddress, "9735", net.ResolveTCPAddr,
)
require.NoError(t.t, err)
require.NoError(ht, err)
parsedKeyStr := fmt.Sprintf(
"%x", parsedPeerAddr.IdentityKey.SerializeCompressed(),
)
require.Equal(t.t, parsedKeyStr, listPeersResp.Peers[0].PubKey)
require.Equal(ht, parsedKeyStr, listPeersResp.Peers[0].PubKey)
}

View File

@ -6,49 +6,29 @@ import (
"crypto/sha256"
"encoding/hex"
"fmt"
"reflect"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
func testListPayments(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
// First start by deleting all payments that Alice knows of. This will
// allow us to execute the test with a clean state for Alice.
delPaymentsReq := &lnrpc.DeleteAllPaymentsRequest{}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
if _, err := net.Alice.DeleteAllPayments(ctxt, delPaymentsReq); err != nil {
t.Fatalf("unable to delete payments: %v", err)
}
func testListPayments(ht *lntemp.HarnessTest) {
alice, bob := ht.Alice, ht.Bob
// Check that there are no payments before the test.
reqInit := &lnrpc.ListPaymentsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
paymentsRespInit, err := net.Alice.ListPayments(ctxt, reqInit)
if err != nil {
t.Fatalf("error when obtaining Alice payments: %v", err)
}
if len(paymentsRespInit.Payments) != 0 {
t.Fatalf("incorrect number of payments, got %v, want %v",
len(paymentsRespInit.Payments), 0)
}
ht.AssertNumPayments(alice, 0)
// Open a channel with 100k satoshis between Alice and Bob with Alice
// being the sole funder of the channel.
chanAmt := btcutil.Amount(100000)
chanPoint := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
Amt: chanAmt,
},
chanPoint := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: chanAmt},
)
// Now that the channel is open, create an invoice for Bob which
@ -61,101 +41,58 @@ func testListPayments(net *lntest.NetworkHarness, t *harnessTest) {
RPreimage: preimage,
Value: paymentAmt,
}
addInvoiceCtxt, _ := context.WithTimeout(ctxb, defaultTimeout)
invoiceResp, err := net.Bob.AddInvoice(addInvoiceCtxt, invoice)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
// Wait for Alice to recognize and advertise the new channel generated
// above.
if err = net.Alice.WaitForNetworkChannelOpen(chanPoint); err != nil {
t.Fatalf("alice didn't advertise channel before "+
"timeout: %v", err)
}
if err = net.Bob.WaitForNetworkChannelOpen(chanPoint); err != nil {
t.Fatalf("bob didn't advertise channel before "+
"timeout: %v", err)
}
invoiceResp := bob.RPC.AddInvoice(invoice)
// With the invoice for Bob added, send a payment from Alice paying
// to the above generated invoice.
sendAndAssertSuccess(
t, net.Alice, &routerrpc.SendPaymentRequest{
PaymentRequest: invoiceResp.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitSat: 1000000,
},
)
payReqs := []string{invoiceResp.PaymentRequest}
ht.CompletePaymentRequests(alice, payReqs)
// Grab Alice's list of payments, she should show the existence of
// exactly one payment.
req := &lnrpc.ListPaymentsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
paymentsResp, err := net.Alice.ListPayments(ctxt, req)
if err != nil {
t.Fatalf("error when obtaining Alice payments: %v", err)
}
if len(paymentsResp.Payments) != 1 {
t.Fatalf("incorrect number of payments, got %v, want %v",
len(paymentsResp.Payments), 1)
}
p := paymentsResp.Payments[0] // nolint:staticcheck
p := ht.AssertNumPayments(alice, 1)[0]
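	// Inspect the route of the last HTLC attempt, which is the attempt
	// that succeeded.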
path := p.Htlcs[len(p.Htlcs)-1].Route.Hops
// Ensure that the stored path shows a direct payment to Bob with no
// other nodes in-between.
if len(path) != 1 || path[0].PubKey != net.Bob.PubKeyStr {
t.Fatalf("incorrect path")
}
require.Len(ht, path, 1, "wrong number of routes in path")
require.Equal(ht, bob.PubKeyStr, path[0].PubKey, "wrong pub key")
// The payment amount should also match our previous payment directly.
if p.Value != paymentAmt { // nolint:staticcheck
t.Fatalf("incorrect amount, got %v, want %v",
p.Value, paymentAmt) // nolint:staticcheck
}
require.EqualValues(ht, paymentAmt, p.ValueSat, "incorrect sat amount")
require.EqualValues(ht, paymentAmt*1000, p.ValueMsat,
"incorrect msat amount")
// The payment hash (or r-hash) should have been stored correctly.
correctRHash := hex.EncodeToString(invoiceResp.RHash)
if !reflect.DeepEqual(p.PaymentHash, correctRHash) {
t.Fatalf("incorrect RHash, got %v, want %v",
p.PaymentHash, correctRHash)
}
require.Equal(ht, correctRHash, p.PaymentHash, "incorrect RHash")
// As we made a single-hop direct payment, there should have been no fee
// applied.
if p.Fee != 0 { // nolint:staticcheck
t.Fatalf("incorrect Fee, got %v, want %v", p.Fee, 0) // nolint:staticcheck
}
// As we made a single-hop direct payment, there should have been no
// fee applied.
require.Zero(ht, p.FeeSat, "fee should be 0")
require.Zero(ht, p.FeeMsat, "fee should be 0")
// Finally, verify that the payment request returned by the rpc matches
// the invoice that we paid.
if p.PaymentRequest != invoiceResp.PaymentRequest {
t.Fatalf("incorrect payreq, got: %v, want: %v",
p.PaymentRequest, invoiceResp.PaymentRequest)
}
require.Equal(ht, invoiceResp.PaymentRequest, p.PaymentRequest,
"incorrect payreq")
// Delete all payments from Alice. DB should have no payments.
delReq := &lnrpc.DeleteAllPaymentsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
_, err = net.Alice.DeleteAllPayments(ctxt, delReq)
if err != nil {
t.Fatalf("Can't delete payments at the end: %v", err)
}
alice.RPC.DeleteAllPayments()
// Check that there are no payments after test.
listReq := &lnrpc.ListPaymentsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
paymentsResp, err = net.Alice.ListPayments(ctxt, listReq)
if err != nil {
t.Fatalf("error when obtaining Alice payments: %v", err)
}
if len(paymentsResp.Payments) != 0 {
t.Fatalf("incorrect number of payments, got %v, want %v",
len(paymentsResp.Payments), 0)
}
ht.AssertNumPayments(alice, 0)
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
// TODO(yy): remove the sleep once the following bug is fixed.
// When the invoice is reported settled, the commitment dance is not
// yet finished, which can cause an error when closing the channel,
// saying there's active HTLCs. We need to investigate this issue and
// reverse the order to, first finish the commitment dance, then report
// the invoice as settled.
time.Sleep(2 * time.Second)
// Close the channel.
defer ht.CloseChannel(alice, chanPoint)
}
// testPaymentFollowingChannelOpen tests that the channel transition from
@ -163,81 +100,58 @@ func testListPayments(net *lntest.NetworkHarness, t *harnessTest) {
// subsystems trying to update the channel state in the db. We follow this
// transition with a payment that updates the commitment state and verify that
// the pending state is up to date.
func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testPaymentFollowingChannelOpen(ht *lntemp.HarnessTest) {
const paymentAmt = btcutil.Amount(100)
channelCapacity := paymentAmt * 1000
// We first establish a channel between Alice and Bob.
pendingUpdate, err := net.OpenPendingChannel(
net.Alice, net.Bob, channelCapacity, 0,
)
if err != nil {
t.Fatalf("unable to open channel: %v", err)
alice, bob := ht.Alice, ht.Bob
p := lntemp.OpenChannelParams{
Amt: channelCapacity,
}
pendingUpdate := ht.OpenChannelAssertPending(alice, bob, p)
// At this point, the channel's funding transaction will have been
// broadcast, but not confirmed. Alice and Bob's nodes
// should reflect this when queried via RPC.
assertNumOpenChannelsPending(t, net.Alice, net.Bob, 1)
ht.AssertNodesNumPendingOpenChannels(alice, bob, 1)
// We are restarting Bob's node to let the link be created for the
// pending channel.
if err := net.RestartNode(net.Bob, nil); err != nil {
t.Fatalf("Bob restart failed: %v", err)
}
ht.RestartNode(bob)
// We ensure that Bob reconnects to Alice.
net.EnsureConnected(t.t, net.Bob, net.Alice)
ht.EnsureConnected(bob, alice)
// We mine one block for the channel to be confirmed.
_ = mineBlocks(t, net, 6, 1)[0]
// We mine six blocks for the channel to be confirmed.
ht.MineBlocksAndAssertNumTxes(6, 1)
// We verify that the channel is open from both nodes point of view.
assertNumOpenChannelsPending(t, net.Alice, net.Bob, 0)
chanPoint := lntemp.ChanPointFromPendingUpdate(pendingUpdate)
ht.AssertNodesNumPendingOpenChannels(alice, bob, 0)
ht.AssertChannelExists(alice, chanPoint)
ht.AssertChannelExists(bob, chanPoint)
// With the channel open, we'll create invoices for Bob that Alice will
// pay to in order to advance the state of the channel.
bobPayReqs, _, _, err := createPayReqs(
net.Bob, paymentAmt, 1,
)
if err != nil {
t.Fatalf("unable to create pay reqs: %v", err)
}
bobPayReqs, _, _ := ht.CreatePayReqs(bob, paymentAmt, 1)
// Send payment to Bob so that a channel update to disk will be
// executed.
sendAndAssertSuccess(
t, net.Alice, &routerrpc.SendPaymentRequest{
PaymentRequest: bobPayReqs[0],
TimeoutSeconds: 60,
FeeLimitSat: 1000000,
},
)
ht.CompletePaymentRequests(alice, []string{bobPayReqs[0]})
// At this point we want to make sure the channel is opened and not
// pending.
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
res, err := net.Bob.ListChannels(ctxt, &lnrpc.ListChannelsRequest{})
if err != nil {
t.Fatalf("unable to list bob channels: %v", err)
}
if len(res.Channels) == 0 {
t.Fatalf("bob list of channels is empty")
}
// TODO(yy): remove the sleep once the following bug is fixed.
// When the invoice is reported settled, the commitment dance is not
// yet finished, which can cause an error when closing the channel,
// saying there's active HTLCs. We need to investigate this issue and
// reverse the order to, first finish the commitment dance, then report
// the invoice as settled.
time.Sleep(2 * time.Second)
// Finally, immediately close the channel. This function will also
// block until the channel is closed and will additionally assert the
// relevant channel closing post conditions.
chanPoint := &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
FundingTxidBytes: pendingUpdate.Txid,
},
OutputIndex: pendingUpdate.OutputIndex,
}
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
ht.CloseChannel(alice, chanPoint)
}
// testAsyncPayments tests the performance of the async payments.
@ -597,182 +511,86 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest)
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
}
func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testInvoiceSubscriptions(ht *lntemp.HarnessTest) {
const chanAmt = btcutil.Amount(500000)
// Open a channel with 500k satoshis between Alice and Bob with Alice
// being the sole funder of the channel.
chanPoint := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
Amt: chanAmt,
},
)
// Next create a new invoice for Bob requesting 1k satoshis.
// TODO(roasbeef): make global list of invoices for each node to re-use
// and avoid collisions
const paymentAmt = 1000
invoice := &lnrpc.Invoice{
Memo: "testing",
RPreimage: makeFakePayHash(t),
Value: paymentAmt,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
invoiceResp, err := net.Bob.AddInvoice(ctxt, invoice)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
lastAddIndex := invoiceResp.AddIndex
alice, bob := ht.Alice, ht.Bob
// Create a new invoice subscription client for Bob, the notification
// should be dispatched shortly below.
req := &lnrpc.InvoiceSubscription{}
ctx, cancelInvoiceSubscription := context.WithCancel(ctxb)
bobInvoiceSubscription, err := net.Bob.SubscribeInvoices(ctx, req)
if err != nil {
t.Fatalf("unable to subscribe to bob's invoice updates: %v", err)
bobInvoiceSubscription := bob.RPC.SubscribeInvoices(req)
// Open a channel with 500k satoshis between Alice and Bob with Alice
// being the sole funder of the channel.
chanPoint := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: chanAmt},
)
// Next create a new invoice for Bob requesting 1k satoshis.
const paymentAmt = 1000
invoice := &lnrpc.Invoice{
Memo: "testing",
RPreimage: ht.Random32Bytes(),
Value: paymentAmt,
}
invoiceResp := bob.RPC.AddInvoice(invoice)
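	// Remember the add index of this invoice so that we can
	// re-subscribe from it later and replay the backlog of
	// notifications.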
lastAddIndex := invoiceResp.AddIndex
var settleIndex uint64
quit := make(chan struct{})
updateSent := make(chan struct{})
go func() {
invoiceUpdate, err := bobInvoiceSubscription.Recv()
select {
case <-quit:
// Received cancellation
return
default:
}
if err != nil {
t.Fatalf("unable to recv invoice update: %v", err)
}
// The invoice update should exactly match the invoice created
// above, but should now be settled and have SettleDate
if !invoiceUpdate.Settled { // nolint:staticcheck
t.Fatalf("invoice not settled but should be")
}
if invoiceUpdate.SettleDate == 0 {
t.Fatalf("invoice should have non zero settle date, but doesn't")
}
if !bytes.Equal(invoiceUpdate.RPreimage, invoice.RPreimage) {
t.Fatalf("payment preimages don't match: expected %v, got %v",
invoice.RPreimage, invoiceUpdate.RPreimage)
}
if invoiceUpdate.SettleIndex == 0 {
t.Fatalf("invoice should have settle index")
}
settleIndex = invoiceUpdate.SettleIndex
close(updateSent)
}()
// Wait for the channel to be recognized by both Alice and Bob before
// continuing the rest of the test.
err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
if err != nil {
// TODO(roasbeef): will need to make num blocks to advertise a
// node param
close(quit)
t.Fatalf("channel not seen by alice before timeout: %v", err)
}
// With the above invoice added, we should receive an update event.
invoiceUpdate := ht.ReceiveInvoiceUpdate(bobInvoiceSubscription)
require.NotEqual(ht, lnrpc.Invoice_SETTLED, invoiceUpdate.State,
"invoice should not be settled")
// With the assertion above set up, send a payment from Alice to Bob
// which should finalize and settle the invoice.
sendReq := &routerrpc.SendPaymentRequest{
PaymentRequest: invoiceResp.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
stream, err := net.Alice.RouterClient.SendPaymentV2(ctxt, sendReq)
if err != nil {
close(quit)
t.Fatalf("unable to send payment: %v", err)
}
result, err := getPaymentResult(stream)
if err != nil {
close(quit)
t.Fatalf("cannot get payment result: %v", err)
}
if result.Status != lnrpc.Payment_SUCCEEDED {
close(quit)
t.Fatalf("error when attempting recv: %v", result.Status)
}
ht.CompletePaymentRequests(alice, []string{invoiceResp.PaymentRequest})
select {
case <-time.After(time.Second * 10):
close(quit)
t.Fatalf("update not sent after 10 seconds")
case <-updateSent: // Fall through on success
}
// With the base case working, we'll now cancel Bob's current
// subscription in order to exercise the backlog fill behavior.
cancelInvoiceSubscription()
// The invoice update should exactly match the invoice created
// above, but should now be settled and have a SettleDate set.
invoiceUpdate = ht.ReceiveInvoiceUpdate(bobInvoiceSubscription)
require.Equal(ht, lnrpc.Invoice_SETTLED, invoiceUpdate.State,
"invoice not settled but should be")
require.NotZero(ht, invoiceUpdate.SettleDate,
"invoice should have non zero settle date, but doesn't")
require.Equal(ht, invoice.RPreimage, invoiceUpdate.RPreimage,
"payment preimages don't match")
require.NotZero(ht, invoiceUpdate.SettleIndex,
"invoice should have settle index")
settleIndex := invoiceUpdate.SettleIndex
// We'll now add 3 more invoices to Bob's invoice registry.
const numInvoices = 3
payReqs, _, newInvoices, err := createPayReqs(
net.Bob, paymentAmt, numInvoices,
payReqs, _, newInvoices := ht.CreatePayReqs(
bob, paymentAmt, numInvoices,
)
if err != nil {
t.Fatalf("unable to create pay reqs: %v", err)
}
// Now that the set of invoices has been added, we'll re-register for
// streaming invoice notifications for Bob, this time specifying the
// add index of the last prior invoice.
req = &lnrpc.InvoiceSubscription{
AddIndex: lastAddIndex,
}
ctx, cancelInvoiceSubscription = context.WithCancel(ctxb)
bobInvoiceSubscription, err = net.Bob.SubscribeInvoices(ctx, req)
if err != nil {
t.Fatalf("unable to subscribe to bob's invoice updates: %v", err)
}
req = &lnrpc.InvoiceSubscription{AddIndex: lastAddIndex}
bobInvoiceSubscription = bob.RPC.SubscribeInvoices(req)
// Since we specified a value of the prior add index above, we should
// now immediately get the invoices we just added as we should get the
// backlog of notifications.
for i := 0; i < numInvoices; i++ {
invoiceUpdate, err := bobInvoiceSubscription.Recv()
if err != nil {
t.Fatalf("unable to receive subscription")
}
invoiceUpdate := ht.ReceiveInvoiceUpdate(bobInvoiceSubscription)
// We should now get the ith invoice we added, as they should
// be returned in order.
if invoiceUpdate.Settled { // nolint:staticcheck
t.Fatalf("should have only received add events")
}
require.NotEqual(ht, lnrpc.Invoice_SETTLED, invoiceUpdate.State,
"should have only received add events")
originalInvoice := newInvoices[i]
rHash := sha256.Sum256(originalInvoice.RPreimage)
if !bytes.Equal(invoiceUpdate.RHash, rHash[:]) {
t.Fatalf("invoices have mismatched payment hashes: "+
"expected %x, got %x", rHash[:],
invoiceUpdate.RHash)
}
require.Equal(ht, rHash[:], invoiceUpdate.RHash,
"invoices have mismatched payment hashes")
}
cancelInvoiceSubscription()
// We'll now have Bob settle out the remainder of these invoices so we
// can test that all settled invoices are properly notified.
err = completePaymentRequests(
net.Alice, net.Alice.RouterClient, payReqs, true,
)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
ht.CompletePaymentRequests(alice, payReqs)
// With the set of invoices paid, we'll now cancel the old
// subscription, and create a new one for Bob, this time using the
@ -780,13 +598,7 @@ func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) {
req = &lnrpc.InvoiceSubscription{
SettleIndex: settleIndex,
}
ctx, cancelInvoiceSubscription = context.WithCancel(ctxb)
bobInvoiceSubscription, err = net.Bob.SubscribeInvoices(ctx, req)
if err != nil {
t.Fatalf("unable to subscribe to bob's invoice updates: %v", err)
}
defer cancelInvoiceSubscription()
bobInvoiceSubscription = bob.RPC.SubscribeInvoices(req)
// As we specified the index of the past settle index, we should now
// receive notifications for the three HTLCs that we just settled. As
@ -798,30 +610,31 @@ func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) {
settledInvoices[rHash] = struct{}{}
}
for i := 0; i < numInvoices; i++ {
invoiceUpdate, err := bobInvoiceSubscription.Recv()
if err != nil {
t.Fatalf("unable to receive subscription")
}
invoiceUpdate := ht.ReceiveInvoiceUpdate(bobInvoiceSubscription)
// We should now get the ith invoice we added, as they should
// be returned in order.
if !invoiceUpdate.Settled { // nolint:staticcheck
t.Fatalf("should have only received settle events")
}
require.Equal(ht, lnrpc.Invoice_SETTLED, invoiceUpdate.State,
"should have only received settle events")
var rHash [32]byte
copy(rHash[:], invoiceUpdate.RHash)
if _, ok := settledInvoices[rHash]; !ok {
t.Fatalf("unknown invoice settled: %x", rHash)
}
require.Contains(ht, settledInvoices, rHash,
"unknown invoice settled")
delete(settledInvoices, rHash)
}
// At this point, all the invoices should be fully settled.
if len(settledInvoices) != 0 {
t.Fatalf("not all invoices settled")
}
require.Empty(ht, settledInvoices, "not all invoices settled")
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
// TODO(yy): remove the sleep once the following bug is fixed.
// When the invoice is reported settled, the commitment dance is not
// yet finished, which can cause an error when closing the channel,
// saying there's active HTLCs. We need to investigate this issue and
// reverse the order to, first finish the commitment dance, then report
// the invoice as settled.
time.Sleep(2 * time.Second)
ht.CloseChannel(alice, chanPoint)
}

View File

@ -1,101 +1,69 @@
package itest
import (
"context"
"fmt"
"math"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/lightningnetwork/lnd/aezeed"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
// testGetRecoveryInfo checks whether lnd gives the right information about
// the wallet recovery process.
func testGetRecoveryInfo(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testGetRecoveryInfo(ht *lntemp.HarnessTest) {
// First, create a new node with a strong passphrase and grab the
// mnemonic used for key derivation. This will bring up Carol with an
// empty wallet and ensure she is synced up.
password := []byte("The Magic Words are Squeamish Ossifrage")
carol, mnemonic, _, err := net.NewNodeWithSeed(
"Carol", nil, password, false,
)
if err != nil {
t.Fatalf("unable to create node with seed; %v", err)
}
shutdownAndAssert(net, t, carol)
carol, mnemonic, _ := ht.NewNodeWithSeed("Carol", nil, password, false)
checkInfo := func(expectedRecoveryMode, expectedRecoveryFinished bool,
expectedProgress float64, recoveryWindow int32) {
// Restore Carol, passing in the password, mnemonic, and
// desired recovery window.
node, err := net.RestoreNodeWithSeed(
"Carol", nil, password, mnemonic, "", recoveryWindow,
nil,
node := ht.RestoreNodeWithSeed(
carol.Name(), nil, password, mnemonic, "",
recoveryWindow, nil,
)
if err != nil {
t.Fatalf("unable to restore node: %v", err)
}
// Wait for Carol to sync to the chain.
_, minerHeight, err := net.Miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}
err = waitForNodeBlockHeight(node, minerHeight)
if err != nil {
t.Fatalf("unable to sync to chain: %v", err)
}
// Query carol for her current wallet recovery progress.
var (
recoveryMode bool
recoveryFinished bool
progress float64
)
err = wait.Predicate(func() bool {
err := wait.NoError(func() error {
// Verify that recovery info gives the right response.
req := &lnrpc.GetRecoveryInfoRequest{}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := node.GetRecoveryInfo(ctxt, req)
if err != nil {
t.Fatalf("unable to query recovery info: %v", err)
resp := node.RPC.GetRecoveryInfo(nil)
mode := resp.RecoveryMode
finished := resp.RecoveryFinished
progress := resp.Progress
if mode != expectedRecoveryMode {
return fmt.Errorf("expected recovery mode %v "+
"got %v", expectedRecoveryMode, mode)
}
if finished != expectedRecoveryFinished {
return fmt.Errorf("expected finished %v "+
"got %v", expectedRecoveryFinished,
finished)
}
if progress != expectedProgress {
return fmt.Errorf("expected progress %v"+
"got %v", expectedProgress, progress)
}
recoveryMode = resp.RecoveryMode
recoveryFinished = resp.RecoveryFinished
progress = resp.Progress
if recoveryMode != expectedRecoveryMode ||
recoveryFinished != expectedRecoveryFinished ||
progress != expectedProgress {
return false
}
return true
return nil
}, defaultTimeout)
if err != nil {
t.Fatalf("expected recovery mode to be %v, got %v, "+
"expected recovery finished to be %v, got %v, "+
"expected progress %v, got %v",
expectedRecoveryMode, recoveryMode,
expectedRecoveryFinished, recoveryFinished,
expectedProgress, progress,
)
}
require.NoError(ht, err)
// Lastly, shutdown this Carol so we can move on to the next
// restoration.
shutdownAndAssert(net, t, node)
ht.Shutdown(node)
}
// Restore Carol with a recovery window of 0. Since it's not in recovery
@ -103,15 +71,15 @@ func testGetRecoveryInfo(net *lntest.NetworkHarness, t *harnessTest) {
// recoveryFinished=false, and progress=0
checkInfo(false, false, 0, 0)
// Change the recovery windown to be 1 to turn on recovery mode. Since the
// current chain height is the same as the birthday height, it should
// indicate the recovery process is finished.
// Change the recovery window to be 1 to turn on recovery mode. Since
// the current chain height is the same as the birthday height, it
// should indicate the recovery process is finished.
checkInfo(true, true, 1, 1)
// We now go ahead 5 blocks. Because the wallet's syncing process is
// controlled by a goroutine in the background, it will catch up quickly.
// This makes the recovery progress back to 1.
mineBlocks(t, net, 5, 0)
// controlled by a goroutine in the background, it will catch up
// quickly. This makes the recovery progress back to 1.
ht.MineBlocks(5)
checkInfo(true, true, 1, 1)
}
@ -119,18 +87,12 @@ func testGetRecoveryInfo(net *lntest.NetworkHarness, t *harnessTest) {
// when providing a valid aezeed that owns outputs on the chain. This test
// performs multiple restorations using the same seed and various recovery
// windows to ensure we detect funds properly.
func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testOnchainFundRecovery(ht *lntemp.HarnessTest) {
// First, create a new node with a strong passphrase and grab the
// mnemonic used for key derivation. This will bring up Carol with an
// empty wallet and ensure she is synced up.
password := []byte("The Magic Words are Squeamish Ossifrage")
carol, mnemonic, _, err := net.NewNodeWithSeed(
"Carol", nil, password, false,
)
require.NoError(t.t, err)
shutdownAndAssert(net, t, carol)
carol, mnemonic, _ := ht.NewNodeWithSeed("Carol", nil, password, false)
// As long as the mnemonic is non-nil and the extended key is empty, the
// closure below will always restore the node from the seed. The tests
@ -142,17 +104,16 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) {
// given recovery window. Additionally, the caller can specify an action
// to perform on the restored node before the node is shut down.
restoreCheckBalance := func(expAmount int64, expectedNumUTXOs uint32,
recoveryWindow int32, fn func(*lntest.HarnessNode)) {
recoveryWindow int32, fn func(*node.HarnessNode)) {
t.t.Helper()
ht.Helper()
// Restore Carol, passing in the password, mnemonic, and
// desired recovery window.
node, err := net.RestoreNodeWithSeed(
"Carol", nil, password, mnemonic, rootKey,
node := ht.RestoreNodeWithSeed(
carol.Name(), nil, password, mnemonic, rootKey,
recoveryWindow, nil,
)
require.NoError(t.t, err)
// Query carol for her current wallet balance, and also verify that
// we see the expected number of UTXOs.
@ -160,38 +121,33 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) {
currBalance int64
currNumUTXOs uint32
)
err = wait.Predicate(func() bool {
req := &lnrpc.WalletBalanceRequest{}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := node.WalletBalance(ctxt, req)
require.NoError(t.t, err)
err := wait.NoError(func() error {
resp := node.RPC.WalletBalance()
currBalance = resp.ConfirmedBalance
utxoReq := &lnrpc.ListUnspentRequest{
req := &walletrpc.ListUnspentRequest{
Account: "",
MaxConfs: math.MaxInt32,
MinConfs: 0,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
utxoResp, err := node.ListUnspent(ctxt, utxoReq)
require.NoError(t.t, err)
utxoResp := node.RPC.ListUnspent(req)
currNumUTXOs = uint32(len(utxoResp.Utxos))
// Verify that Carol's balance and number of UTXOs
// match what's expected.
if expAmount != currBalance {
return false
return fmt.Errorf("balance not matched, want "+
"%d, got %d", expAmount, currBalance)
}
if currNumUTXOs != expectedNumUTXOs {
return false
return fmt.Errorf("num of UTXOs not matched, "+
"want %d, got %d", expectedNumUTXOs,
currNumUTXOs)
}
return true
return nil
}, defaultTimeout)
if err != nil {
t.Fatalf("expected restored node to have %d satoshis, "+
"instead has %d satoshis, expected %d utxos "+
"instead has %d", expAmount, currBalance,
expectedNumUTXOs, currNumUTXOs)
}
require.NoError(ht, err, "timeout checking Carol")
// If the user provided a callback, execute the commands against
// the restored Carol.
@ -199,71 +155,41 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) {
fn(node)
}
// Check if the previous outpoints are set correctly.
req := &lnrpc.GetTransactionsRequest{
StartHeight: 0,
EndHeight: -1,
}
txDetails, err := node.GetTransactions(ctxb, req)
require.NoError(t.t, err)
for _, tx := range txDetails.Transactions {
require.Greater(t.t, len(tx.PreviousOutpoints), 0)
}
// Lastly, shut down this Carol so we can move on to the next
// restoration.
shutdownAndAssert(net, t, node)
ht.Shutdown(node)
}
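Note how every manual context.WithTimeout plus error check from the old code collapses into a single node.RPC call in the new version. The actual wrappers live in lntemp/rpc; the sketch below only illustrates the shape such a wrapper presumably has (the field names h.runCtx, h.LN and h.T are assumptions, not the real implementation):

// WalletBalance is an illustrative wrapper: it creates a bounded context,
// issues the RPC, and fails the test on error so that callers can use the
// response directly without any error handling of their own.
func (h *HarnessRPC) WalletBalance() *lnrpc.WalletBalanceResponse {
	ctxt, cancel := context.WithTimeout(h.runCtx, defaultTimeout)
	defer cancel()

	resp, err := h.LN.WalletBalance(ctxt, &lnrpc.WalletBalanceRequest{})
	require.NoError(h.T, err, "WalletBalance failed")

	return resp
}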
// Create a closure-factory for building closures that can generate and
// skip a configurable number of addresses, before finally sending coins
// to the next generated address. The returned closure applies the same
// behavior to the default P2WKH, NP2WKH and P2TR address types.
skipAndSend := func(nskip int) func(*lntest.HarnessNode) {
return func(node *lntest.HarnessNode) {
t.t.Helper()
newP2WKHAddrReq := &lnrpc.NewAddressRequest{
Type: AddrTypeWitnessPubkeyHash,
}
newNP2WKHAddrReq := &lnrpc.NewAddressRequest{
Type: AddrTypeNestedPubkeyHash,
}
newP2TRAddrReq := &lnrpc.NewAddressRequest{
Type: AddrTypeTaprootPubkey,
}
skipAndSend := func(nskip int) func(*node.HarnessNode) {
return func(node *node.HarnessNode) {
ht.Helper()
// Generate and skip the number of addresses requested.
ctxt, cancel := context.WithTimeout(
ctxb, defaultTimeout,
)
defer cancel()
for i := 0; i < nskip; i++ {
_, err = node.NewAddress(ctxt, newP2WKHAddrReq)
require.NoError(t.t, err)
req := &lnrpc.NewAddressRequest{}
_, err = node.NewAddress(ctxt, newNP2WKHAddrReq)
require.NoError(t.t, err)
req.Type = AddrTypeWitnessPubkeyHash
node.RPC.NewAddress(req)
_, err = node.NewAddress(ctxt, newP2TRAddrReq)
require.NoError(t.t, err)
req.Type = AddrTypeNestedPubkeyHash
node.RPC.NewAddress(req)
req.Type = AddrTypeTaprootPubkey
node.RPC.NewAddress(req)
}
// Send one BTC to the next P2WKH address.
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, node)
ht.FundCoins(btcutil.SatoshiPerBitcoin, node)
// And another to the next NP2WKH address.
net.SendCoinsNP2WKH(
t.t, btcutil.SatoshiPerBitcoin, node,
)
ht.FundCoinsNP2WKH(btcutil.SatoshiPerBitcoin, node)
// Add another whole coin to the P2TR address.
net.SendCoinsP2TR(
t.t, btcutil.SatoshiPerBitcoin, node,
)
ht.FundCoinsP2TR(btcutil.SatoshiPerBitcoin, node)
}
}
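The factory above is combined with restoreCheckBalance elsewhere in this test to probe progressively larger recovery windows; those call sites fall outside this hunk. The intended usage is roughly the following (the amounts, UTXO counts and windows here are illustrative, not the test's actual values):

// A restore with a zero recovery window detects nothing; its callback then
// funds one coin each to the next P2WKH, NP2WKH and P2TR addresses.
restoreCheckBalance(0, 0, 0, skipAndSend(0))

// A later restore with a sufficiently wide window should discover the
// three UTXOs created by the previous callback.
restoreCheckBalance(3*btcutil.SatoshiPerBitcoin, 3, 20, skipAndSend(19))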
@ -316,25 +242,21 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) {
// avoid fee discrepancies and a change output is formed.
const minerAmt = 8 * btcutil.SatoshiPerBitcoin
const finalBalance = 9 * btcutil.SatoshiPerBitcoin
promptChangeAddr := func(node *lntest.HarnessNode) {
t.t.Helper()
promptChangeAddr := func(node *node.HarnessNode) {
ht.Helper()
minerAddr, err := net.Miner.NewAddress()
require.NoError(t.t, err)
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := node.SendCoins(ctxt, &lnrpc.SendCoinsRequest{
minerAddr := ht.Miner.NewMinerAddress()
req := &lnrpc.SendCoinsRequest{
Addr: minerAddr.String(),
Amount: minerAmt,
})
require.NoError(t.t, err)
txid, err := waitForTxInMempool(
net.Miner.Client, minerMempoolTimeout,
)
require.NoError(t.t, err)
require.Equal(t.t, txid.String(), resp.Txid)
}
resp := node.RPC.SendCoins(req)
block := mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, txid)
txid := ht.Miner.AssertNumTxsInMempool(1)[0]
require.Equal(ht, txid.String(), resp.Txid)
block := ht.MineBlocks(1)[0]
ht.Miner.AssertTxInBlock(block, txid)
}
restoreCheckBalance(finalBalance, 9, 20, promptChangeAddr)
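The rewritten promptChangeAddr illustrates the harness idiom for a single on-chain send: issue SendCoins, assert exactly one transaction hits the mempool, mine a block, and assert the transaction is included. Packaged as a hypothetical helper (not part of this diff) built only from the calls shown above:

// sendAndMine sends amt satoshis from hn to addr, waits for the resulting
// transaction to enter the mempool, mines one block and asserts that the
// transaction was confirmed in it.
func sendAndMine(ht *lntemp.HarnessTest, hn *node.HarnessNode,
	addr string, amt int64) {

	resp := hn.RPC.SendCoins(&lnrpc.SendCoinsRequest{
		Addr:   addr,
		Amount: amt,
	})

	txid := ht.Miner.AssertNumTxsInMempool(1)[0]
	require.Equal(ht, txid.String(), resp.Txid)

	block := ht.MineBlocks(1)[0]
	ht.Miner.AssertTxInBlock(block, txid)
}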
@ -350,11 +272,11 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) {
var seedMnemonic aezeed.Mnemonic
copy(seedMnemonic[:], mnemonic)
cipherSeed, err := seedMnemonic.ToCipherSeed(password)
require.NoError(t.t, err)
require.NoError(ht, err)
extendedRootKey, err := hdkeychain.NewMaster(
cipherSeed.Entropy[:], harnessNetParams,
)
require.NoError(t.t, err)
require.NoError(ht, err)
rootKey = extendedRootKey.String()
mnemonic = nil

View File

@ -4,94 +4,14 @@
package itest
var allTestCases = []*testCase{
{
name: "sweep coins",
test: testSweepAllCoins,
},
{
name: "list addresses",
test: testListAddresses,
},
{
name: "recovery info",
test: testGetRecoveryInfo,
},
{
name: "onchain fund recovery",
test: testOnchainFundRecovery,
},
{
name: "basic funding flow with all input types",
test: testChannelFundingInputTypes,
},
{
name: "unconfirmed channel funding",
test: testUnconfirmedChannelFunding,
},
{
name: "update channel policy",
test: testUpdateChannelPolicy,
},
{
name: "update channel policy fee rate accuracy",
test: testUpdateChannelPolicyFeeRateAccuracy,
},
{
name: "open channel reorg test",
test: testOpenChannelAfterReorg,
},
{
name: "disconnecting target peer",
test: testDisconnectingTargetPeer,
},
{
name: "reconnect after ip change",
test: testReconnectAfterIPChange,
},
{
name: "graph topology notifications",
test: testGraphTopologyNotifications,
},
{
name: "funding flow persistence",
test: testChannelFundingPersistence,
},
{
name: "channel force closure",
test: testChannelForceClosure,
},
{
name: "channel balance",
test: testChannelBalance,
},
{
name: "channel unsettled balance",
test: testChannelUnsettledBalance,
},
{
name: "single hop invoice",
test: testSingleHopInvoice,
},
{
name: "sphinx replay persistence",
test: testSphinxReplayPersistence,
},
{
name: "list channels",
test: testListChannels,
},
{
name: "test update node announcement rpc",
test: testUpdateNodeAnnouncement,
},
{
name: "list outgoing payments",
test: testListPayments,
},
{
name: "max pending channel",
test: testMaxPendingChannels,
},
{
name: "multi-hop payments",
test: testMultiHopPayments,
@ -108,18 +28,10 @@ var allTestCases = []*testCase{
name: "send to route error propagation",
test: testSendToRouteErrorPropagation,
},
{
name: "unannounced channels",
test: testUnannouncedChannels,
},
{
name: "private channels",
test: testPrivateChannels,
},
{
name: "private channel update policy",
test: testUpdateChannelPolicyForPrivateChannel,
},
{
name: "invoice routing hints",
test: testInvoiceRoutingHints,
@ -132,27 +44,10 @@ var allTestCases = []*testCase{
name: "multiple channel creation and update subscription",
test: testBasicChannelCreationAndUpdates,
},
{
name: "invoice update subscription",
test: testInvoiceSubscriptions,
},
{
name: "multi-hop htlc error propagation",
test: testHtlcErrorPropagation,
},
{
name: "reject onward htlc",
test: testRejectHTLC,
},
// TODO(roasbeef): multi-path integration test
{
name: "node announcement",
test: testNodeAnnouncement,
},
{
name: "node sign verify",
test: testNodeSignVerify,
},
{
name: "derive shared key",
test: testDeriveSharedKey,
@ -195,18 +90,6 @@ var allTestCases = []*testCase{
name: "revoked uncooperative close retribution",
test: testRevokedCloseRetribution,
},
{
name: "failing link",
test: testFailingChannel,
},
{
name: "garbage collect link nodes",
test: testGarbageCollectLinkNodes,
},
{
name: "abandonchannel",
test: testAbandonChannel,
},
{
name: "revoked uncooperative close retribution zero value remote output",
test: testRevokedCloseRetributionZeroValueRemoteOutput,
@ -227,18 +110,6 @@ var allTestCases = []*testCase{
name: "route fee cutoff",
test: testRouteFeeCutoff,
},
{
name: "send update disable channel",
test: testSendUpdateDisableChannel,
},
{
name: "streaming channel backup update",
test: testChannelBackupUpdates,
},
{
name: "export channel backup",
test: testExportChannelBackup,
},
{
name: "hold invoice sender persistence",
test: testHoldInvoicePersistence,
@ -247,10 +118,6 @@ var allTestCases = []*testCase{
name: "hold invoice force close",
test: testHoldInvoiceForceClose,
},
{
name: "commitment deadline",
test: testCommitmentTransactionDeadline,
},
{
name: "cpfp",
test: testCPFP,
@ -271,10 +138,6 @@ var allTestCases = []*testCase{
name: "delete macaroon id",
test: testDeleteMacaroonID,
},
{
name: "immediate payment after channel opened",
test: testPaymentFollowingChannelOpen,
},
{
name: "psbt channel funding",
test: testPsbtChanFunding,
@ -287,10 +150,6 @@ var allTestCases = []*testCase{
name: "sign psbt",
test: testSignPsbt,
},
{
name: "batch channel funding",
test: testBatchChanFunding,
},
{
name: "psbt channel funding single step",
test: testPsbtChanFundingSingleStep,
@ -339,10 +198,6 @@ var allTestCases = []*testCase{
name: "maximum channel size",
test: testMaxChannelSize,
},
{
name: "connection timeout",
test: testNetworkConnectionTimeout,
},
{
name: "stateless init",
test: testStatelessInit,
@ -383,10 +238,6 @@ var allTestCases = []*testCase{
name: "taproot",
test: testTaproot,
},
{
name: "addpeer config",
test: testAddPeerConfig,
},
{
name: "resolution handoff",
test: testResHandoff,

View File

@ -5,8 +5,6 @@ import (
"crypto/rand"
"fmt"
"io"
"strconv"
"strings"
"testing"
"time"
@ -501,42 +499,6 @@ func getOutputIndex(t *harnessTest, miner *lntest.HarnessMiner,
return p2trOutputIndex
}
// parseDerivationPath parses a path in the form of m/x'/y'/z'/a/b into a slice
// of [x, y, z, a, b], meaning that the apostrophe is ignored and 2^31 is _not_
// added to the numbers.
func parseDerivationPath(path string) ([]uint32, error) {
path = strings.TrimSpace(path)
if len(path) == 0 {
return nil, fmt.Errorf("path cannot be empty")
}
if !strings.HasPrefix(path, "m/") {
return nil, fmt.Errorf("path must start with m/")
}
// Just the root key, no path was provided. This is valid but not useful
// in most cases.
rest := strings.ReplaceAll(path, "m/", "")
if rest == "" {
return []uint32{}, nil
}
parts := strings.Split(rest, "/")
indices := make([]uint32, len(parts))
for i := 0; i < len(parts); i++ {
part := parts[i]
if strings.Contains(parts[i], "'") {
part = strings.TrimRight(parts[i], "'")
}
parsed, err := strconv.ParseInt(part, 10, 32)
if err != nil {
return nil, fmt.Errorf("could not parse part \"%s\": "+
"%v", part, err)
}
indices[i] = uint32(parsed)
}
return indices, nil
}
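For reference, the removed parseDerivationPath deliberately ignored hardened markers and never added the 2^31 offset, so callers received the raw indices. Under that convention (a small illustrative usage, not code from the repository):

// Apostrophes are stripped and no hardening offset is applied.
indices, err := parseDerivationPath("m/84'/0'/0'/0/5")
// err == nil, indices == []uint32{84, 0, 0, 0, 5}

// Paths must start with "m/", otherwise an error is returned.
_, err = parseDerivationPath("84'/0'/0'")
// err != nil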
// acceptChannel is used to accept a single channel that comes across. This
// should be run in a goroutine and is used to test nodes with the zero-conf
// feature bit.

View File

@ -28,5 +28,5 @@ const (
// NodeStartTimeout is the timeout value when waiting for a node to
// become fully started.
NodeStartTimeout = time.Second * 60
NodeStartTimeout = time.Second * 120
)

View File

@ -28,5 +28,5 @@ const (
// NodeStartTimeout is the timeout value when waiting for a node to
// become fully started.
NodeStartTimeout = time.Second * 60
NodeStartTimeout = time.Second * 120
)

View File

@ -13,6 +13,9 @@ const PollInterval = 200 * time.Millisecond
// timing doesn't always line up well when running integration tests with
// several running lnd nodes. This function gives callers a way to assert that
// some property is upheld within a particular time frame.
//
// TODO(yy): build a counter here so we know how many times we've tried the
// `pred`.
func Predicate(pred func() bool, timeout time.Duration) error {
exitTimer := time.After(timeout)
result := make(chan bool, 1)
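The new TODO asks for a counter of predicate attempts. A minimal sketch of that idea, reusing the package's PollInterval but with a simpler synchronous loop than the goroutine-based Predicate (this helper does not exist in the diff and assumes the package imports fmt and time; it only illustrates the TODO):

// PredicateCounted behaves like Predicate but also reports how many times
// the predicate was evaluated, which helps when debugging flaky polling.
func PredicateCounted(pred func() bool,
	timeout time.Duration) (int, error) {

	exitTimer := time.After(timeout)
	attempts := 0

	for {
		select {
		case <-exitTimer:
			return attempts, fmt.Errorf("predicate not satisfied "+
				"after %v (%d attempts)", timeout, attempts)

		case <-time.After(PollInterval):
			attempts++
			if pred() {
				return attempts, nil
			}
		}
	}
}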

View File

@ -81,11 +81,11 @@ LOG_TAGS := nolog
endif
# If a timeout was requested, construct the proper flag for the go
# test command. If not, we set 60m (up from the default 10m).
# test command. If not, we set 120m (up from the default 10m).
ifneq ($(timeout),)
TEST_FLAGS += -test.timeout=$(timeout)
else
TEST_FLAGS += -test.timeout=60m
TEST_FLAGS += -test.timeout=120m
endif
GOLIST := go list -tags="$(DEV_TAGS)" -deps $(PKG)/... | grep '$(PKG)'| grep -v '/vendor/'