Merge pull request #6823 from yyforyongyu/4-new-itest

itest: continued itest refactor and fix - III
Oliver Gugger 2023-01-17 09:38:31 +01:00 committed by GitHub
commit b4febb382a
43 changed files with 3394 additions and 4828 deletions

View file

@ -351,7 +351,8 @@ PRs([6776](https://github.com/lightningnetwork/lnd/pull/6776),
[6822](https://github.com/lightningnetwork/lnd/pull/6822),
[7172](https://github.com/lightningnetwork/lnd/pull/7172),
[7242](https://github.com/lightningnetwork/lnd/pull/7242),
[7245](https://github.com/lightningnetwork/lnd/pull/7245)) have been made to
[7245](https://github.com/lightningnetwork/lnd/pull/7245)),
[6823](https://github.com/lightningnetwork/lnd/pull/6823)) have been made to
refactor the itest for code health and maintenance.
# Contributors (Alphabetical Order)

View file

@ -1505,6 +1505,10 @@ func (f *Manager) handleFundingOpen(peer lnpeer.Peer,
return
}
log.Debugf("Initialized channel reservation: zeroConf=%v, psbt=%v, "+
"cannedShim=%v", reservation.IsZeroConf(),
reservation.IsPsbt(), reservation.IsCannedShim())
if zeroConf {
// Store an alias for zero-conf channels. Other option-scid
// channels will do this at a later point.

View file

@ -14,6 +14,7 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/kvdb/etcd"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
@ -155,6 +156,16 @@ func (h *HarnessTest) ChainBackendName() string {
return h.manager.chainBackend.Name()
}
// Context returns the run context used in this test. Usually it should be
// managed by the test itself, otherwise undefined behaviors will occur. It can
// be used, however, when a test needs to have its own context being managed
// differently. In that case, instead of using a background context, the run
// context should be used such that the test context scope can be fully
// controlled.
func (h *HarnessTest) Context() context.Context {
return h.runCtx
}
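A minimal sketch of the intended use, assuming a hypothetical itest case that opens a long-lived subscription itself and wants it scoped to the harness run context rather than to context.Background() (testWithOwnContext and the subscription are illustrative only):

func testWithOwnContext(ht *lntemp.HarnessTest) {
	// Derive a cancellable context from the run context so the
	// subscription is torn down together with the test.
	ctxt, cancel := context.WithCancel(ht.Context())
	defer cancel()

	// The stream opened with ctxt lives at most as long as the
	// harness run context.
	stream, err := ht.Alice.RPC.LN.SubscribeTransactions(
		ctxt, &lnrpc.GetTransactionsRequest{},
	)
	require.NoError(ht, err, "subscribe transactions")

	// ...consume updates from stream for the duration of the test...
	_ = stream
}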
// SetupStandbyNodes starts the initial seeder nodes within the test harness.
// The initial nodes' wallets will be funded with 10x10 BTC outputs each.
func (h *HarnessTest) SetupStandbyNodes() {
@ -205,7 +216,8 @@ func (h *HarnessTest) SetupStandbyNodes() {
// We generate several blocks in order to give the outputs created
// above a good number of confirmations.
h.MineBlocks(numBlocksSendOutput)
const totalTxes = 20
h.MineBlocksAndAssertNumTxes(numBlocksSendOutput, totalTxes)
// Now we want to wait for the nodes to catch up.
h.WaitForBlockchainSync(h.Alice)
@ -283,6 +295,9 @@ func (h *HarnessTest) resetStandbyNodes(t *testing.T) {
// config for the coming test. This will also inherit the
// test's running context.
h.RestartNodeWithExtraArgs(hn, hn.Cfg.OriginalExtraArgs)
// Update the node's internal state.
hn.UpdateState()
}
}
@ -338,10 +353,6 @@ func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
return
}
// We require the mempool to be cleaned from the test.
require.Empty(st, st.Miner.GetRawMempool(), "mempool not "+
"cleaned, please mine blocks to clean them all.")
// When we finish the test, reset the nodes' configs and take a
// snapshot of each of the nodes' internal states.
for _, node := range st.manager.standbyNodes {
@ -351,8 +362,9 @@ func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
// If found running nodes, shut them down.
st.shutdownNonStandbyNodes()
// Assert that mempool is cleaned
st.Miner.AssertNumTxsInMempool(0)
// We require the mempool to be cleaned from the test.
require.Empty(st, st.Miner.GetRawMempool(), "mempool not "+
"cleaned, please mine blocks to clean them all.")
// Finally, cancel the run context. We have to do it here
// because we need to keep the context alive for the above
@ -397,9 +409,6 @@ func (h *HarnessTest) cleanupStandbyNode(hn *node.HarnessNode) {
// Delete all payments made from this test.
hn.RPC.DeleteAllPayments()
// Update the node's internal state.
hn.UpdateState()
// Finally, check the node is in a clean state for the following tests.
h.validateNodeState(hn)
}
@ -469,17 +478,48 @@ func (h *HarnessTest) SuspendNode(node *node.HarnessNode) func() error {
return func() error {
h.manager.registerNode(node)
return node.Start(h.runCtx)
if err := node.Start(h.runCtx); err != nil {
return err
}
h.WaitForBlockchainSync(node)
return nil
}
}
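A short usage sketch of the updated closure; carol is a placeholder for a node previously created in the test:

// Suspend Carol and keep the closure that brings her back up.
restartCarol := ht.SuspendNode(carol)

// ...exercise behavior while Carol is offline...

// The closure re-registers the node, restarts the process and now also
// waits for the node to sync to the chain before returning.
require.NoError(ht, restartCarol(), "restart carol")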
// RestartNode restarts a given node and asserts.
func (h *HarnessTest) RestartNode(hn *node.HarnessNode,
// RestartNode restarts a given node, unlocks it and asserts it's successfully
// started.
func (h *HarnessTest) RestartNode(hn *node.HarnessNode) {
err := h.manager.restartNode(h.runCtx, hn, nil)
require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
err = h.manager.unlockNode(hn)
require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())
if !hn.Cfg.SkipUnlock {
// Give the node some time to catch up with the chain before we
// continue with the tests.
h.WaitForBlockchainSync(hn)
}
}
// RestartNodeNoUnlock restarts a given node without unlocking its wallet.
func (h *HarnessTest) RestartNodeNoUnlock(hn *node.HarnessNode) {
err := h.manager.restartNode(h.runCtx, hn, nil)
require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
}
// RestartNodeWithChanBackups restarts a given node with the specified channel
// backups.
func (h *HarnessTest) RestartNodeWithChanBackups(hn *node.HarnessNode,
chanBackups ...*lnrpc.ChanBackupSnapshot) {
err := h.manager.restartNode(h.runCtx, hn, nil, chanBackups...)
err := h.manager.restartNode(h.runCtx, hn, nil)
require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
err = h.manager.unlockNode(hn, chanBackups...)
require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())
// Give the node some time to catch up with the chain before we
// continue with the tests.
h.WaitForBlockchainSync(hn)
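To summarize the three restart helpers above, a usage sketch where dave and backupSnapshot are placeholders for a harness node and a previously captured *lnrpc.ChanBackupSnapshot:

// Restart the process, unlock the wallet and wait for chain sync.
ht.RestartNode(dave)

// Restart without unlocking, e.g. to drive the wallet unlocker RPC
// manually afterwards.
ht.RestartNodeNoUnlock(dave)

// Restart and unlock with a channel backup so the channels are
// restored with a recovery window.
ht.RestartNodeWithChanBackups(dave, backupSnapshot)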
@ -490,7 +530,7 @@ func (h *HarnessTest) RestartNodeWithExtraArgs(hn *node.HarnessNode,
extraArgs []string) {
hn.SetExtraArgs(extraArgs)
h.RestartNode(hn, nil)
h.RestartNode(hn)
}
// NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh
@ -525,7 +565,7 @@ func (h *HarnessTest) newNodeWithSeed(name string,
// Start the node with seed only, which will only create the `State`
// and `WalletUnlocker` clients.
err = node.StartWithSeed(h.runCtx)
err = node.StartWithNoAuth(h.runCtx)
require.NoErrorf(h, err, "failed to start node %s", node.Name())
// Generate a new seed.
@ -568,7 +608,7 @@ func (h *HarnessTest) RestoreNodeWithSeed(name string, extraArgs []string,
// Start the node with seed only, which will only create the `State`
// and `WalletUnlocker` clients.
err = node.StartWithSeed(h.runCtx)
err = node.StartWithNoAuth(h.runCtx)
require.NoErrorf(h, err, "failed to start node %s", node.Name())
// Create the wallet.
@ -587,6 +627,61 @@ func (h *HarnessTest) RestoreNodeWithSeed(name string, extraArgs []string,
return node
}
// NewNodeEtcd starts a new node with seed that'll use an external etcd
// database as its storage. The passed cluster flag indicates that we'd like
// the node to join the cluster leader election. We won't wait until RPC is
// available (this is useful when the node is not expected to become the leader
// right away).
func (h *HarnessTest) NewNodeEtcd(name string, etcdCfg *etcd.Config,
password []byte, cluster bool,
leaderSessionTTL int) *node.HarnessNode {
// We don't want to use the embedded etcd instance.
h.manager.dbBackend = lntest.BackendBbolt
extraArgs := node.ExtraArgsEtcd(
etcdCfg, name, cluster, leaderSessionTTL,
)
node, err := h.manager.newNode(h.T, name, extraArgs, password, true)
require.NoError(h, err, "failed to create new node with etcd")
// Start the node daemon only.
err = node.StartLndCmd(h.runCtx)
require.NoError(h, err, "failed to start node %s", node.Name())
return node
}
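A sketch of how the etcd helpers are meant to be combined in an HA test; etcdCfg, password, entropy and leaderSessionTTL are assumed to be set up already (compare the etcd failover test further below):

// Carol-1 is created from seed, joins the leader election and becomes
// the initial leader.
carol1, _, _ := ht.NewNodeWithSeedEtcd(
	"Carol-1", etcdCfg, password, entropy[:], false, true,
	leaderSessionTTL,
)

// Carol-2 shares the same external etcd backend and waits in the
// election; its RPC is not expected to be available yet.
carol2 := ht.NewNodeEtcd(
	"Carol-2", etcdCfg, password, true, leaderSessionTTL,
)

// The test can now open channels to carol1 and later fail over to
// carol2.
_, _ = carol1, carol2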
// NewNodeWithSeedEtcd starts a new node with seed that'll use an external etcd
// database as its storage. The passed cluster flag indicates that we'd like
// the node to join the cluster leader election.
func (h *HarnessTest) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config,
password []byte, entropy []byte, statelessInit, cluster bool,
leaderSessionTTL int) (*node.HarnessNode, []string, []byte) {
// We don't want to use the embedded etcd instance.
h.manager.dbBackend = lntest.BackendBbolt
// Create a request to generate a new aezeed. The new seed will have
// the same password as the internal wallet.
req := &lnrpc.GenSeedRequest{
AezeedPassphrase: password,
SeedEntropy: nil,
}
extraArgs := node.ExtraArgsEtcd(
etcdCfg, name, cluster, leaderSessionTTL,
)
return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
}
// KillNode kills the node (but won't wait for the node process to stop).
func (h *HarnessTest) KillNode(hn *node.HarnessNode) {
require.NoErrorf(h, hn.Kill(), "%s: kill got error", hn.Name())
delete(h.manager.activeNodes, hn.Cfg.NodeID)
}
// SetFeeEstimate sets a fee rate to be returned from fee estimator.
//
// NOTE: this method will set the fee rate for a conf target of 1, which is the
@ -1276,6 +1371,9 @@ func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
err := h.manager.restartNode(h.runCtx, hn, cb)
require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
err = h.manager.unlockNode(hn)
require.NoErrorf(h, err, "failed to unlock node %s", hn.Name())
// Give the node some time to catch up with the chain before we
// continue with the tests.
h.WaitForBlockchainSync(hn)
@ -1325,6 +1423,19 @@ func (h *HarnessTest) MineBlocksAndAssertNumTxes(num uint32,
return blocks
}
// MineEmptyBlocks mines a given number of empty blocks.
//
// NOTE: this differs from miner's `MineEmptyBlocks` as it requires the nodes
// to be synced.
func (h *HarnessTest) MineEmptyBlocks(num int) []*wire.MsgBlock {
blocks := h.Miner.MineEmptyBlocks(num)
// Finally, make sure all the active nodes are synced.
h.AssertActiveNodesSynced()
return blocks
}
// QueryChannelByChanPoint tries to find a channel matching the channel point
// and asserts. It returns the channel found.
func (h *HarnessTest) QueryChannelByChanPoint(hn *node.HarnessNode,
@ -1358,6 +1469,14 @@ func (h *HarnessTest) SendPaymentAssertFail(hn *node.HarnessNode,
return payment
}
// SendPaymentAssertSettled sends a payment from the passed node and asserts the
// payment is settled.
func (h *HarnessTest) SendPaymentAssertSettled(hn *node.HarnessNode,
req *routerrpc.SendPaymentRequest) *lnrpc.Payment {
return h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_SUCCEEDED)
}
// OpenChannelRequest is used to open a channel using the method
// OpenMultiChannelsAsync.
type OpenChannelRequest struct {
@ -1630,3 +1749,51 @@ func findSweepInDetails(ht *HarnessTest, sweepTxid string,
return false
}
// ConnectMiner connects the miner with the chain backend in the network.
func (h *HarnessTest) ConnectMiner() {
err := h.manager.chainBackend.ConnectMiner()
require.NoError(h, err, "failed to connect miner")
}
// DisconnectMiner removes the connection between the miner and the chain
// backend in the network.
func (h *HarnessTest) DisconnectMiner() {
err := h.manager.chainBackend.DisconnectMiner()
require.NoError(h, err, "failed to disconnect miner")
}
// QueryRoutesAndRetry attempts to keep querying a route until timeout is
// reached.
//
// NOTE: when a channel is opened, we may need to query multiple times to get
// it in our QueryRoutes RPC. This happens even after we check the channel is
// heard by the node using ht.AssertChannelOpen. Deep down, this is because our
// GraphTopologySubscription and QueryRoutes give different results regarding a
// specific channel, with the former reporting it as open while the latter does
// not, resulting in GraphTopologySubscription acting "faster" than QueryRoutes.
// TODO(yy): make sure related subsystems share the same view on a given
// channel.
func (h *HarnessTest) QueryRoutesAndRetry(hn *node.HarnessNode,
req *lnrpc.QueryRoutesRequest) *lnrpc.QueryRoutesResponse {
var routes *lnrpc.QueryRoutesResponse
err := wait.NoError(func() error {
ctxt, cancel := context.WithCancel(h.runCtx)
defer cancel()
resp, err := hn.RPC.LN.QueryRoutes(ctxt, req)
if err != nil {
return fmt.Errorf("%s: failed to query route: %w",
hn.Name(), err)
}
routes = resp
return nil
}, DefaultTimeout)
require.NoError(h, err, "timeout querying routes")
return routes
}
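A usage sketch, assuming Alice has just opened a channel towards Bob and the payment amount is a placeholder:

const paymentAmt = 1000

// Right after the channel open the graph subscription and QueryRoutes
// may briefly disagree, so retry until a route to Bob shows up.
routes := ht.QueryRoutesAndRetry(ht.Alice, &lnrpc.QueryRoutesRequest{
	PubKey: ht.Bob.PubKeyStr,
	Amt:    paymentAmt,
})
require.NotEmpty(ht, routes.Routes, "expected at least one route")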

View file

@ -19,6 +19,7 @@ import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntemp/rpc"
@ -564,8 +565,10 @@ func (h *HarnessTest) AssertStreamChannelCoopClosed(hn *node.HarnessNode,
h.AssertNumWaitingClose(hn, 0)
// Finally, check that the node's topology graph has seen this channel
// closed.
// closed if it's a public channel.
if !resp.Channel.Private {
h.AssertTopologyChannelClosed(hn, cp)
}
return closingTxid
}
@ -610,8 +613,10 @@ func (h *HarnessTest) AssertStreamChannelForceClosed(hn *node.HarnessNode,
h.AssertNumPendingForceClose(hn, 1)
// Finally, check that the node's topology graph has seen this channel
// closed.
// closed if it's a public channel.
if !resp.Channel.Private {
h.AssertTopologyChannelClosed(hn, cp)
}
return closingTxid
}
@ -884,7 +889,11 @@ func (h *HarnessTest) assertPaymentStatusWithTimeout(stream rpc.PaymentClient,
err := wait.NoError(func() error {
// Consume one message. This will raise an error if the message
// is not received within DefaultTimeout.
payment := h.ReceivePaymentUpdate(stream)
payment, err := h.ReceivePaymentUpdate(stream)
if err != nil {
return fmt.Errorf("received error from payment "+
"stream: %s", err)
}
// Return if the desired payment state is reached.
if payment.Status == status {
@ -895,8 +904,8 @@ func (h *HarnessTest) assertPaymentStatusWithTimeout(stream rpc.PaymentClient,
// Return the err so that it can be used for debugging when
// timeout is reached.
return fmt.Errorf("payment status, got %v, want %v",
payment.Status, status)
return fmt.Errorf("payment %v status, got %v, want %v",
payment.PaymentHash, payment.Status, status)
}, timeout)
require.NoError(h, err, "timeout while waiting payment")
@ -907,7 +916,7 @@ func (h *HarnessTest) assertPaymentStatusWithTimeout(stream rpc.PaymentClient,
// ReceivePaymentUpdate waits until a message is received on the payment client
// stream or the timeout is reached.
func (h *HarnessTest) ReceivePaymentUpdate(
stream rpc.PaymentClient) *lnrpc.Payment {
stream rpc.PaymentClient) (*lnrpc.Payment, error) {
chanMsg := make(chan *lnrpc.Payment, 1)
errChan := make(chan error, 1)
@ -926,16 +935,14 @@ func (h *HarnessTest) ReceivePaymentUpdate(
select {
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout waiting for payment update")
return nil, nil
case err := <-errChan:
require.Failf(h, "payment stream",
"received err from payment stream: %v", err)
return nil, err
case updateMsg := <-chanMsg:
return updateMsg
return updateMsg, nil
}
return nil
}
// AssertInvoiceSettled asserts a given invoice specified by its payment
@ -1724,3 +1731,346 @@ func (h *HarnessTest) CreateBurnAddr(addrType lnrpc.AddressType) ([]byte,
return h.PayToAddrScript(addr), addr
}
// ReceiveTrackPayment waits until a message is received on the track payment
// stream or the timeout is reached.
func (h *HarnessTest) ReceiveTrackPayment(
stream rpc.TrackPaymentClient) *lnrpc.Payment {
chanMsg := make(chan *lnrpc.Payment)
errChan := make(chan error)
go func() {
// Consume one message. This will block until the message is
// received.
resp, err := stream.Recv()
if err != nil {
errChan <- err
return
}
chanMsg <- resp
}()
select {
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout tracking payment")
case err := <-errChan:
require.Failf(h, "err from stream",
"received err from stream: %v", err)
case updateMsg := <-chanMsg:
return updateMsg
}
return nil
}
// ReceiveHtlcEvent waits until a message is received on the subscribe
// htlc event stream or the timeout is reached.
func (h *HarnessTest) ReceiveHtlcEvent(
stream rpc.HtlcEventsClient) *routerrpc.HtlcEvent {
chanMsg := make(chan *routerrpc.HtlcEvent)
errChan := make(chan error)
go func() {
// Consume one message. This will block until the message is
// received.
resp, err := stream.Recv()
if err != nil {
errChan <- err
return
}
chanMsg <- resp
}()
select {
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout receiving htlc "+
"event update")
case err := <-errChan:
require.Failf(h, "err from stream",
"received err from stream: %v", err)
case updateMsg := <-chanMsg:
return updateMsg
}
return nil
}
// AssertHtlcEventType consumes one event from a client and asserts the event
// type is matched.
func (h *HarnessTest) AssertHtlcEventType(client rpc.HtlcEventsClient,
userType routerrpc.HtlcEvent_EventType) *routerrpc.HtlcEvent {
event := h.ReceiveHtlcEvent(client)
require.Equalf(h, userType, event.EventType, "wrong event type, "+
"want %v got %v", userType, event.EventType)
return event
}
// HtlcEvent maps the series of event types used in `*routerrpc.HtlcEvent_*`.
type HtlcEvent int
const (
HtlcEventForward HtlcEvent = iota
HtlcEventForwardFail
HtlcEventSettle
HtlcEventLinkFail
HtlcEventFinal
)
// AssertHtlcEventTypes consumes one event from a client and asserts that both
// the user event type and the event.Event type match.
func (h *HarnessTest) AssertHtlcEventTypes(client rpc.HtlcEventsClient,
userType routerrpc.HtlcEvent_EventType,
eventType HtlcEvent) *routerrpc.HtlcEvent {
event := h.ReceiveHtlcEvent(client)
require.Equalf(h, userType, event.EventType, "wrong event type, "+
"want %v got %v", userType, event.EventType)
var ok bool
switch eventType {
case HtlcEventForward:
_, ok = event.Event.(*routerrpc.HtlcEvent_ForwardEvent)
case HtlcEventForwardFail:
_, ok = event.Event.(*routerrpc.HtlcEvent_ForwardFailEvent)
case HtlcEventSettle:
_, ok = event.Event.(*routerrpc.HtlcEvent_SettleEvent)
case HtlcEventLinkFail:
_, ok = event.Event.(*routerrpc.HtlcEvent_LinkFailEvent)
case HtlcEventFinal:
_, ok = event.Event.(*routerrpc.HtlcEvent_FinalHtlcEvent)
}
require.Truef(h, ok, "wrong event type: %T, want %T", event.Event,
eventType)
return event
}
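A sketch of how the enum and the assertion combine when checking a forwarding node's HTLC event stream; aliceEvents is a placeholder subscription created via SubscribeHtlcEvents and package qualifiers are omitted:

// The forwarding node should first report the forward attempt and then
// the settle, both tagged as FORWARD events.
ht.AssertHtlcEventTypes(
	aliceEvents, routerrpc.HtlcEvent_FORWARD, HtlcEventForward,
)
ht.AssertHtlcEventTypes(
	aliceEvents, routerrpc.HtlcEvent_FORWARD, HtlcEventSettle,
)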
// AssertFeeReport checks that the fee report from the given node has the
// desired day, week, and month sum values.
func (h *HarnessTest) AssertFeeReport(hn *node.HarnessNode,
day, week, month int) {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
feeReport, err := hn.RPC.LN.FeeReport(ctxt, &lnrpc.FeeReportRequest{})
require.NoError(h, err, "unable to query for fee report")
require.EqualValues(h, day, feeReport.DayFeeSum, "day fee mismatch")
require.EqualValues(h, week, feeReport.WeekFeeSum, "week fee mismatch")
require.EqualValues(h, month, feeReport.MonthFeeSum,
"month fee mismatch")
}
// AssertHtlcEvents consumes events from a client and ensures that they are of
// the expected type and contain the expected number of forwards, forward
// failures and settles.
//
// TODO(yy): needs refactor to reduce its complexity.
func (h *HarnessTest) AssertHtlcEvents(client rpc.HtlcEventsClient,
fwdCount, fwdFailCount, settleCount int,
userType routerrpc.HtlcEvent_EventType) []*routerrpc.HtlcEvent {
var forwards, forwardFails, settles int
numEvents := fwdCount + fwdFailCount + settleCount
events := make([]*routerrpc.HtlcEvent, 0)
// It's either the userType or the unknown type.
//
// TODO(yy): maybe the FinalHtlcEvent shouldn't be in UNKNOWN type?
eventTypes := []routerrpc.HtlcEvent_EventType{
userType, routerrpc.HtlcEvent_UNKNOWN,
}
for i := 0; i < numEvents; i++ {
event := h.ReceiveHtlcEvent(client)
require.Containsf(h, eventTypes, event.EventType,
"wrong event type, want %v, got %v", userType, event.EventType)
events = append(events, event)
switch e := event.Event.(type) {
case *routerrpc.HtlcEvent_ForwardEvent:
forwards++
case *routerrpc.HtlcEvent_ForwardFailEvent:
forwardFails++
case *routerrpc.HtlcEvent_SettleEvent:
settles++
case *routerrpc.HtlcEvent_FinalHtlcEvent:
if e.FinalHtlcEvent.Settled {
settles++
}
default:
require.Fail(h, "assert event fail",
"unexpected event: %T", event.Event)
}
}
require.Equal(h, fwdCount, forwards, "num of forwards mismatch")
require.Equal(h, fwdFailCount, forwardFails,
"num of forward fails mismatch")
require.Equal(h, settleCount, settles, "num of settles mismatch")
return events
}
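For a single settled payment forwarded through one hop, a typical call looks like this sketch, with bobEvents a placeholder HTLC event subscription on the forwarding node:

// Expect one forward and one settle event for the forwarded HTLC, and
// no forward failures.
ht.AssertHtlcEvents(bobEvents, 1, 0, 1, routerrpc.HtlcEvent_FORWARD)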
// AssertTransactionInWallet asserts a given txid can be found in the node's
// wallet.
func (h *HarnessTest) AssertTransactionInWallet(hn *node.HarnessNode,
txid chainhash.Hash) {
req := &lnrpc.GetTransactionsRequest{}
err := wait.NoError(func() error {
txResp := hn.RPC.GetTransactions(req)
for _, txn := range txResp.Transactions {
if txn.TxHash == txid.String() {
return nil
}
}
return fmt.Errorf("%s: expected txid=%v not found in wallet",
hn.Name(), txid)
}, DefaultTimeout)
require.NoError(h, err, "failed to find tx")
}
// AssertTransactionNotInWallet asserts a given txid can NOT be found in the
// node's wallet.
func (h *HarnessTest) AssertTransactionNotInWallet(hn *node.HarnessNode,
txid chainhash.Hash) {
req := &lnrpc.GetTransactionsRequest{}
err := wait.NoError(func() error {
txResp := hn.RPC.GetTransactions(req)
for _, txn := range txResp.Transactions {
if txn.TxHash == txid.String() {
return fmt.Errorf("expected txid=%v to be "+
"not found", txid)
}
}
return nil
}, DefaultTimeout)
require.NoErrorf(h, err, "%s: failed to assert tx not found", hn.Name())
}
// WaitForNodeBlockHeight queries the node for its current block height until
// it reaches the passed height.
func (h *HarnessTest) WaitForNodeBlockHeight(hn *node.HarnessNode,
height int32) {
err := wait.NoError(func() error {
info := hn.RPC.GetInfo()
if int32(info.BlockHeight) != height {
return fmt.Errorf("expected block height to "+
"be %v, was %v", height, info.BlockHeight)
}
return nil
}, DefaultTimeout)
require.NoErrorf(h, err, "%s: timeout while waiting for height",
hn.Name())
}
// AssertChannelCommitHeight asserts the given channel for the node has the
// expected commit height(`NumUpdates`).
func (h *HarnessTest) AssertChannelCommitHeight(hn *node.HarnessNode,
cp *lnrpc.ChannelPoint, height int) {
err := wait.NoError(func() error {
c, err := h.findChannel(hn, cp)
if err != nil {
return err
}
if int(c.NumUpdates) == height {
return nil
}
return fmt.Errorf("expected commit height to be %v, was %v",
height, c.NumUpdates)
}, DefaultTimeout)
require.NoError(h, err, "timeout while waiting for commit height")
}
// AssertNumInvoices asserts that the number of invoices made within the test
// scope is as expected.
func (h *HarnessTest) AssertNumInvoices(hn *node.HarnessNode,
num int) []*lnrpc.Invoice {
have := hn.State.Invoice.Total
req := &lnrpc.ListInvoiceRequest{
NumMaxInvoices: math.MaxUint64,
IndexOffset: hn.State.Invoice.LastIndexOffset,
}
var invoices []*lnrpc.Invoice
err := wait.NoError(func() error {
resp := hn.RPC.ListInvoices(req)
invoices = resp.Invoices
if len(invoices) == num {
return nil
}
return errNumNotMatched(hn.Name(), "num of invoices",
num, len(invoices), have+len(invoices), have)
}, DefaultTimeout)
require.NoError(h, err, "timeout checking num of invoices")
return invoices
}
// ReceiveSendToRouteUpdate waits until a message is received on the
// SendToRoute client stream or the timeout is reached.
func (h *HarnessTest) ReceiveSendToRouteUpdate(
stream rpc.SendToRouteClient) (*lnrpc.SendResponse, error) {
chanMsg := make(chan *lnrpc.SendResponse, 1)
errChan := make(chan error, 1)
go func() {
// Consume one message. This will block until the message is
// received.
resp, err := stream.Recv()
if err != nil {
errChan <- err
return
}
chanMsg <- resp
}()
select {
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout waiting for send resp")
return nil, nil
case err := <-errChan:
return nil, err
case updateMsg := <-chanMsg:
return updateMsg, nil
}
}
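Call sites are expected to handle the returned error themselves, roughly as in this sketch (alicePayStream is a placeholder SendToRoute stream; the updated channel policy test below follows the same pattern):

sendResp, err := ht.ReceiveSendToRouteUpdate(alicePayStream)
require.NoError(ht, err, "unable to receive payment stream")
require.Empty(ht, sendResp.PaymentError, "expected payment to succeed")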

View file

@ -57,6 +57,16 @@ func NewMiner(ctxt context.Context, t *testing.T) *HarnessMiner {
return newMiner(ctxt, t, minerLogDir, minerLogFilename)
}
// NewTempMiner creates a new miner using btcd backend with the specified log
// file dir and name.
func NewTempMiner(ctxt context.Context, t *testing.T,
tempDir, tempLogFilename string) *HarnessMiner {
t.Helper()
return newMiner(ctxt, t, tempDir, tempLogFilename)
}
// newMiner creates a new miner using btcd's rpctest.
func newMiner(ctxb context.Context, t *testing.T, minerDirName,
logFilename string) *HarnessMiner {
@ -383,3 +393,35 @@ func (h *HarnessMiner) NewMinerAddress() btcutil.Address {
require.NoError(h, err, "failed to create new miner address")
return addr
}
// MineBlockWithTxes mines a single block that includes only the specified
// transactions.
func (h *HarnessMiner) MineBlockWithTxes(txes []*btcutil.Tx) *wire.MsgBlock {
var emptyTime time.Time
// Generate a block.
b, err := h.GenerateAndSubmitBlock(txes, -1, emptyTime)
require.NoError(h, err, "unable to mine block")
block, err := h.Client.GetBlock(b.Hash())
require.NoError(h, err, "unable to get block")
return block
}
// MineEmptyBlocks mines a given number of empty blocks.
func (h *HarnessMiner) MineEmptyBlocks(num int) []*wire.MsgBlock {
var emptyTime time.Time
blocks := make([]*wire.MsgBlock, num)
for i := 0; i < num; i++ {
// Generate an empty block.
b, err := h.GenerateAndSubmitBlock(nil, -1, emptyTime)
require.NoError(h, err, "unable to mine empty block")
block := h.GetBlock(b.Hash())
blocks[i] = block
}
return blocks
}

View file

@ -71,8 +71,7 @@ func (nm *nodeManager) nextNodeID() uint32 {
// node can be used immediately. Otherwise, the node will require an additional
// initialization phase where the wallet is either created or restored.
func (nm *nodeManager) newNode(t *testing.T, name string, extraArgs []string,
password []byte, useSeed bool,
opts ...node.Option) (*node.HarnessNode, error) {
password []byte, noAuth bool) (*node.HarnessNode, error) {
cfg := &node.BaseNodeConfig{
Name: name,
@ -85,10 +84,7 @@ func (nm *nodeManager) newNode(t *testing.T, name string, extraArgs []string,
NodeID: nm.nextNodeID(),
LndBinary: nm.lndBinary,
NetParams: harnessNetParams,
HasSeed: useSeed,
}
for _, opt := range opts {
opt(cfg)
SkipUnlock: noAuth,
}
node, err := node.NewHarnessNode(t, cfg)
@ -129,56 +125,11 @@ func (nm *nodeManager) shutdownNode(node *node.HarnessNode) error {
// the connection attempt is successful. If the callback parameter is non-nil,
// then the function will be executed after the node shuts down, but *before*
// the process has been started up again.
//
// This method can be useful when testing edge cases such as a node broadcast
// and invalidated prior state, or persistent state recovery, simulating node
// crashes, etc. Additionally, each time the node is restarted, the caller can
// pass a set of SCBs to pass in via the Unlock method allowing them to restore
// channels during restart.
func (nm *nodeManager) restartNode(ctxt context.Context, node *node.HarnessNode,
callback func() error, chanBackups ...*lnrpc.ChanBackupSnapshot) error {
func (nm *nodeManager) restartNode(ctxt context.Context,
hn *node.HarnessNode, callback func() error) error {
err := nm.restartNodeNoUnlock(ctxt, node, callback)
if err != nil {
return err
}
// If the node doesn't have a password set, then we can exit here as we
// don't need to unlock it.
if len(node.Cfg.Password) == 0 {
return nil
}
// Otherwise, we'll unlock the wallet, then complete the final steps
// for the node initialization process.
unlockReq := &lnrpc.UnlockWalletRequest{
WalletPassword: node.Cfg.Password,
}
if len(chanBackups) != 0 {
unlockReq.ChannelBackups = chanBackups[0]
unlockReq.RecoveryWindow = 100
}
err = wait.NoError(func() error {
return node.Unlock(unlockReq)
}, DefaultTimeout)
if err != nil {
return fmt.Errorf("%s: failed to unlock: %w", node.Name(), err)
}
return nil
}
// restartNodeNoUnlock attempts to restart a lightning node by shutting it down
// cleanly, then restarting the process. In case the node was setup with a
// seed, it will be left in the unlocked state. This function is fully
// blocking. If the callback parameter is non-nil, then the function will be
// executed after the node shuts down, but *before* the process has been
// started up again.
func (nm *nodeManager) restartNodeNoUnlock(ctxt context.Context,
node *node.HarnessNode, callback func() error) error {
if err := node.Stop(); err != nil {
// Stop the node.
if err := hn.Stop(); err != nil {
return fmt.Errorf("restart node got error: %w", err)
}
@ -188,11 +139,45 @@ func (nm *nodeManager) restartNodeNoUnlock(ctxt context.Context,
}
}
if node.Cfg.HasSeed {
return node.StartWithSeed(ctxt)
// Start the node without unlocking the wallet.
if hn.Cfg.SkipUnlock {
return hn.StartWithNoAuth(ctxt)
}
return node.Start(ctxt)
return hn.Start(ctxt)
}
// unlockNode unlocks the node's wallet if the password is configured.
// Additionally, each time the node is unlocked, the caller can pass a set of
// SCBs via the Unlock method, allowing channels to be restored during the
// restart.
func (nm *nodeManager) unlockNode(hn *node.HarnessNode,
chanBackups ...*lnrpc.ChanBackupSnapshot) error {
// If the node doesn't have a password set, then we can exit here as we
// don't need to unlock it.
if len(hn.Cfg.Password) == 0 {
return nil
}
// Otherwise, we'll unlock the wallet, then complete the final steps
// for the node initialization process.
unlockReq := &lnrpc.UnlockWalletRequest{
WalletPassword: hn.Cfg.Password,
}
if len(chanBackups) != 0 {
unlockReq.ChannelBackups = chanBackups[0]
unlockReq.RecoveryWindow = 100
}
err := wait.NoError(func() error {
return hn.Unlock(unlockReq)
}, DefaultTimeout)
if err != nil {
return fmt.Errorf("%s: failed to unlock: %w", hn.Name(), err)
}
return nil
}
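With restart and unlock now split, the harness-level helpers compose the two steps roughly like this sketch, where ctxt, hn and snapshot are placeholders (this mirrors what HarnessTest.RestartNodeWithChanBackups does):

// Restart the process first; the wallet stays locked if the node was
// configured with a password.
if err := nm.restartNode(ctxt, hn, nil); err != nil {
	return err
}

// Then unlock it, optionally restoring channels from a snapshot.
return nm.unlockNode(hn, snapshot)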
// initWalletAndNode will unlock the node's wallet and finish setting up the

View file

@ -7,6 +7,7 @@ import (
"github.com/btcsuite/btcd/chaincfg"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/kvdb/etcd"
"github.com/lightningnetwork/lnd/lntest"
)
@ -62,7 +63,7 @@ type BaseNodeConfig struct {
ReadMacPath string
InvoiceMacPath string
HasSeed bool
SkipUnlock bool
Password []byte
P2PPort int
@ -189,7 +190,7 @@ func (cfg *BaseNodeConfig) GenArgs() []string {
}
args = append(args, nodeArgs...)
if !cfg.HasSeed {
if cfg.Password == nil {
args = append(args, "--noseedbackup")
}
@ -232,3 +233,33 @@ func (cfg *BaseNodeConfig) GenArgs() []string {
return args
}
// ExtraArgsEtcd returns extra args for configuring LND to use an external etcd
// database (for remote channel DB and wallet DB).
func ExtraArgsEtcd(etcdCfg *etcd.Config, name string, cluster bool,
leaderSessionTTL int) []string {
extraArgs := []string{
"--db.backend=etcd",
fmt.Sprintf("--db.etcd.host=%v", etcdCfg.Host),
fmt.Sprintf("--db.etcd.user=%v", etcdCfg.User),
fmt.Sprintf("--db.etcd.pass=%v", etcdCfg.Pass),
fmt.Sprintf("--db.etcd.namespace=%v", etcdCfg.Namespace),
}
if etcdCfg.InsecureSkipVerify {
extraArgs = append(extraArgs, "--db.etcd.insecure_skip_verify")
}
if cluster {
clusterArgs := []string{
"--cluster.enable-leader-election",
fmt.Sprintf("--cluster.id=%v", name),
fmt.Sprintf("--cluster.leader-session-ttl=%v",
leaderSessionTTL),
}
extraArgs = append(extraArgs, clusterArgs...)
}
return extraArgs
}
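From the harness side the helper is invoked as in the following sketch; the concrete values are placeholders and the expanded flags are listed only as a rough illustration:

extraArgs := node.ExtraArgsEtcd(etcdCfg, "Carol-1", true, 60)

// For a clustered node this expands to roughly:
//
//	--db.backend=etcd
//	--db.etcd.host=... --db.etcd.user=... --db.etcd.pass=...
//	--db.etcd.namespace=...
//	--cluster.enable-leader-election
//	--cluster.id=Carol-1
//	--cluster.leader-session-ttl=60
_ = extraArgs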

View file

@ -215,6 +215,33 @@ func (hn *HarnessNode) WaitUntilServerActive() error {
})
}
// WaitUntilLeader attempts to finish the start procedure by initiating an RPC
// connection and setting up the wallet unlocker client. This is needed when
// a node that has recently been started was waiting to become the leader and
// we're at the point when we expect that it is the leader now (awaiting
// unlock).
func (hn *HarnessNode) WaitUntilLeader(timeout time.Duration) error {
var (
conn *grpc.ClientConn
connErr error
)
if err := wait.NoError(func() error {
conn, connErr = hn.ConnectRPCWithMacaroon(nil)
return connErr
}, timeout); err != nil {
return err
}
// Since the conn is not authed, only the `WalletUnlocker` and `State`
// clients can be inited from this conn.
hn.conn = conn
hn.RPC = rpc.NewHarnessRPC(hn.runCtx, hn.T, conn, hn.Name())
// Wait till the server is starting.
return hn.WaitUntilStarted()
}
// Unlock attempts to unlock the wallet of the target HarnessNode. This method
// should be called after the restart of a HarnessNode that was created with a
// seed+password. Once this method returns, the HarnessNode will be ready to
@ -360,12 +387,12 @@ func (hn *HarnessNode) StartLndCmd(ctxb context.Context) error {
return nil
}
// StartWithSeed will start the lnd process, creates the grpc connection
// StartWithNoAuth starts the lnd process, creates the grpc connection
// without macaroon auth, and waits until the server is reported as waiting to
// start.
//
// NOTE: caller needs to take extra step to create and unlock the wallet.
func (hn *HarnessNode) StartWithSeed(ctxt context.Context) error {
func (hn *HarnessNode) StartWithNoAuth(ctxt context.Context) error {
// Start lnd process and prepare logs.
if err := hn.StartLndCmd(ctxt); err != nil {
return fmt.Errorf("start lnd error: %w", err)
@ -399,7 +426,7 @@ func (hn *HarnessNode) Start(ctxt context.Context) error {
conn, err := hn.ConnectRPC()
if err != nil {
err = fmt.Errorf("ConnectRPC err: %w", err)
cmdErr := hn.kill()
cmdErr := hn.Kill()
if cmdErr != nil {
err = fmt.Errorf("kill process got err: %w: %v",
cmdErr, err)
@ -453,9 +480,27 @@ func (hn *HarnessNode) InitNode(macBytes []byte) error {
// Init all the RPC clients.
hn.InitRPCClients(conn)
// Wait till the server is starting.
if err := hn.WaitUntilStarted(); err != nil {
return fmt.Errorf("waiting for start got: %w", err)
}
return hn.initLightningClient()
}
// ChangePasswordAndInit initializes a harness node by submitting the change
// password request via RPC. After the request is submitted, this method will
// block until a macaroon-authenticated RPC connection can be established to the
// harness node. Once established, the new connection is used to initialize the
// RPC clients and subscribes the HarnessNode to topology changes.
func (hn *HarnessNode) ChangePasswordAndInit(
req *lnrpc.ChangePasswordRequest) (
*lnrpc.ChangePasswordResponse, error) {
response := hn.RPC.ChangePassword(req)
return response, hn.InitNode(response.AdminMacaroon)
}
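A usage sketch, e.g. from a stateless-init style test; carol and the passwords are placeholders:

req := &lnrpc.ChangePasswordRequest{
	CurrentPassword:    currentPw,
	NewPassword:        newPw,
	NewMacaroonRootKey: true,
}
resp, err := carol.ChangePasswordAndInit(req)
require.NoError(ht, err, "change password")

// The returned admin macaroon authenticates the re-initialized node.
_ = resp.AdminMacaroon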
// waitTillServerState makes a subscription to the server's state change and
// blocks until the server is in the targeted state.
func (hn *HarnessNode) waitTillServerState(
@ -670,7 +715,7 @@ func (hn *HarnessNode) Stop() error {
// If the rpc clients are not initiated, we'd kill the process
// manually.
hn.printErrf("found nil RPC clients")
if err := hn.kill(); err != nil {
if err := hn.Kill(); err != nil {
return fmt.Errorf("killing process got: %v", err)
}
}
@ -719,8 +764,8 @@ func (hn *HarnessNode) Shutdown() error {
return nil
}
// kill kills the lnd process.
func (hn *HarnessNode) kill() error {
// Kill kills the lnd process.
func (hn *HarnessNode) Kill() error {
return hn.cmd.Process.Kill()
}

View file

@ -5,6 +5,7 @@ import (
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
)
// =====================
@ -82,3 +83,19 @@ func (h *HarnessRPC) SubscribeSingleInvoice(rHash []byte) SingleInvoiceClient {
return client
}
type TrackPaymentClient routerrpc.Router_TrackPaymentV2Client
// TrackPaymentV2 creates a subscription client for the given payment hash and
// asserts its creation.
func (h *HarnessRPC) TrackPaymentV2(payHash []byte) TrackPaymentClient {
req := &routerrpc.TrackPaymentRequest{PaymentHash: payHash}
// TrackPaymentV2 needs to have the context alive for the entire test
// case as the returned client will be used for send and receive events
// stream. Thus we use runCtx here instead of a timeout context.
client, err := h.Router.TrackPaymentV2(h.runCtx, req)
h.NoError(err, "TrackPaymentV2")
return client
}
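A usage sketch pairing the subscription with the harness helper ReceiveTrackPayment added earlier in this diff; hn and payHash are placeholders:

// Subscribe to updates for the payment in question.
stream := hn.RPC.TrackPaymentV2(payHash)

// Block until the next update arrives or the default timeout hits.
payment := ht.ReceiveTrackPayment(stream)
require.NotNil(ht, payment, "expected a payment update")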

View file

@ -194,6 +194,10 @@ func (h *HarnessRPC) ListInvoices(
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
if req == nil {
req = &lnrpc.ListInvoiceRequest{}
}
resp, err := h.LN.ListInvoices(ctxt, req)
h.NoError(err, "ListInvoice")
@ -481,8 +485,10 @@ func (h *HarnessRPC) QueryRoutes(
return routes
}
type SendToRouteClient lnrpc.Lightning_SendToRouteClient
// SendToRoute makes a RPC call to SendToRoute and asserts.
func (h *HarnessRPC) SendToRoute() lnrpc.Lightning_SendToRouteClient {
func (h *HarnessRPC) SendToRoute() SendToRouteClient {
// SendToRoute needs to have the context alive for the entire test case
// as the returned client will be used for send and receive payment
// stream. Thus we use runCtx here instead of a timeout context.
@ -567,3 +573,61 @@ func (h *HarnessRPC) VerifyChanBackup(
return resp
}
// LookupInvoice queries the node's invoices using the specified rHash.
func (h *HarnessRPC) LookupInvoice(rHash []byte) *lnrpc.Invoice {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
payHash := &lnrpc.PaymentHash{RHash: rHash}
resp, err := h.LN.LookupInvoice(ctxt, payHash)
h.NoError(err, "LookupInvoice")
return resp
}
// DecodePayReq makes a RPC call to node's DecodePayReq and asserts.
func (h *HarnessRPC) DecodePayReq(req string) *lnrpc.PayReq {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
payReq := &lnrpc.PayReqString{PayReq: req}
resp, err := h.LN.DecodePayReq(ctxt, payReq)
h.NoError(err, "DecodePayReq")
return resp
}
// ForwardingHistory makes a RPC call to the node's ForwardingHistory and
// asserts.
func (h *HarnessRPC) ForwardingHistory(
req *lnrpc.ForwardingHistoryRequest) *lnrpc.ForwardingHistoryResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
if req == nil {
req = &lnrpc.ForwardingHistoryRequest{}
}
resp, err := h.LN.ForwardingHistory(ctxt, req)
h.NoError(err, "ForwardingHistory")
return resp
}
type MiddlewareClient lnrpc.Lightning_RegisterRPCMiddlewareClient
// RegisterRPCMiddleware makes a RPC call to the node's RegisterRPCMiddleware
// and asserts. It also returns a cancel context which can cancel the context
// used by the client.
func (h *HarnessRPC) RegisterRPCMiddleware() (MiddlewareClient,
context.CancelFunc) {
ctxt, cancel := context.WithCancel(h.runCtx)
stream, err := h.LN.RegisterRPCMiddleware(ctxt)
h.NoError(err, "RegisterRPCMiddleware")
return stream, cancel
}

View file

@ -3,7 +3,9 @@ package rpc
import (
"context"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/stretchr/testify/require"
)
// =====================
@ -12,6 +14,8 @@ import (
// UpdateChanStatus makes a UpdateChanStatus RPC call to node's RouterClient
// and asserts.
//
//nolint:lll
func (h *HarnessRPC) UpdateChanStatus(
req *routerrpc.UpdateChanStatusRequest) *routerrpc.UpdateChanStatusResponse {
@ -39,3 +43,110 @@ func (h *HarnessRPC) SendPayment(
return stream
}
type HtlcEventsClient routerrpc.Router_SubscribeHtlcEventsClient
// SubscribeHtlcEvents makes a subscription to the HTLC events and returns a
// htlc event client.
func (h *HarnessRPC) SubscribeHtlcEvents() HtlcEventsClient {
// Use runCtx here to keep the client alive for the scope of the test.
client, err := h.Router.SubscribeHtlcEvents(
h.runCtx, &routerrpc.SubscribeHtlcEventsRequest{},
)
h.NoError(err, "SubscribeHtlcEvents")
return client
}
// GetMissionControlConfig makes a RPC call to the node's
// GetMissionControlConfig and asserts.
//
//nolint:lll
func (h *HarnessRPC) GetMissionControlConfig() *routerrpc.GetMissionControlConfigResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &routerrpc.GetMissionControlConfigRequest{}
resp, err := h.Router.GetMissionControlConfig(ctxt, req)
h.NoError(err, "GetMissionControlConfig")
return resp
}
// SetMissionControlConfig makes a RPC call to the node's
// SetMissionControlConfig and asserts.
func (h *HarnessRPC) SetMissionControlConfig(
config *routerrpc.MissionControlConfig) {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &routerrpc.SetMissionControlConfigRequest{Config: config}
_, err := h.Router.SetMissionControlConfig(ctxt, req)
h.NoError(err, "SetMissionControlConfig")
}
// ResetMissionControl makes a RPC call to the node's ResetMissionControl and
// asserts.
func (h *HarnessRPC) ResetMissionControl() {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &routerrpc.ResetMissionControlRequest{}
_, err := h.Router.ResetMissionControl(ctxt, req)
h.NoError(err, "ResetMissionControl")
}
// SendToRouteV2 makes a RPC call to SendToRouteV2 and asserts.
func (h *HarnessRPC) SendToRouteV2(
req *routerrpc.SendToRouteRequest) *lnrpc.HTLCAttempt {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Router.SendToRouteV2(ctxt, req)
h.NoError(err, "SendToRouteV2")
return resp
}
// QueryProbability makes a RPC call to the node's QueryProbability and
// asserts.
//
//nolint:lll
func (h *HarnessRPC) QueryProbability(
req *routerrpc.QueryProbabilityRequest) *routerrpc.QueryProbabilityResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Router.QueryProbability(ctxt, req)
h.NoError(err, "QueryProbability")
return resp
}
// XImportMissionControl makes a RPC call to the node's XImportMissionControl
// and asserts.
func (h *HarnessRPC) XImportMissionControl(
req *routerrpc.XImportMissionControlRequest) {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.Router.XImportMissionControl(ctxt, req)
h.NoError(err, "XImportMissionControl")
}
// XImportMissionControlAssertErr makes a RPC call to the node's
// XImportMissionControl and asserts an error occurred.
func (h *HarnessRPC) XImportMissionControlAssertErr(
req *routerrpc.XImportMissionControlRequest) {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.Router.XImportMissionControl(ctxt, req)
require.Error(h, err, "expect an error from x import mission control")
}

View file

@ -48,3 +48,17 @@ func (h *HarnessRPC) GenSeed(req *lnrpc.GenSeedRequest) *lnrpc.GenSeedResponse {
return resp
}
// ChangePassword makes a RPC request to WalletUnlocker and asserts there's no
// error.
func (h *HarnessRPC) ChangePassword(
req *lnrpc.ChangePasswordRequest) *lnrpc.ChangePasswordResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.WalletUnlocker.ChangePassword(ctxt, req)
h.NoError(err, "ChangePassword")
return resp
}

View file

@ -1,5 +1,52 @@
package rpc
import (
"context"
"github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc"
"github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"
)
// =====================
// WatchtowerClient and WatchtowerClientClient related RPCs.
// =====================
// GetInfoWatchtower makes a RPC call to the watchtower of the given node and
// asserts.
func (h *HarnessRPC) GetInfoWatchtower() *watchtowerrpc.GetInfoResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &watchtowerrpc.GetInfoRequest{}
info, err := h.Watchtower.GetInfo(ctxt, req)
h.NoError(err, "GetInfo from Watchtower")
return info
}
// AddTower makes a RPC call to the WatchtowerClient of the given node and
// asserts.
func (h *HarnessRPC) AddTower(
req *wtclientrpc.AddTowerRequest) *wtclientrpc.AddTowerResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.WatchtowerClient.AddTower(ctxt, req)
h.NoError(err, "AddTower")
return resp
}
// WatchtowerStats makes a RPC call to the WatchtowerClient of the given node
// and asserts.
func (h *HarnessRPC) WatchtowerStats() *wtclientrpc.StatsResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &wtclientrpc.StatsRequest{}
resp, err := h.WatchtowerClient.Stats(ctxt, req)
h.NoError(err, "Stats from Watchtower")
return resp
}

View file

@ -487,7 +487,7 @@ func executePgQuery(query string) error {
postgresDatabaseDsn("postgres"),
)
if err != nil {
return fmt.Errorf("unable to connect to database: %v", err)
return fmt.Errorf("unable to connect to database: %w", err)
}
defer pool.Close()

View file

@ -690,41 +690,6 @@ func assertChannelPolicy(t *harnessTest, node *lntest.HarnessNode,
}
}
// assertMinerBlockHeightDelta ensures that tempMiner is 'delta' blocks ahead
// of miner.
func assertMinerBlockHeightDelta(t *harnessTest,
miner, tempMiner *lntest.HarnessMiner, delta int32) {
// Ensure the chain lengths are what we expect.
var predErr error
err := wait.Predicate(func() bool {
_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
if err != nil {
predErr = fmt.Errorf("unable to get current "+
"blockheight %v", err)
return false
}
_, minerHeight, err := miner.Client.GetBestBlock()
if err != nil {
predErr = fmt.Errorf("unable to get current "+
"blockheight %v", err)
return false
}
if tempMinerHeight != minerHeight+delta {
predErr = fmt.Errorf("expected new miner(%d) to be %d "+
"blocks ahead of original miner(%d)",
tempMinerHeight, delta, minerHeight)
return false
}
return true
}, defaultTimeout)
if err != nil {
t.Fatalf(predErr.Error())
}
}
func checkCommitmentMaturity(
forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel,
maturityHeight uint32, blocksTilMaturity int32) error {
@ -1418,30 +1383,6 @@ func assertTransactionNotInWallet(t *testing.T, node *lntest.HarnessNode,
)
}
func assertAnchorOutputLost(t *harnessTest, node *lntest.HarnessNode,
chanPoint wire.OutPoint) {
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err := wait.Predicate(func() bool {
resp, pErr := node.PendingChannels(
context.Background(), pendingChansRequest,
)
if pErr != nil {
return false
}
for _, pendingChan := range resp.PendingForceClosingChannels {
if pendingChan.Channel.ChannelPoint == chanPoint.String() {
return (pendingChan.Anchor ==
lnrpc.PendingChannelsResponse_ForceClosedChannel_LOST)
}
}
return false
}, defaultTimeout)
require.NoError(t.t, err, "anchor doesn't show as being lost")
}
// assertNodeAnnouncement compares that two node announcements match.
func assertNodeAnnouncement(t *harnessTest, n1, n2 *lnrpc.NodeUpdate) {
// Alias should match.

View file

@ -231,4 +231,142 @@ var allTestCasesTemp = []*lntemp.TestCase{
Name: "neutrino kit",
TestFunc: testNeutrino,
},
{
Name: "etcd failover",
TestFunc: testEtcdFailover,
},
{
Name: "hold invoice force close",
TestFunc: testHoldInvoiceForceClose,
},
{
Name: "hold invoice sender persistence",
TestFunc: testHoldInvoicePersistence,
},
{
Name: "maximum channel size",
TestFunc: testMaxChannelSize,
},
{
Name: "wumbo channels",
TestFunc: testWumboChannels,
},
{
Name: "max htlc pathfind",
TestFunc: testMaxHtlcPathfind,
},
{
Name: "multi-hop htlc error propagation",
TestFunc: testHtlcErrorPropagation,
},
{
Name: "multi-hop payments",
TestFunc: testMultiHopPayments,
},
{
Name: "anchors reserved value",
TestFunc: testAnchorReservedValue,
},
{
Name: "3rd party anchor spend",
TestFunc: testAnchorThirdPartySpend,
},
{
Name: "open channel reorg test",
TestFunc: testOpenChannelAfterReorg,
},
{
Name: "psbt channel funding external",
TestFunc: testPsbtChanFundingExternal,
},
{
Name: "psbt channel funding single step",
TestFunc: testPsbtChanFundingSingleStep,
},
{
Name: "resolution handoff",
TestFunc: testResHandoff,
},
{
Name: "REST API",
TestFunc: testRestAPI,
},
{
Name: "revoked uncooperative close retribution",
TestFunc: testRevokedCloseRetribution,
},
{
Name: "revoked uncooperative close retribution zero value " +
"remote output",
TestFunc: testRevokedCloseRetributionZeroValueRemoteOutput,
},
{
Name: "revoked uncooperative close retribution remote hodl",
TestFunc: testRevokedCloseRetributionRemoteHodl,
},
{
Name: "revoked uncooperative close retribution altruist " +
"watchtower",
TestFunc: testRevokedCloseRetributionAltruistWatchtower,
},
{
Name: "single-hop send to route",
TestFunc: testSingleHopSendToRoute,
},
{
Name: "multi-hop send to route",
TestFunc: testMultiHopSendToRoute,
},
{
Name: "send to route error propagation",
TestFunc: testSendToRouteErrorPropagation,
},
{
Name: "private channels",
TestFunc: testPrivateChannels,
},
{
Name: "invoice routing hints",
TestFunc: testInvoiceRoutingHints,
},
{
Name: "multi-hop payments over private channels",
TestFunc: testMultiHopOverPrivateChannels,
},
{
Name: "query routes",
TestFunc: testQueryRoutes,
},
{
Name: "route fee cutoff",
TestFunc: testRouteFeeCutoff,
},
{
Name: "rpc middleware interceptor",
TestFunc: testRPCMiddlewareInterceptor,
},
{
Name: "macaroon authentication",
TestFunc: testMacaroonAuthentication,
},
{
Name: "bake macaroon",
TestFunc: testBakeMacaroon,
},
{
Name: "delete macaroon id",
TestFunc: testDeleteMacaroonID,
},
{
Name: "stateless init",
TestFunc: testStatelessInit,
},
{
Name: "single hop invoice",
TestFunc: testSingleHopInvoice,
},
{
Name: "wipe forwarding packages",
TestFunc: testWipeForwardingPackages,
},
}

View file

@ -349,8 +349,9 @@ func testChannelBackupRestoreBasic(ht *lntemp.HarnessTest) {
"", revocationWindow, nil,
copyPorts(oldNode),
)
st.RestartNode(newNode, backupSnapshot)
st.RestartNodeWithChanBackups(
newNode, backupSnapshot,
)
return newNode
}

View file

@ -15,7 +15,6 @@ import (
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
@ -984,57 +983,6 @@ func padCLTV(cltv uint32) uint32 {
return cltv + uint32(routing.BlockPadding)
}
type sweptOutput struct {
OutPoint wire.OutPoint
SweepTx *wire.MsgTx
}
// findCommitAndAnchor looks for a commitment sweep and anchor sweep in the
// mempool. Our anchor output is identified by having multiple inputs, because
// we have to bring another input to add fees to the anchor. Note that the
// anchor swept output may be nil if the channel did not have anchors.
// TODO(yy): delete.
func findCommitAndAnchor(t *harnessTest, net *lntest.NetworkHarness,
sweepTxns []*wire.MsgTx, closeTx string) (*sweptOutput, *sweptOutput) {
var commitSweep, anchorSweep *sweptOutput
for _, tx := range sweepTxns {
txHash := tx.TxHash()
sweepTx, err := net.Miner.Client.GetRawTransaction(&txHash)
require.NoError(t.t, err)
// We expect our commitment sweep to have a single input, and,
// our anchor sweep to have more inputs (because the wallet
// needs to add balance to the anchor amount). We find their
// sweep txids here to setup appropriate resolutions. We also
// need to find the outpoint for our resolution, which we do by
// matching the inputs to the sweep to the close transaction.
inputs := sweepTx.MsgTx().TxIn
if len(inputs) == 1 {
commitSweep = &sweptOutput{
OutPoint: inputs[0].PreviousOutPoint,
SweepTx: tx,
}
} else {
// Since we have more than one input, we run through
// them to find the outpoint that spends from the close
// tx. This will be our anchor output.
for _, txin := range inputs {
outpointStr := txin.PreviousOutPoint.Hash.String()
if outpointStr == closeTx {
anchorSweep = &sweptOutput{
OutPoint: txin.PreviousOutPoint,
SweepTx: tx,
}
}
}
}
}
return commitSweep, anchorSweep
}
// testFailingChannel tests that we will fail the channel by force closing it
// in the case where a counterparty tries to settle an HTLC with the wrong
// preimage.

View file

@ -199,7 +199,7 @@ func testUpdateChannelPolicy(ht *lntemp.HarnessTest) {
// We expect this payment to fail, and that the min_htlc value is
// communicated back to us, since the attempted HTLC value was too low.
sendResp, err := alicePayStream.Recv()
sendResp, err := ht.ReceiveSendToRouteUpdate(alicePayStream)
require.NoError(ht, err, "unable to receive payment stream")
// Expected as part of the error message.
@ -238,7 +238,7 @@ func testUpdateChannelPolicy(ht *lntemp.HarnessTest) {
err = alicePayStream.Send(sendReq)
require.NoError(ht, err, "unable to send payment")
sendResp, err = alicePayStream.Recv()
sendResp, err = ht.ReceiveSendToRouteUpdate(alicePayStream)
require.NoError(ht, err, "unable to receive payment stream")
require.Empty(ht, sendResp.PaymentError, "expected payment to succeed")

View file

@ -4,7 +4,6 @@
package itest
import (
"context"
"testing"
"time"
@ -13,27 +12,24 @@ import (
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntest"
"github.com/stretchr/testify/require"
)
func assertLeader(ht *harnessTest, observer cluster.LeaderElector,
func assertLeader(ht *lntemp.HarnessTest, observer cluster.LeaderElector,
expected string) {
leader, err := observer.Leader(context.Background())
if err != nil {
ht.Fatalf("Unable to query leader: %v", err)
}
if leader != expected {
ht.Fatalf("Leader should be '%v', got: '%v'", expected, leader)
}
leader, err := observer.Leader(ht.Context())
require.NoError(ht, err, "Unable to query leader")
require.Equalf(ht, expected, leader,
"Leader should be '%v', got: '%v'", expected, leader)
}
// testEtcdFailover tests that in a cluster setup where two LND nodes form a
// single cluster (sharing the same identity) one can hand over the leader role
// to the other (failing over after graceful shutdown or forceful abort).
func testEtcdFailover(net *lntest.NetworkHarness, ht *harnessTest) {
func testEtcdFailover(ht *lntemp.HarnessTest) {
testCases := []struct {
name string
kill bool
@ -48,148 +44,97 @@ func testEtcdFailover(net *lntest.NetworkHarness, ht *harnessTest) {
for _, test := range testCases {
test := test
ht.t.Run(test.name, func(t1 *testing.T) {
ht1 := newHarnessTest(t1, ht.lndHarness)
ht1.RunTestCase(&testCase{
name: test.name,
test: func(_ *lntest.NetworkHarness,
tt *harnessTest) {
testEtcdFailoverCase(net, tt, test.kill)
},
})
success := ht.Run(test.name, func(t1 *testing.T) {
st := ht.Subtest(t1)
testEtcdFailoverCase(st, test.kill)
})
if !success {
return
}
}
}
func testEtcdFailoverCase(net *lntest.NetworkHarness, ht *harnessTest,
kill bool) {
ctxb := context.Background()
func testEtcdFailoverCase(ht *lntemp.HarnessTest, kill bool) {
etcdCfg, cleanup, err := kvdb.StartEtcdTestBackend(
ht.t.TempDir(), uint16(lntest.NextAvailablePort()),
ht.T.TempDir(), uint16(lntest.NextAvailablePort()),
uint16(lntest.NextAvailablePort()), "",
)
if err != nil {
ht.Fatalf("Failed to start etcd instance: %v", err)
}
require.NoError(ht, err, "Failed to start etcd instance")
defer cleanup()
alice := ht.NewNode("Alice", nil)
// Give Alice some coins to fund the channel.
ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
// Make leader election session TTL 5 sec to make the test run fast.
const leaderSessionTTL = 5
observer, err := cluster.MakeLeaderElector(
ctxb, cluster.EtcdLeaderElector, "observer",
ht.Context(), cluster.EtcdLeaderElector, "observer",
lncfg.DefaultEtcdElectionPrefix, leaderSessionTTL, etcdCfg,
)
if err != nil {
ht.Fatalf("Cannot start election observer: %v", err)
}
require.NoError(ht, err, "Cannot start election observer")
password := []byte("the quick brown fox jumps the lazy dog")
entropy := [16]byte{1, 2, 3}
stateless := false
cluster := true
carol1, _, _, err := net.NewNodeWithSeedEtcd(
carol1, _, _ := ht.NewNodeWithSeedEtcd(
"Carol-1", etcdCfg, password, entropy[:], stateless, cluster,
leaderSessionTTL,
)
if err != nil {
ht.Fatalf("unable to start Carol-1: %v", err)
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
info1, err := carol1.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
if err != nil {
ht.Fatalf("unable to get info: %v", err)
}
net.ConnectNodes(ht.t, carol1, net.Alice)
info1 := carol1.RPC.GetInfo()
ht.ConnectNodes(carol1, alice)
// Open a channel with 100k satoshis between Carol and Alice with Alice
// being the sole funder of the channel.
chanAmt := btcutil.Amount(100000)
_ = openChannelAndAssert(
ht, net, net.Alice, carol1,
lntest.OpenChannelParams{
Amt: chanAmt,
},
)
chanAmt := btcutil.Amount(100_000)
ht.OpenChannel(alice, carol1, lntemp.OpenChannelParams{Amt: chanAmt})
// At this point Carol-1 is the elected leader, while Carol-2 will wait
// to become the leader when Carol-1 stops.
carol2, err := net.NewNodeEtcd(
"Carol-2", etcdCfg, password, cluster, false, leaderSessionTTL,
carol2 := ht.NewNodeEtcd(
"Carol-2", etcdCfg, password, cluster, leaderSessionTTL,
)
if err != nil {
ht.Fatalf("Unable to start Carol-2: %v", err)
}
assertLeader(ht, observer, "Carol-1")
amt := btcutil.Amount(1000)
payReqs, _, _, err := createPayReqs(carol1, amt, 2)
if err != nil {
ht.Fatalf("Carol-2 is unable to create payment requests: %v",
err)
}
sendAndAssertSuccess(
ht, net.Alice, &routerrpc.SendPaymentRequest{
PaymentRequest: payReqs[0],
TimeoutSeconds: 60,
FeeLimitSat: noFeeLimitMsat,
},
)
payReqs, _, _ := ht.CreatePayReqs(carol1, amt, 2)
ht.CompletePaymentRequests(alice, []string{payReqs[0]})
// Shut down or kill Carol-1 and wait for Carol-2 to become the leader.
failoverTimeout := time.Duration(2*leaderSessionTTL) * time.Second
if kill {
err = net.KillNode(carol1)
if err != nil {
ht.Fatalf("Can't kill Carol-1: %v", err)
}
ht.KillNode(carol1)
} else {
shutdownAndAssert(net, ht, carol1)
ht.Shutdown(carol1)
}
err = carol2.WaitUntilLeader(failoverTimeout)
if err != nil {
ht.Fatalf("Waiting for Carol-2 to become the leader failed: %v",
err)
}
require.NoError(ht, err, "Waiting for Carol-2 to become the leader "+
"failed")
assertLeader(ht, observer, "Carol-2")
err = carol2.Unlock(&lnrpc.UnlockWalletRequest{
WalletPassword: password,
})
if err != nil {
ht.Fatalf("Unlocking Carol-2 was not successful: %v", err)
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
req := &lnrpc.UnlockWalletRequest{WalletPassword: password}
err = carol2.Unlock(req)
require.NoError(ht, err, "Unlocking Carol-2 failed")
// Make sure Carol-1 and Carol-2 have the same identity.
info2, err := carol2.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
if err != nil {
ht.Fatalf("unable to get info: %v", err)
}
if info1.IdentityPubkey != info2.IdentityPubkey {
ht.Fatalf("Carol-1 and Carol-2 must have the same identity: "+
"%v vs %v", info1.IdentityPubkey, info2.IdentityPubkey)
}
info2 := carol2.RPC.GetInfo()
require.Equal(ht, info1.IdentityPubkey, info2.IdentityPubkey,
"Carol-1 and Carol-2 must have the same identity")
// Make sure the nodes are connected before moving forward. Otherwise
// we may get a link not found error.
ht.AssertConnected(alice, carol2)
// Now let Alice pay the second invoice but this time we expect Carol-2
// to receive the payment.
sendAndAssertSuccess(
ht, net.Alice, &routerrpc.SendPaymentRequest{
PaymentRequest: payReqs[1],
TimeoutSeconds: 60,
FeeLimitSat: noFeeLimitMsat,
},
)
ht.CompletePaymentRequests(alice, []string{payReqs[1]})
shutdownAndAssert(net, ht, carol2)
// Manually shut down the node as it would otherwise interfere with our
// cleanup process.
ht.Shutdown(carol2)
}
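For reference, the observer set up above is just lnd's generic leader-elector API pointed at the test etcd instance. Below is a minimal standalone sketch of the same pattern, assuming the cluster.LeaderElector interface exposes a Leader query (the assertLeader helper whose tail appears at the top of this hunk suggests as much); it is illustrative, not part of the diff.

//go:build kvdb_etcd

package itestsketch

import (
	"context"
	"fmt"

	"github.com/lightningnetwork/lnd/cluster"
	"github.com/lightningnetwork/lnd/kvdb/etcd"
	"github.com/lightningnetwork/lnd/lncfg"
)

// observeEtcdLeader is a sketch, not part of the diff: it builds a
// non-contending "observer" elector against an etcd backend and asks who
// currently holds the election lease. The Leader method is assumed from
// the assertLeader helper used in the test above.
func observeEtcdLeader(ctx context.Context, etcdCfg *etcd.Config,
	sessionTTL int) (string, error) {

	observer, err := cluster.MakeLeaderElector(
		ctx, cluster.EtcdLeaderElector, "observer",
		lncfg.DefaultEtcdElectionPrefix, sessionTTL, etcdCfg,
	)
	if err != nil {
		return "", err
	}

	// In testEtcdFailoverCase this value flips from "Carol-1" to
	// "Carol-2" once the failover completes.
	leader, err := observer.Leader(ctx)
	if err != nil {
		return "", err
	}

	fmt.Println("current leader:", leader)

	return leader, nil
}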

View file

@ -1,14 +1,13 @@
package itest
import (
"context"
"fmt"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
@ -16,17 +15,11 @@ import (
// testHoldInvoiceForceClose tests cancellation of accepted hold invoices which
// would otherwise trigger force closes when they expire.
func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) {
ctxb, cancel := context.WithCancel(context.Background())
defer cancel()
func testHoldInvoiceForceClose(ht *lntemp.HarnessTest) {
// Open a channel between alice and bob.
chanReq := lntest.OpenChannelParams{
Amt: 300000,
}
chanPoint := openChannelAndAssert(
t, net, net.Alice, net.Bob, chanReq,
alice, bob := ht.Alice, ht.Bob
chanPoint := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: 300000},
)
// Create a non-dust hold invoice for bob.
@ -39,87 +32,76 @@ func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) {
CltvExpiry: 40,
Hash: payHash[:],
}
bobInvoice := bob.RPC.AddHoldInvoice(invoiceReq)
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
bobInvoice, err := net.Bob.AddHoldInvoice(ctxt, invoiceReq)
require.NoError(t.t, err)
// Subscribe the invoice.
stream := bob.RPC.SubscribeSingleInvoice(payHash[:])
// Pay this invoice from Alice -> Bob, we should achieve this with a
// single htlc.
_, err = net.Alice.RouterClient.SendPaymentV2(
ctxb, &routerrpc.SendPaymentRequest{
req := &routerrpc.SendPaymentRequest{
PaymentRequest: bobInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
require.NoError(t.t, err)
}
alice.RPC.SendPayment(req)
waitForInvoiceAccepted(t, net.Bob, payHash)
ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
// Once the HTLC has cleared, alice and bob should both have a single
// htlc locked in.
nodes := []*lntest.HarnessNode{net.Alice, net.Bob}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash[:])
}, defaultTimeout)
require.NoError(t.t, err)
ht.AssertActiveHtlcs(alice, payHash[:])
ht.AssertActiveHtlcs(bob, payHash[:])
// Get our htlc expiry height and current block height so that we
// can mine the exact number of blocks required to expire the htlc.
chans, err := net.Alice.ListChannels(ctxb, &lnrpc.ListChannelsRequest{})
require.NoError(t.t, err)
require.Len(t.t, chans.Channels, 1)
require.Len(t.t, chans.Channels[0].PendingHtlcs, 1)
activeHtlc := chans.Channels[0].PendingHtlcs[0]
channel := ht.QueryChannelByChanPoint(alice, chanPoint)
require.Len(ht, channel.PendingHtlcs, 1)
activeHtlc := channel.PendingHtlcs[0]
require.NoError(t.t, net.Alice.WaitForBlockchainSync())
require.NoError(t.t, net.Bob.WaitForBlockchainSync())
info, err := net.Alice.GetInfo(ctxb, &lnrpc.GetInfoRequest{})
require.NoError(t.t, err)
_, currentHeight := ht.Miner.GetBestBlock()
// Now we will mine blocks until the htlc expires, and wait for each
// node to sync to our latest height. Sanity check that we won't
// underflow.
require.Greater(
t.t, activeHtlc.ExpirationHeight, info.BlockHeight,
"expected expiry after current height",
)
blocksTillExpiry := activeHtlc.ExpirationHeight - info.BlockHeight
require.Greater(ht, activeHtlc.ExpirationHeight, uint32(currentHeight),
"expected expiry after current height")
blocksTillExpiry := activeHtlc.ExpirationHeight - uint32(currentHeight)
// Alice will go to chain with some delta, sanity check that we won't
// underflow and subtract this from our mined blocks.
require.Greater(
t.t, blocksTillExpiry,
uint32(lncfg.DefaultOutgoingBroadcastDelta),
)
blocksTillForce := blocksTillExpiry - lncfg.DefaultOutgoingBroadcastDelta
require.Greater(ht, blocksTillExpiry,
uint32(lncfg.DefaultOutgoingBroadcastDelta))
mineBlocksSlow(t, net, blocksTillForce, 0)
// blocksTillForce is the number of blocks that need to be mined to
// trigger a force close from Alice iff the invoice cancelation
// fails. This value is 48 in the current test setup.
blocksTillForce := blocksTillExpiry -
lncfg.DefaultOutgoingBroadcastDelta
require.NoError(t.t, net.Alice.WaitForBlockchainSync())
require.NoError(t.t, net.Bob.WaitForBlockchainSync())
// blocksTillCancel is the number of blocks that need to be mined to
// trigger an invoice cancelation from Bob. This value is 30 in the
// current test setup.
blocksTillCancel := blocksTillExpiry -
lncfg.DefaultHoldInvoiceExpiryDelta
// Our channel should not have been force closed, instead we expect our
// channel to still be open and our invoice to have been canceled before
// expiry.
chanInfo, err := getChanInfo(net.Alice)
require.NoError(t.t, err)
// When using ht.MineBlocks with the bitcoind backend, the synced block
// heights differ significantly among subsystems. From observation, LNWL
// syncs much faster than the other subsystems, running more than 10
// blocks ahead. For this test case, CRTR may be lagging behind by more
// than 20 blocks. Thus we use slow mining instead.
// TODO(yy): fix block height asymmetry among all the subsystems.
//
// We first mine enough blocks to trigger an invoice cancelation.
ht.MineBlocks(blocksTillCancel)
fundingTxID, err := lnrpc.GetChanPointFundingTxid(chanPoint)
require.NoError(t.t, err)
chanStr := fmt.Sprintf("%v:%v", fundingTxID, chanPoint.OutputIndex)
require.Equal(t.t, chanStr, chanInfo.ChannelPoint)
// Wait for the nodes to be synced.
ht.WaitForBlockchainSync(alice)
ht.WaitForBlockchainSync(bob)
err = wait.NoError(func() error {
inv, err := net.Bob.LookupInvoice(ctxt, &lnrpc.PaymentHash{
RHash: payHash[:],
})
if err != nil {
return err
}
// Check that the invoice is canceled by Bob.
err := wait.NoError(func() error {
inv := bob.RPC.LookupInvoice(payHash[:])
if inv.State != lnrpc.Invoice_CANCELED {
return fmt.Errorf("expected canceled invoice, got: %v",
@ -135,8 +117,33 @@ func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) {
return nil
}, defaultTimeout)
require.NoError(t.t, err, "expected canceled invoice")
require.NoError(ht, err, "expected canceled invoice")
// We now continue to mine more blocks, up to the point where a force
// close would have been triggered had the invoice cancelation failed.
//
// NOTE: we need to mine the blocks in two sections because the following
// situation has happened frequently with the bitcoind backend:
// - when mining all the blocks together, the subsystems were syncing
// blocks at very different speeds.
// - Bob would cancel the invoice in INVC and send an UpdateFailHTLC in
// PEER.
// - Alice, however, would need to receive that message before her CNCT
// subsystem synced to the force close height. With the bitcoind backend
// this didn't happen, as Alice's CNCT was syncing much faster than Bob's
// INVC, causing the channel to be force closed before the invoice
// cancelation message was received by Alice.
ht.MineBlocks(blocksTillForce - blocksTillCancel)
// Wait for the nodes to be synced.
ht.WaitForBlockchainSync(alice)
ht.WaitForBlockchainSync(bob)
// Check that Alice has not closed the channel because there are no
// outgoing HTLCs in her channel as the only HTLC has already been
// canceled.
ht.AssertNumPendingForceClose(alice, 0)
// Clean up the channel.
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
ht.CloseChannel(alice, chanPoint)
}
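To keep the three heights involved straight, here is a condensed sketch of the arithmetic the hunks above perform, assuming the same variables as the test; it adds no new behavior, only collects the relations in one place. The 48/30 figures are the ones the comments quote for the current setup.

// Heights relative to the current block height, as computed above:
//
//	blocksTillExpiry = ExpirationHeight - currentHeight
//	blocksTillForce  = blocksTillExpiry - lncfg.DefaultOutgoingBroadcastDelta  // ~48
//	blocksTillCancel = blocksTillExpiry - lncfg.DefaultHoldInvoiceExpiryDelta  // ~30
//
// Bob's cancel point comes first, so the test mines in two stages:
ht.MineBlocks(blocksTillCancel)                   // Bob cancels the invoice.
ht.MineBlocks(blocksTillForce - blocksTillCancel) // Alice must NOT force close.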

View file

@ -1,18 +1,14 @@
package itest
import (
"context"
"crypto/rand"
"fmt"
"io"
"sync"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/rpc"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
@ -21,74 +17,47 @@ import (
// testHoldInvoicePersistence tests that a sender to a hold invoice can be
// restarted before the payment gets settled, and still be able to receive the
// preimage.
func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testHoldInvoicePersistence(ht *lntemp.HarnessTest) {
const (
chanAmt = btcutil.Amount(1000000)
numPayments = 10
reason = lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS //nolint:lll
)
// Create carol, and clean up when the test finishes.
carol := net.NewNode(t.t, "Carol", nil)
defer shutdownAndAssert(net, t, carol)
carol := ht.NewNode("Carol", nil)
// Connect Alice to Carol.
net.ConnectNodes(t.t, net.Alice, carol)
alice, bob := ht.Alice, ht.Bob
ht.ConnectNodes(alice, carol)
// Open a channel between Alice and Carol which is private so that we
// cover the addition of hop hints for hold invoices.
chanPointAlice := openChannelAndAssert(
t, net, net.Alice, carol,
lntest.OpenChannelParams{
chanPointAlice := ht.OpenChannel(
alice, carol, lntemp.OpenChannelParams{
Amt: chanAmt,
Private: true,
},
)
// Wait for Alice and Carol to receive the channel edge from the
// funding manager.
err := net.Alice.WaitForNetworkChannelOpen(chanPointAlice)
if err != nil {
t.Fatalf("alice didn't see the alice->carol channel before "+
"timeout: %v", err)
}
err = carol.WaitForNetworkChannelOpen(chanPointAlice)
if err != nil {
t.Fatalf("carol didn't see the carol->alice channel before "+
"timeout: %v", err)
}
// For Carol to include her private channel with Alice as a hop hint,
// we need Alice to be perceived as a "public" node, meaning that she
// has at least one public channel in the graph. We open a public
// channel from Alice -> Bob and wait for Carol to see it.
chanPointBob := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
chanPointBob := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{
Amt: chanAmt,
},
)
// Wait for Alice and Carol to see the open channel
err = net.Alice.WaitForNetworkChannelOpen(chanPointBob)
require.NoError(t.t, err, "alice didn't see the alice->bob "+
"channel before timeout")
err = carol.WaitForNetworkChannelOpen(chanPointBob)
require.NoError(t.t, err, "carol didn't see the alice->bob "+
"channel before timeout")
// Wait for Carol to see the open channel Alice-Bob.
ht.AssertTopologyChannelOpen(carol, chanPointBob)
// Create preimages for all payments we are going to initiate.
var preimages []lntypes.Preimage
for i := 0; i < numPayments; i++ {
var preimage lntypes.Preimage
_, err = rand.Read(preimage[:])
if err != nil {
t.Fatalf("unable to generate preimage: %v", err)
}
copy(preimage[:], ht.Random32Bytes())
preimages = append(preimages, preimage)
}
@ -96,9 +65,15 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
var (
payAmt = btcutil.Amount(4)
payReqs []string
invoiceStreams []invoicesrpc.Invoices_SubscribeSingleInvoiceClient
invoiceStreams []rpc.SingleInvoiceClient
)
assertInvoiceState := func(state lnrpc.Invoice_InvoiceState) {
for _, client := range invoiceStreams {
ht.AssertInvoiceState(client, state)
}
}
for _, preimage := range preimages {
payHash := preimage.Hash()
@ -110,97 +85,42 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
Hash: payHash[:],
Private: true,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
stream, err := carol.SubscribeSingleInvoice(
ctx,
&invoicesrpc.SubscribeSingleInvoiceRequest{
RHash: payHash[:],
},
)
if err != nil {
t.Fatalf("unable to subscribe to invoice: %v", err)
}
resp := carol.RPC.AddHoldInvoice(invoiceReq)
payReqs = append(payReqs, resp.PaymentRequest)
// We expect all of our invoices to have hop hints attached,
// since Carol and Alice are connected with a private channel.
// We assert that we have one hop hint present to ensure that
// we've got coverage for hop hints.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
decodeReq := &lnrpc.PayReqString{
PayReq: resp.PaymentRequest,
}
invoice, err := net.Alice.DecodePayReq(ctxt, decodeReq)
require.NoError(t.t, err, "could not decode invoice")
require.Len(t.t, invoice.RouteHints, 1)
invoice := alice.RPC.DecodePayReq(resp.PaymentRequest)
require.Len(ht, invoice.RouteHints, 1)
stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
invoiceStreams = append(invoiceStreams, stream)
payReqs = append(payReqs, resp.PaymentRequest)
}
// Wait for all the invoices to reach the OPEN state.
for _, stream := range invoiceStreams {
invoice, err := stream.Recv()
if err != nil {
t.Fatalf("err: %v", err)
}
if invoice.State != lnrpc.Invoice_OPEN {
t.Fatalf("expected OPEN, got state: %v", invoice.State)
}
}
assertInvoiceState(lnrpc.Invoice_OPEN)
// Let Alice initiate payments for all the created invoices.
var paymentStreams []routerrpc.Router_SendPaymentV2Client
for _, payReq := range payReqs {
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
payStream, err := net.Alice.RouterClient.SendPaymentV2(
ctx, &routerrpc.SendPaymentRequest{
req := &routerrpc.SendPaymentRequest{
PaymentRequest: payReq,
TimeoutSeconds: 60,
FeeLimitSat: 1000000,
},
}
// Wait for inflight status update.
ht.SendPaymentAndAssertStatus(
alice, req, lnrpc.Payment_IN_FLIGHT,
)
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
paymentStreams = append(paymentStreams, payStream)
}
// Wait for inflight status update.
for _, payStream := range paymentStreams {
payment, err := payStream.Recv()
if err != nil {
t.Fatalf("Failed receiving status update: %v", err)
}
if payment.Status != lnrpc.Payment_IN_FLIGHT {
t.Fatalf("state not in flight: %v", payment.Status)
}
}
// The payments should now show up in Alice's ListInvoices, with a zero
// The payments should now show up in Alice's ListPayments, with a zero
// preimage, indicating they are not yet settled.
err = wait.NoError(func() error {
req := &lnrpc.ListPaymentsRequest{
IncludeIncomplete: true,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
paymentsResp, err := net.Alice.ListPayments(ctxt, req)
if err != nil {
return fmt.Errorf("error when obtaining payments: %v",
err)
}
var zeroPreimg lntypes.Preimage
err := wait.NoError(func() error {
payments := ht.AssertNumPayments(alice, numPayments)
// Gather the payment hashes we are looking for in the
// response.
@ -209,18 +129,16 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
payHashes[preimg.Hash().String()] = struct{}{}
}
var zeroPreimg lntypes.Preimage
for _, payment := range paymentsResp.Payments {
for _, payment := range payments {
_, ok := payHashes[payment.PaymentHash]
if !ok {
continue
}
// The preimage should NEVER be non-zero at this point.
if payment.PaymentPreimage != zeroPreimg.String() {
t.Fatalf("expected zero preimage, got %v",
payment.PaymentPreimage)
}
require.Equal(ht, zeroPreimg.String(),
payment.PaymentPreimage,
"expected zero preimage")
// We wait for the payment attempt to have been
// properly recorded in the DB.
@ -237,201 +155,73 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
return nil
}, defaultTimeout)
if err != nil {
t.Fatalf("predicate not satisfied: %v", err)
}
require.NoError(ht, err, "timeout checking alice's payments")
// Wait for all invoices to be accepted.
for _, stream := range invoiceStreams {
invoice, err := stream.Recv()
if err != nil {
t.Fatalf("err: %v", err)
}
if invoice.State != lnrpc.Invoice_ACCEPTED {
t.Fatalf("expected ACCEPTED, got state: %v",
invoice.State)
}
}
assertInvoiceState(lnrpc.Invoice_ACCEPTED)
// Restart alice. This is to ensure she will still be able to handle
// settling the invoices after a restart.
if err := net.RestartNode(net.Alice, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
ht.RestartNode(alice)
// Ensure the connections are made.
//
// TODO(yy): we shouldn't need these two lines since the connections
// are permanent, they'd reconnect automatically upon Alice's restart.
// However, we'd sometimes see the error `unable to gracefully close
// channel while peer is offline (try force closing it instead):
// channel link not found` from closing the channels in the end,
// indicating there's something wrong with the peer conn. We need to
// investigate and fix it in peer conn management.
ht.EnsureConnected(alice, bob)
ht.EnsureConnected(alice, carol)
// Now after a restart, we must re-track the payments. We set up a
// goroutine for each to track their status updates.
var (
statusUpdates []chan *lnrpc.Payment
wg sync.WaitGroup
quit = make(chan struct{})
)
defer close(quit)
for _, preimg := range preimages {
hash := preimg.Hash()
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
payStream := alice.RPC.TrackPaymentV2(hash[:])
ht.ReceiveTrackPayment(payStream)
payStream, err := net.Alice.RouterClient.TrackPaymentV2(
ctx, &routerrpc.TrackPaymentRequest{
PaymentHash: hash[:],
},
ht.AssertPaymentStatus(
alice, preimg, lnrpc.Payment_IN_FLIGHT,
)
if err != nil {
t.Fatalf("unable to send track payment: %v", err)
}
// We set up a channel where we'll forward any status update.
upd := make(chan *lnrpc.Payment)
wg.Add(1)
go func() {
defer wg.Done()
for {
payment, err := payStream.Recv()
if err != nil {
close(upd)
return
}
select {
case upd <- payment:
case <-quit:
return
}
}
}()
statusUpdates = append(statusUpdates, upd)
}
// Wait for the in-flight status update.
for _, upd := range statusUpdates {
select {
case payment, ok := <-upd:
if !ok {
t.Fatalf("failed getting payment update")
}
if payment.Status != lnrpc.Payment_IN_FLIGHT {
t.Fatalf("state not in in flight: %v",
payment.Status)
}
case <-time.After(5 * time.Second):
t.Fatalf("in flight status not received")
}
}
// Settle half of the invoices, cancel the rest.
for i, preimage := range preimages {
var expectedState lnrpc.Invoice_InvoiceState
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
if i%2 == 0 {
settle := &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
}
_, err = carol.SettleInvoice(ctxt, settle)
expectedState = lnrpc.Invoice_SETTLED
carol.RPC.SettleInvoice(preimage[:])
ht.AssertInvoiceState(
invoiceStreams[i], lnrpc.Invoice_SETTLED,
)
} else {
hash := preimage.Hash()
settle := &invoicesrpc.CancelInvoiceMsg{
PaymentHash: hash[:],
}
_, err = carol.CancelInvoice(ctxt, settle)
expectedState = lnrpc.Invoice_CANCELED
}
if err != nil {
t.Fatalf("unable to cancel/settle invoice: %v", err)
}
stream := invoiceStreams[i]
invoice, err := stream.Recv()
require.NoError(t.t, err)
require.Equal(t.t, expectedState, invoice.State)
}
// Make sure we get the expected status update.
for i, upd := range statusUpdates {
// Read until the payment is in a terminal state.
var payment *lnrpc.Payment
for payment == nil {
select {
case p, ok := <-upd:
if !ok {
t.Fatalf("failed getting payment update")
}
if p.Status == lnrpc.Payment_IN_FLIGHT {
continue
}
payment = p
case <-time.After(5 * time.Second):
t.Fatalf("in flight status not received")
}
}
// Assert terminal payment state.
if i%2 == 0 {
if payment.Status != lnrpc.Payment_SUCCEEDED {
t.Fatalf("state not succeeded : %v",
payment.Status)
}
} else {
if payment.FailureReason !=
lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS {
t.Fatalf("state not failed: %v",
payment.FailureReason)
}
carol.RPC.CancelInvoice(hash[:])
ht.AssertInvoiceState(
invoiceStreams[i], lnrpc.Invoice_CANCELED,
)
}
}
// Check that Alice's payments are shown as settled or failed
// accordingly, with the preimages matching up.
req := &lnrpc.ListPaymentsRequest{
IncludeIncomplete: true,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
paymentsResp, err := net.Alice.ListPayments(ctxt, req)
if err != nil {
t.Fatalf("error when obtaining Alice payments: %v", err)
}
for i, preimage := range preimages {
paymentHash := preimage.Hash()
var p string
for _, resp := range paymentsResp.Payments {
if resp.PaymentHash == paymentHash.String() {
p = resp.PaymentPreimage
break
}
}
if p == "" {
t.Fatalf("payment not found")
}
for i, preimg := range preimages {
if i%2 == 0 {
if p != preimage.String() {
t.Fatalf("preimage doesn't match: %v vs %v",
p, preimage.String())
}
ht.AssertPaymentStatus(
alice, preimg, lnrpc.Payment_SUCCEEDED,
)
} else {
if p != lntypes.ZeroHash.String() {
t.Fatalf("preimage not zero: %v", p)
}
payment := ht.AssertPaymentStatus(
alice, preimg, lnrpc.Payment_FAILED,
)
require.Equal(ht, reason, payment.FailureReason,
"wrong failure reason")
}
}
// Check that all of our invoice streams are terminated by the server
// since the invoices have completed.
for _, stream := range invoiceStreams {
_, err = stream.Recv()
require.Equal(t.t, io.EOF, err)
}
// Finally, close all channels.
ht.CloseChannel(alice, chanPointBob)
ht.CloseChannel(alice, chanPointAlice)
}
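The settle-or-cancel loop above is the core of the hold-invoice flow this test exercises. Below is a compact, illustrative sketch of that lifecycle using the same harness wrappers; it is not part of the diff and assumes the lnrpc, invoicesrpc, routerrpc, lntemp, lntypes and lntemp/node imports of this test file.

// holdInvoiceLifecycle is a sketch condensing the flow exercised above:
// the payee creates a hold invoice, the payer pays it, the HTLC parks in
// the ACCEPTED state, and the payee then settles with the preimage.
func holdInvoiceLifecycle(ht *lntemp.HarnessTest, payer,
	payee *node.HarnessNode, preimage lntypes.Preimage) {

	payHash := preimage.Hash()
	resp := payee.RPC.AddHoldInvoice(&invoicesrpc.AddHoldInvoiceRequest{
		Value: 4,
		Hash:  payHash[:],
	})

	// Subscribe before paying so no state transition is missed.
	stream := payee.RPC.SubscribeSingleInvoice(payHash[:])
	ht.AssertInvoiceState(stream, lnrpc.Invoice_OPEN)

	// Pay the invoice; it will stay in-flight while the invoice sits
	// in the ACCEPTED state.
	payer.RPC.SendPayment(&routerrpc.SendPaymentRequest{
		PaymentRequest: resp.PaymentRequest,
		TimeoutSeconds: 60,
		FeeLimitSat:    1_000_000,
	})
	ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)

	// The payee now decides: reveal the preimage to settle, or cancel
	// to fail the HTLC back (payee.RPC.CancelInvoice(payHash[:])).
	payee.RPC.SettleInvoice(preimage[:])
	ht.AssertInvoiceState(stream, lnrpc.Invoice_SETTLED)
}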

View file

@ -1,7 +1,6 @@
package itest
import (
"bytes"
"context"
"encoding/hex"
"os"
@ -12,7 +11,8 @@ import (
"github.com/golang/protobuf/proto"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/macaroons"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -23,13 +23,14 @@ import (
// enabled on the gRPC interface, no requests with missing or invalid
// macaroons are allowed. Further, the specific access rights (read/write,
// entity based) and first-party caveats are tested as well.
func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
func testMacaroonAuthentication(ht *lntemp.HarnessTest) {
var (
infoReq = &lnrpc.GetInfoRequest{}
newAddrReq = &lnrpc.NewAddressRequest{
Type: AddrTypeWitnessPubkeyHash,
}
testNode = net.Alice
testNode = ht.Alice
testClient = testNode.RPC.LN
)
testCases := []struct {
@ -41,7 +42,7 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
// enabled.
name: "no macaroon",
run: func(ctxt context.Context, t *testing.T) {
conn, err := testNode.ConnectRPC(false)
conn, err := testNode.ConnectRPCWithMacaroon(nil)
require.NoError(t, err)
defer func() { _ = conn.Close() }()
client := lnrpc.NewLightningClient(conn)
@ -72,7 +73,7 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
name: "read only macaroon",
run: func(ctxt context.Context, t *testing.T) {
readonlyMac, err := testNode.ReadMacaroon(
testNode.ReadMacPath(), defaultTimeout,
testNode.Cfg.ReadMacPath, defaultTimeout,
)
require.NoError(t, err)
cleanup, client := macaroonClient(
@ -89,7 +90,7 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
name: "expired macaroon",
run: func(ctxt context.Context, t *testing.T) {
readonlyMac, err := testNode.ReadMacaroon(
testNode.ReadMacPath(), defaultTimeout,
testNode.Cfg.ReadMacPath, defaultTimeout,
)
require.NoError(t, err)
timeoutMac, err := macaroons.AddConstraints(
@ -109,7 +110,7 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
name: "invalid IP macaroon",
run: func(ctxt context.Context, t *testing.T) {
readonlyMac, err := testNode.ReadMacaroon(
testNode.ReadMacPath(), defaultTimeout,
testNode.Cfg.ReadMacPath, defaultTimeout,
)
require.NoError(t, err)
invalidIPAddrMac, err := macaroons.AddConstraints(
@ -133,7 +134,7 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
name: "correct macaroon",
run: func(ctxt context.Context, t *testing.T) {
adminMac, err := testNode.ReadMacaroon(
testNode.AdminMacPath(), defaultTimeout,
testNode.Cfg.AdminMacPath, defaultTimeout,
)
require.NoError(t, err)
adminMac, err = macaroons.AddConstraints(
@ -163,7 +164,7 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
"Permissions",
}},
}
bakeRes, err := testNode.BakeMacaroon(ctxt, req)
bakeRes, err := testClient.BakeMacaroon(ctxt, req)
require.NoError(t, err)
// Create a connection that uses the custom macaroon.
@ -218,7 +219,7 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
}},
AllowExternalPermissions: true,
}
bakeResp, err := testNode.BakeMacaroon(ctxt, req)
bakeResp, err := testClient.BakeMacaroon(ctxt, req)
require.NoError(t, err)
macBytes, err := hex.DecodeString(bakeResp.Macaroon)
@ -232,7 +233,7 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
// Test that CheckMacaroonPermissions accurately
// characterizes the macaroon as valid, even if the
// permissions are not native to LND.
checkResp, err := testNode.CheckMacaroonPermissions(
checkResp, err := testClient.CheckMacaroonPermissions(
ctxt, checkReq,
)
require.NoError(t, err)
@ -253,7 +254,7 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
checkReq.Macaroon = timeoutMacBytes
_, err = testNode.CheckMacaroonPermissions(
_, err = testClient.CheckMacaroonPermissions(
ctxt, checkReq,
)
require.Error(t, err)
@ -269,7 +270,7 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
checkReq.Permissions = wrongPermissions
checkReq.Macaroon = macBytes
_, err = testNode.CheckMacaroonPermissions(
_, err = testClient.CheckMacaroonPermissions(
ctxt, checkReq,
)
require.Error(t, err)
@ -279,9 +280,9 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
for _, tc := range testCases {
tc := tc
ht.t.Run(tc.name, func(tt *testing.T) {
ht.Run(tc.name, func(tt *testing.T) {
ctxt, cancel := context.WithTimeout(
context.Background(), defaultTimeout,
ht.Context(), defaultTimeout,
)
defer cancel()
@ -293,8 +294,8 @@ func testMacaroonAuthentication(net *lntest.NetworkHarness, ht *harnessTest) {
// testBakeMacaroon checks that when creating macaroons, the permissions param
// in the request must be set correctly, and the baked macaroon has the intended
// permissions.
func testBakeMacaroon(net *lntest.NetworkHarness, t *harnessTest) {
var testNode = net.Alice
func testBakeMacaroon(ht *lntemp.HarnessTest) {
var testNode = ht.Alice
testCases := []struct {
name string
@ -495,14 +496,14 @@ func testBakeMacaroon(net *lntest.NetworkHarness, t *harnessTest) {
for _, tc := range testCases {
tc := tc
t.t.Run(tc.name, func(tt *testing.T) {
ht.Run(tc.name, func(tt *testing.T) {
ctxt, cancel := context.WithTimeout(
context.Background(), defaultTimeout,
ht.Context(), defaultTimeout,
)
defer cancel()
adminMac, err := testNode.ReadMacaroon(
testNode.AdminMacPath(), defaultTimeout,
testNode.Cfg.AdminMacPath, defaultTimeout,
)
require.NoError(tt, err)
cleanup, client := macaroonClient(tt, testNode, adminMac)
@ -517,26 +518,26 @@ func testBakeMacaroon(net *lntest.NetworkHarness, t *harnessTest) {
// specified ID and invalidates all macaroons derived from the key with that ID.
// Also, it checks that deleting the reserved macaroon ID,
// DefaultRootKeyID, is forbidden.
func testDeleteMacaroonID(net *lntest.NetworkHarness, t *harnessTest) {
func testDeleteMacaroonID(ht *lntemp.HarnessTest) {
var (
ctxb = context.Background()
testNode = net.Alice
ctxb = ht.Context()
testNode = ht.Alice
)
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
// Use admin macaroon to create a connection.
adminMac, err := testNode.ReadMacaroon(
testNode.AdminMacPath(), defaultTimeout,
testNode.Cfg.AdminMacPath, defaultTimeout,
)
require.NoError(t.t, err)
cleanup, client := macaroonClient(t.t, testNode, adminMac)
require.NoError(ht, err)
cleanup, client := macaroonClient(ht.T, testNode, adminMac)
defer cleanup()
// Record the number of macaroon IDs before creation.
listReq := &lnrpc.ListMacaroonIDsRequest{}
listResp, err := client.ListMacaroonIDs(ctxt, listReq)
require.NoError(t.t, err)
require.NoError(ht, err)
numMacIDs := len(listResp.RootKeyIds)
// Create macaroons for testing.
@ -551,17 +552,17 @@ func testDeleteMacaroonID(net *lntest.NetworkHarness, t *harnessTest) {
}},
}
resp, err := client.BakeMacaroon(ctxt, req)
require.NoError(t.t, err)
require.NoError(ht, err)
macList = append(macList, resp.Macaroon)
}
// Check that the creation is successful.
listReq = &lnrpc.ListMacaroonIDsRequest{}
listResp, err = client.ListMacaroonIDs(ctxt, listReq)
require.NoError(t.t, err)
require.NoError(ht, err)
// The number of macaroon IDs should be increased by len(rootKeyIDs).
require.Equal(t.t, numMacIDs+len(rootKeyIDs), len(listResp.RootKeyIds))
require.Equal(ht, numMacIDs+len(rootKeyIDs), len(listResp.RootKeyIds))
// First test: check deleting the DefaultRootKeyID returns an error.
defaultID, _ := strconv.ParseUint(
@ -571,45 +572,45 @@ func testDeleteMacaroonID(net *lntest.NetworkHarness, t *harnessTest) {
RootKeyId: defaultID,
}
_, err = client.DeleteMacaroonID(ctxt, req)
require.Error(t.t, err)
require.Contains(
t.t, err.Error(), macaroons.ErrDeletionForbidden.Error(),
)
require.Error(ht, err)
require.Contains(ht, err.Error(),
macaroons.ErrDeletionForbidden.Error())
// Second test: check deleting the customized ID returns success.
req = &lnrpc.DeleteMacaroonIDRequest{
RootKeyId: rootKeyIDs[0],
}
resp, err := client.DeleteMacaroonID(ctxt, req)
require.NoError(t.t, err)
require.True(t.t, resp.Deleted)
require.NoError(ht, err)
require.True(ht, resp.Deleted)
// Check that the deletion is successful.
listReq = &lnrpc.ListMacaroonIDsRequest{}
listResp, err = client.ListMacaroonIDs(ctxt, listReq)
require.NoError(t.t, err)
require.NoError(ht, err)
// The number of macaroon IDs should be decreased by 1.
require.Equal(t.t, numMacIDs+len(rootKeyIDs)-1, len(listResp.RootKeyIds))
require.Equal(ht, numMacIDs+len(rootKeyIDs)-1, len(listResp.RootKeyIds))
// Check that the deleted macaroon can no longer access macaroon:read.
deletedMac, err := readMacaroonFromHex(macList[0])
require.NoError(t.t, err)
cleanup, client = macaroonClient(t.t, testNode, deletedMac)
require.NoError(ht, err)
cleanup, client = macaroonClient(ht.T, testNode, deletedMac)
defer cleanup()
// Because the macaroon is deleted, it will be treated as an invalid one.
// Because the macaroon is deleted, it will be treated as an invalid
// one.
listReq = &lnrpc.ListMacaroonIDsRequest{}
_, err = client.ListMacaroonIDs(ctxt, listReq)
require.Error(t.t, err)
require.Contains(t.t, err.Error(), "cannot get macaroon")
require.Error(ht, err)
require.Contains(ht, err.Error(), "cannot get macaroon")
}
// testStatelessInit checks that the stateless initialization of the daemon
// does not write any macaroon files to the daemon's file system and returns
// the admin macaroon in the response. It then checks that the password
// change of the wallet can also happen statelessly.
func testStatelessInit(net *lntest.NetworkHarness, t *harnessTest) {
func testStatelessInit(ht *lntemp.HarnessTest) {
var (
initPw = []byte("stateless")
newPw = []byte("stateless-new")
@ -621,85 +622,79 @@ func testStatelessInit(net *lntest.NetworkHarness, t *harnessTest) {
// First, create a new node and request it to initialize stateless.
// This should return us the binary serialized admin macaroon that we
// can then use for further calls.
carol, _, macBytes, err := net.NewNodeWithSeed(
"Carol", nil, initPw, true,
)
require.NoError(t.t, err)
if len(macBytes) == 0 {
t.Fatalf("invalid macaroon returned in stateless init")
}
carol, _, macBytes := ht.NewNodeWithSeed("Carol", nil, initPw, true)
require.NotEmpty(ht, macBytes,
"invalid macaroon returned in stateless init")
// Now make sure no macaroon files have been created by the node Carol.
_, err = os.Stat(carol.AdminMacPath())
require.Error(t.t, err)
_, err = os.Stat(carol.ReadMacPath())
require.Error(t.t, err)
_, err = os.Stat(carol.InvoiceMacPath())
require.Error(t.t, err)
_, err := os.Stat(carol.Cfg.AdminMacPath)
require.Error(ht, err)
_, err = os.Stat(carol.Cfg.ReadMacPath)
require.Error(ht, err)
_, err = os.Stat(carol.Cfg.InvoiceMacPath)
require.Error(ht, err)
// Then check that we can unmarshal the binary serialized macaroon.
adminMac := &macaroon.Macaroon{}
err = adminMac.UnmarshalBinary(macBytes)
require.NoError(t.t, err)
require.NoError(ht, err)
// Find out if we can actually use the macaroon that has been returned
// to us for a RPC call.
conn, err := carol.ConnectRPCWithMacaroon(adminMac)
require.NoError(t.t, err)
require.NoError(ht, err)
defer conn.Close()
adminMacClient := lnrpc.NewLightningClient(conn)
ctxt, _ := context.WithTimeout(context.Background(), defaultTimeout)
ctxt, cancel := context.WithTimeout(ht.Context(), defaultTimeout)
defer cancel()
res, err := adminMacClient.NewAddress(ctxt, newAddrReq)
require.NoError(t.t, err)
require.NoError(ht, err)
if !strings.HasPrefix(res.Address, harnessNetParams.Bech32HRPSegwit) {
t.Fatalf("returned address was not a regtest address")
require.Fail(ht, "returned address was not a regtest address")
}
// As a second part, shut down the node and then try to change the
// password when we start it up again.
if err := net.RestartNodeNoUnlock(carol, nil, true); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
ht.RestartNodeNoUnlock(carol)
changePwReq := &lnrpc.ChangePasswordRequest{
CurrentPassword: initPw,
NewPassword: newPw,
StatelessInit: true,
}
response, err := carol.InitChangePassword(changePwReq)
require.NoError(t.t, err)
response, err := carol.ChangePasswordAndInit(changePwReq)
require.NoError(ht, err)
// Again, make sure no macaroon files have been created by the node
// Carol.
_, err = os.Stat(carol.AdminMacPath())
require.Error(t.t, err)
_, err = os.Stat(carol.ReadMacPath())
require.Error(t.t, err)
_, err = os.Stat(carol.InvoiceMacPath())
require.Error(t.t, err)
_, err = os.Stat(carol.Cfg.AdminMacPath)
require.Error(ht, err)
_, err = os.Stat(carol.Cfg.ReadMacPath)
require.Error(ht, err)
_, err = os.Stat(carol.Cfg.InvoiceMacPath)
require.Error(ht, err)
// Then check that we can unmarshal the new binary serialized macaroon
// and that it really is a new macaroon.
if err = adminMac.UnmarshalBinary(response.AdminMacaroon); err != nil {
t.Fatalf("unable to unmarshal macaroon: %v", err)
}
if bytes.Equal(response.AdminMacaroon, macBytes) {
t.Fatalf("expected new macaroon to be different")
}
err = adminMac.UnmarshalBinary(response.AdminMacaroon)
require.NoError(ht, err, "unable to unmarshal macaroon")
require.NotEqual(ht, response.AdminMacaroon, macBytes,
"expected new macaroon to be different")
// Finally, find out if we can actually use the new macaroon that has
// been returned to us for a RPC call.
conn2, err := carol.ConnectRPCWithMacaroon(adminMac)
require.NoError(t.t, err)
require.NoError(ht, err)
defer conn2.Close()
adminMacClient = lnrpc.NewLightningClient(conn2)
// Changing the password takes a while, so we use the default timeout
// of 30 seconds to wait for the connection to be ready.
ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout)
ctxt, cancel = context.WithTimeout(ht.Context(), defaultTimeout)
defer cancel()
res, err = adminMacClient.NewAddress(ctxt, newAddrReq)
require.NoError(t.t, err)
require.NoError(ht, err)
if !strings.HasPrefix(res.Address, harnessNetParams.Bech32HRPSegwit) {
t.Fatalf("returned address was not a regtest address")
require.Fail(ht, "returned address was not a regtest address")
}
}
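The "expired macaroon" and "invalid IP macaroon" cases earlier in this file derive their credentials by layering first-party caveats on top of the read-only macaroon. A small illustrative sketch of that derivation follows; it is not part of the diff, the exact constraint calls are elided by the hunks above, and it assumes the macaroons and gopkg.in/macaroon.v2 imports already present in this file as well as a TimeoutConstraint helper in lnd's macaroons package.

// addTimeoutCaveat is a sketch, not part of the diff: it returns a copy
// of the given macaroon restricted to the next `seconds` seconds, the
// same kind of constrained credential the "expired macaroon" case feeds
// to the RPC server. TimeoutConstraint is assumed to exist alongside
// AddConstraints in lnd's macaroons package.
func addTimeoutCaveat(mac *macaroon.Macaroon,
	seconds int64) (*macaroon.Macaroon, error) {

	return macaroons.AddConstraints(
		mac, macaroons.TimeoutConstraint(seconds),
	)
}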
@ -717,9 +712,11 @@ func readMacaroonFromHex(macHex string) (*macaroon.Macaroon, error) {
return mac, nil
}
func macaroonClient(t *testing.T, testNode *lntest.HarnessNode,
func macaroonClient(t *testing.T, testNode *node.HarnessNode,
mac *macaroon.Macaroon) (func(), lnrpc.LightningClient) {
t.Helper()
conn, err := testNode.ConnectRPCWithMacaroon(mac)
require.NoError(t, err, "connect to alice")

View file

@ -5,79 +5,50 @@ package itest
import (
"fmt"
"strings"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/funding"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lnwallet"
)
// testMaxChannelSize tests that lnd handles --maxchansize parameter
// correctly. Wumbo nodes should enforce a default soft limit of 10 BTC by
// default. This limit can be adjusted with --maxchansize config option
func testMaxChannelSize(net *lntest.NetworkHarness, t *harnessTest) {
// We'll make two new nodes, both wumbo but with the default
// limit on maximum channel size (10 BTC)
wumboNode := net.NewNode(
t.t, "wumbo", []string{"--protocol.wumbo-channels"},
// testMaxChannelSize tests that lnd handles --maxchansize parameter correctly.
// Wumbo nodes should enforce a soft limit of 10 BTC by default. This limit
// can be adjusted with the --maxchansize config option.
func testMaxChannelSize(ht *lntemp.HarnessTest) {
// We'll make two new nodes, both wumbo but with the default limit on
// maximum channel size (10 BTC)
wumboNode := ht.NewNode(
"wumbo", []string{"--protocol.wumbo-channels"},
)
defer shutdownAndAssert(net, t, wumboNode)
wumboNode2 := net.NewNode(
t.t, "wumbo2", []string{"--protocol.wumbo-channels"},
wumboNode2 := ht.NewNode(
"wumbo2", []string{"--protocol.wumbo-channels"},
)
defer shutdownAndAssert(net, t, wumboNode2)
// We'll send 11 BTC to the wumbo node so it can test the wumbo soft limit.
net.SendCoins(t.t, 11*btcutil.SatoshiPerBitcoin, wumboNode)
// We'll send 11 BTC to the wumbo node so it can test the wumbo soft
// limit.
ht.FundCoins(11*btcutil.SatoshiPerBitcoin, wumboNode)
// Next we'll connect both nodes, then attempt to make a wumbo channel
// funding request, which should fail as it exceeds the default wumbo
// soft limit of 10 BTC.
net.EnsureConnected(t.t, wumboNode, wumboNode2)
ht.EnsureConnected(wumboNode, wumboNode2)
chanAmt := funding.MaxBtcFundingAmountWumbo + 1
_, err := net.OpenChannel(
wumboNode, wumboNode2, lntest.OpenChannelParams{
Amt: chanAmt,
},
// The test should show failure due to the channel exceeding our max
// size.
expectedErr := lnwallet.ErrChanTooLarge(
chanAmt, funding.MaxBtcFundingAmountWumbo,
)
if err == nil {
t.Fatalf("expected channel funding to fail as it exceeds 10 BTC limit")
}
// The test should show failure due to the channel exceeding our max size.
if !strings.Contains(err.Error(), "exceeds maximum chan size") {
t.Fatalf("channel should be rejected due to size, instead "+
"error was: %v", err)
}
// Next we'll create a non-wumbo node to verify that it enforces the
// BOLT-02 channel size limit and rejects our funding request.
miniNode := net.NewNode(t.t, "mini", nil)
defer shutdownAndAssert(net, t, miniNode)
net.EnsureConnected(t.t, wumboNode, miniNode)
_, err = net.OpenChannel(
wumboNode, miniNode, lntest.OpenChannelParams{
Amt: chanAmt,
},
ht.OpenChannelAssertErr(
wumboNode, wumboNode2,
lntemp.OpenChannelParams{Amt: chanAmt}, expectedErr,
)
if err == nil {
t.Fatalf("expected channel funding to fail as it exceeds 0.16 BTC limit")
}
// The test should show failure due to the channel exceeding our max size.
if !strings.Contains(err.Error(), "exceeds maximum chan size") {
t.Fatalf("channel should be rejected due to size, instead "+
"error was: %v", err)
}
// We'll now make another wumbo node with appropriate maximum channel size
// to accept our wumbo channel funding.
wumboNode3 := net.NewNode(
t.t, "wumbo3", []string{
// We'll now make another wumbo node with appropriate maximum channel
// size to accept our wumbo channel funding.
wumboNode3 := ht.NewNode(
"wumbo3", []string{
"--protocol.wumbo-channels",
fmt.Sprintf(
"--maxchansize=%v",
@ -85,16 +56,12 @@ func testMaxChannelSize(net *lntest.NetworkHarness, t *harnessTest) {
),
},
)
defer shutdownAndAssert(net, t, wumboNode3)
// Creating a wumbo channel between these two nodes should succeed.
net.EnsureConnected(t.t, wumboNode, wumboNode3)
chanPoint := openChannelAndAssert(
t, net, wumboNode, wumboNode3,
lntest.OpenChannelParams{
Amt: chanAmt,
},
ht.EnsureConnected(wumboNode, wumboNode3)
chanPoint := ht.OpenChannel(
wumboNode, wumboNode3, lntemp.OpenChannelParams{Amt: chanAmt},
)
closeChannelAndAssert(t, net, wumboNode, chanPoint, false)
ht.CloseChannel(wumboNode, chanPoint)
}

View file

@ -1,13 +1,11 @@
package itest
import (
"context"
"testing"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
@ -16,118 +14,69 @@ import (
// channel where we have already reached the limit of the number of htlcs that
// we may add to the remote party's commitment. This test asserts that we do
// not attempt to use the full channel at all in our pathfinding.
func testMaxHtlcPathfind(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testMaxHtlcPathfind(ht *lntemp.HarnessTest) {
// Setup a channel between Alice and Bob where Alice will only allow
// Bob to add a maximum of 5 htlcs to her commitment.
maxHtlcs := 5
chanPoint := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
alice, bob := ht.Alice, ht.Bob
chanPoint := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{
Amt: 1000000,
PushAmt: 800000,
RemoteMaxHtlcs: uint16(maxHtlcs),
},
)
// Wait for Alice and Bob to receive the channel edge from the
// funding manager.
err := net.Alice.WaitForNetworkChannelOpen(chanPoint)
require.NoError(t.t, err, "alice does not have open channel")
err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
require.NoError(t.t, err, "bob does not have open channel")
// Alice and bob should have one channel open with each other now.
assertNodeNumChannels(t, net.Alice, 1)
assertNodeNumChannels(t, net.Bob, 1)
ht.AssertNodeNumChannels(alice, 1)
ht.AssertNodeNumChannels(bob, 1)
// Send our maximum number of htlcs from Bob -> Alice so that we get
// to a point where Alice won't accept any more htlcs on the channel.
subscriptions := make([]*holdSubscription, maxHtlcs)
cancelCtxs := make([]func(), maxHtlcs)
for i := 0; i < maxHtlcs; i++ {
subCtx, cancel := context.WithTimeout(ctxb, defaultTimeout)
cancelCtxs[i] = cancel
subscriptions[i] = acceptHoldInvoice(
subCtx, t.t, i, net.Bob, net.Alice,
)
subscriptions[i] = acceptHoldInvoice(ht, i, bob, alice)
}
// Cancel all of our subscriptions on exit.
defer func() {
for _, cancel := range cancelCtxs {
cancel()
}
}()
err = assertNumActiveHtlcs([]*lntest.HarnessNode{
net.Alice, net.Bob,
}, maxHtlcs)
require.NoError(t.t, err, "htlcs not active")
ht.AssertNumActiveHtlcs(alice, maxHtlcs)
ht.AssertNumActiveHtlcs(bob, maxHtlcs)
// Now we send a payment from Alice -> Bob to sanity check that our
// commitment limit is not applied in the opposite direction.
subCtx, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
aliceBobSub := acceptHoldInvoice(
subCtx, t.t, maxHtlcs, net.Alice, net.Bob,
)
err = assertNumActiveHtlcs([]*lntest.HarnessNode{
net.Alice, net.Bob,
}, maxHtlcs+1)
require.NoError(t.t, err, "htlcs not active")
aliceBobSub := acceptHoldInvoice(ht, maxHtlcs, alice, bob)
ht.AssertNumActiveHtlcs(alice, maxHtlcs+1)
ht.AssertNumActiveHtlcs(bob, maxHtlcs+1)
// Now, we're going to try to send another payment from Bob -> Alice.
// We've hit our max remote htlcs, so we expect this payment to spin
// out dramatically with pathfinding.
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
payment, err := net.Bob.RouterClient.SendPaymentV2(
ctxt, &routerrpc.SendPaymentRequest{
sendReq := &routerrpc.SendPaymentRequest{
Amt: 1000,
Dest: net.Alice.PubKey[:],
Dest: alice.PubKey[:],
TimeoutSeconds: 60,
FeeLimitSat: 1000000,
MaxParts: 10,
Amp: true,
},
)
require.NoError(t.t, err, "send payment failed")
update, err := payment.Recv()
require.NoError(t.t, err, "no payment in flight update")
require.Equal(t.t, lnrpc.Payment_IN_FLIGHT, update.Status,
"payment not inflight")
update, err = payment.Recv()
require.NoError(t.t, err, "no payment failed update")
require.Equal(t.t, lnrpc.Payment_FAILED, update.Status)
require.Len(t.t, update.Htlcs, 0, "expected no htlcs dispatched")
}
ht.SendPaymentAndAssertStatus(bob, sendReq, lnrpc.Payment_FAILED)
// Now that we're done, we cancel all our pending htlcs so that we
// can clean up the channel with a coop close.
for _, sub := range subscriptions {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
sub.cancel(ctxt, t.t)
sub.cancel(ht)
}
aliceBobSub.cancel(ht)
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
aliceBobSub.cancel(ctxt, t.t)
ht.AssertNumActiveHtlcs(alice, 0)
ht.AssertNumActiveHtlcs(bob, 0)
err = assertNumActiveHtlcs([]*lntest.HarnessNode{
net.Alice, net.Bob,
}, 0)
require.NoError(t.t, err, "expected all htlcs canceled")
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
ht.CloseChannel(alice, chanPoint)
}
type holdSubscription struct {
recipient invoicesrpc.InvoicesClient
recipient *node.HarnessNode
hash lntypes.Hash
invSubscription invoicesrpc.Invoices_SubscribeSingleInvoiceClient
paymentSubscription routerrpc.Router_SendPaymentV2Client
@ -135,89 +84,73 @@ type holdSubscription struct {
// cancel cancels the hold invoice from the recipient and consumes
// updates from the payer until it has reached a final, failed state.
func (h *holdSubscription) cancel(ctx context.Context, t *testing.T) {
_, err := h.recipient.CancelInvoice(ctx, &invoicesrpc.CancelInvoiceMsg{
PaymentHash: h.hash[:],
})
require.NoError(t, err, "invoice cancel failed")
func (h *holdSubscription) cancel(ht *lntemp.HarnessTest) {
h.recipient.RPC.CancelInvoice(h.hash[:])
invUpdate, err := h.invSubscription.Recv()
require.NoError(t, err, "cancel invoice subscribe failed")
require.Equal(t, lnrpc.Invoice_CANCELED, invUpdate.State,
invUpdate := ht.ReceiveSingleInvoice(h.invSubscription)
require.Equal(ht, lnrpc.Invoice_CANCELED, invUpdate.State,
"expected invoice canceled")
// We expect one in flight update when our htlc is canceled back, and
// another when we fail the payment as a whole.
payUpdate, err := h.paymentSubscription.Recv()
require.NoError(t, err, "cancel payment subscribe failed")
require.Len(t, payUpdate.Htlcs, 1)
require.Equal(t, lnrpc.Payment_IN_FLIGHT, payUpdate.Status)
payUpdate := ht.AssertPaymentStatusFromStream(
h.paymentSubscription, lnrpc.Payment_IN_FLIGHT,
)
require.Len(ht, payUpdate.Htlcs, 1)
payUpdate, err = h.paymentSubscription.Recv()
require.NoError(t, err, "cancel payment subscribe failed")
require.Equal(t, lnrpc.Payment_FAILED, payUpdate.Status,
payUpdate = ht.AssertPaymentStatusFromStream(
h.paymentSubscription, lnrpc.Payment_FAILED,
)
require.Equal(ht, lnrpc.Payment_FAILED, payUpdate.Status,
"expected payment failed")
require.Equal(t, lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS,
require.Equal(ht, lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, //nolint:lll
payUpdate.FailureReason, "expected unknown details")
}
// acceptHoldInvoice adds a hold invoice to the recipient node, pays it from
// the sender and asserts that we have reached the accepted state where htlcs
// are locked in for the payment.
func acceptHoldInvoice(ctx context.Context, t *testing.T, idx int, sender,
receiver *lntest.HarnessNode) *holdSubscription {
func acceptHoldInvoice(ht *lntemp.HarnessTest, idx int, sender,
receiver *node.HarnessNode) *holdSubscription {
hash := [lntypes.HashSize]byte{byte(idx + 1)}
invoice, err := receiver.AddHoldInvoice(
ctx, &invoicesrpc.AddHoldInvoiceRequest{
req := &invoicesrpc.AddHoldInvoiceRequest{
ValueMsat: 10000,
Hash: hash[:],
},
)
require.NoError(t, err, "couldn't add invoice")
}
invoice := receiver.RPC.AddHoldInvoice(req)
invStream, err := receiver.InvoicesClient.SubscribeSingleInvoice(
ctx, &invoicesrpc.SubscribeSingleInvoiceRequest{
RHash: hash[:],
},
)
require.NoError(t, err, "could not subscribe to invoice")
invStream := receiver.RPC.SubscribeSingleInvoice(hash[:])
inv := ht.ReceiveSingleInvoice(invStream)
require.Equal(ht, lnrpc.Invoice_OPEN, inv.State, "expect open")
inv, err := invStream.Recv()
require.NoError(t, err, "invoice open stream failed")
require.Equal(t, lnrpc.Invoice_OPEN, inv.State,
"expected open")
payStream, err := sender.RouterClient.SendPaymentV2(
ctx, &routerrpc.SendPaymentRequest{
sendReq := &routerrpc.SendPaymentRequest{
PaymentRequest: invoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitSat: 1000000,
},
)
require.NoError(t, err, "send payment failed")
}
payStream := sender.RPC.SendPayment(sendReq)
// Finally, assert that we progress to an accepted state. We expect
// the payer to get one update for the creation of the payment, and
// another when a htlc is dispatched.
payment, err := payStream.Recv()
require.NoError(t, err, "payment in flight stream failed")
require.Equal(t, lnrpc.Payment_IN_FLIGHT, payment.Status)
require.Len(t, payment.Htlcs, 0)
payment := ht.AssertPaymentStatusFromStream(
payStream, lnrpc.Payment_IN_FLIGHT,
)
require.Empty(ht, payment.Htlcs)
payment, err = payStream.Recv()
require.NoError(t, err, "payment in flight stream failed")
require.Equal(t, lnrpc.Payment_IN_FLIGHT, payment.Status)
require.Len(t, payment.Htlcs, 1)
payment = ht.AssertPaymentStatusFromStream(
payStream, lnrpc.Payment_IN_FLIGHT,
)
require.Len(ht, payment.Htlcs, 1)
inv, err = invStream.Recv()
require.NoError(t, err, "invoice accepted stream failed")
require.Equal(t, lnrpc.Invoice_ACCEPTED, inv.State,
"expected accepted invoice")
inv = ht.ReceiveSingleInvoice(invStream)
require.Equal(ht, lnrpc.Invoice_ACCEPTED, inv.State,
"expected accepted")
return &holdSubscription{
recipient: receiver.InvoicesClient,
recipient: receiver,
hash: hash,
invSubscription: invStream,
paymentSubscription: payStream,

View file

@ -1,47 +1,65 @@
package itest
import (
"context"
"math"
"strings"
"time"
"github.com/lightningnetwork/lnd/funding"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
func testHtlcErrorPropagation(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testHtlcErrorPropagation(ht *lntemp.HarnessTest) {
// In this test we wish to exercise the daemon's correct parsing,
// handling, and propagation of errors that occur while processing a
// multi-hop payment.
const chanAmt = funding.MaxBtcFundingAmount
alice, bob := ht.Alice, ht.Bob
// Since we'd like to test some multi-hop failure scenarios, we'll
// introduce another node into our test network: Carol.
carol := ht.NewNode("Carol", nil)
ht.ConnectNodes(bob, carol)
// Before we start sending payments, subscribe to htlc events for each
// node.
aliceEvents := alice.RPC.SubscribeHtlcEvents()
bobEvents := bob.RPC.SubscribeHtlcEvents()
carolEvents := carol.RPC.SubscribeHtlcEvents()
// Once subscribed, the first event will be UNKNOWN.
ht.AssertHtlcEventType(aliceEvents, routerrpc.HtlcEvent_UNKNOWN)
ht.AssertHtlcEventType(bobEvents, routerrpc.HtlcEvent_UNKNOWN)
ht.AssertHtlcEventType(carolEvents, routerrpc.HtlcEvent_UNKNOWN)
// First establish a channel with a capacity of 0.5 BTC between Alice
// and Bob.
chanPointAlice := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
Amt: chanAmt,
},
chanPointAlice := ht.OpenChannel(
alice, bob,
lntemp.OpenChannelParams{Amt: chanAmt},
)
if err := net.Alice.WaitForNetworkChannelOpen(chanPointAlice); err != nil {
t.Fatalf("channel not seen by alice before timeout: %v", err)
}
cType, err := channelCommitType(net.Alice, chanPointAlice)
if err != nil {
t.Fatalf("unable to get channel type: %v", err)
}
// Next, we'll create a connection from Bob to Carol, and open a
// channel between them so we have the topology: Alice -> Bob -> Carol.
// The channel created will be of lower capacity than the one created
// above.
const bobChanAmt = funding.MaxBtcFundingAmount
chanPointBob := ht.OpenChannel(
bob, carol, lntemp.OpenChannelParams{Amt: chanAmt},
)
// Ensure that Alice has Carol in her routing table before proceeding.
ht.AssertTopologyChannelOpen(alice, chanPointBob)
cType := ht.GetChannelCommitType(alice, chanPointAlice)
commitFee := calcStaticFee(cType, 0)
assertBaseBalance := func() {
// Alice has opened a channel with Bob with zero push amount, so
// it's remote balance is zero.
// Alice has opened a channel with Bob with zero push amount,
// so its remote balance is zero.
expBalanceAlice := &lnrpc.ChannelBalanceResponse{
LocalBalance: &lnrpc.Amount{
Sat: uint64(chanAmt - commitFee),
@ -57,7 +75,7 @@ func testHtlcErrorPropagation(net *lntest.NetworkHarness, t *harnessTest) {
// Deprecated fields.
Balance: int64(chanAmt - commitFee),
}
assertChannelBalanceResp(t, net.Alice, expBalanceAlice)
ht.AssertChannelBalanceResp(alice, expBalanceAlice)
// Bob has a channel with Alice and another with Carol, so its
// local and remote balances are both chanAmt - commitFee.
@ -81,52 +99,21 @@ func testHtlcErrorPropagation(net *lntest.NetworkHarness, t *harnessTest) {
// Deprecated fields.
Balance: int64(chanAmt - commitFee),
}
assertChannelBalanceResp(t, net.Bob, expBalanceBob)
ht.AssertChannelBalanceResp(bob, expBalanceBob)
}
// Since we'd like to test some multi-hop failure scenarios, we'll
// introduce another node into our test network: Carol.
carol := net.NewNode(t.t, "Carol", nil)
// assertLinkFailure checks that the stream provided has a single link
// failure with the failure detail provided.
assertLinkFailure := func(event *routerrpc.HtlcEvent,
failureDetail routerrpc.FailureDetail) {
// Next, we'll create a connection from Bob to Carol, and open a
// channel between them so we have the topology: Alice -> Bob -> Carol.
// The channel created will be of lower capacity that the one created
// above.
net.ConnectNodes(t.t, net.Bob, carol)
const bobChanAmt = funding.MaxBtcFundingAmount
chanPointBob := openChannelAndAssert(
t, net, net.Bob, carol,
lntest.OpenChannelParams{
Amt: chanAmt,
},
)
linkFail, ok := event.Event.(*routerrpc.HtlcEvent_LinkFailEvent)
require.Truef(ht, ok, "expected forwarding failure, got: %T",
linkFail)
// Ensure that Alice has Carol in her routing table before proceeding.
nodeInfoReq := &lnrpc.NodeInfoRequest{
PubKey: carol.PubKeyStr,
}
checkTableTimeout := time.After(time.Second * 10)
checkTableTicker := time.NewTicker(100 * time.Millisecond)
defer checkTableTicker.Stop()
out:
// TODO(roasbeef): make into async hook for node announcements
for {
select {
case <-checkTableTicker.C:
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
_, err := net.Alice.GetNodeInfo(ctxt, nodeInfoReq)
if err != nil && strings.Contains(err.Error(),
"unable to find") {
continue
}
break out
case <-checkTableTimeout:
t.Fatalf("carol's node announcement didn't propagate within " +
"the timeout period")
}
require.Equal(ht, failureDetail,
linkFail.LinkFailEvent.FailureDetail,
"wrong link fail detail")
}
// With the channels open, we can now start to test our multi-hop error
@ -137,61 +124,14 @@ out:
Memo: "kek99",
Value: payAmt,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to generate carol invoice: %v", err)
}
carolPayReq, err := carol.DecodePayReq(ctxb,
&lnrpc.PayReqString{
PayReq: carolInvoice.PaymentRequest,
})
if err != nil {
t.Fatalf("unable to decode generated payment request: %v", err)
}
// Before we send the payment, ensure that the announcement of the new
// channel has been processed by Alice.
if err := net.Alice.WaitForNetworkChannelOpen(chanPointBob); err != nil {
t.Fatalf("channel not seen by alice before timeout: %v", err)
}
// Before we start sending payments, subscribe to htlc events for each
// node.
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
aliceEvents, err := net.Alice.RouterClient.SubscribeHtlcEvents(
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
)
if err != nil {
t.Fatalf("could not subscribe events: %v", err)
}
assertSubscribed(t, aliceEvents)
bobEvents, err := net.Bob.RouterClient.SubscribeHtlcEvents(
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
)
if err != nil {
t.Fatalf("could not subscribe events: %v", err)
}
assertSubscribed(t, bobEvents)
carolEvents, err := carol.RouterClient.SubscribeHtlcEvents(
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
)
if err != nil {
t.Fatalf("could not subscribe events: %v", err)
}
assertSubscribed(t, carolEvents)
carolInvoice := carol.RPC.AddInvoice(invoiceReq)
carolPayReq := carol.RPC.DecodePayReq(carolInvoice.PaymentRequest)
// For the first scenario, we'll test the cancellation of an HTLC with
// an unknown payment hash.
// TODO(roasbeef): return failure response rather than failing entire
// stream on payment error.
sendReq := &routerrpc.SendPaymentRequest{
PaymentHash: makeFakePayHash(t),
PaymentHash: ht.Random32Bytes(),
Dest: carol.PubKey[:],
Amt: payAmt,
FinalCltvDelta: int32(carolPayReq.CltvExpiry),
@ -199,25 +139,56 @@ out:
FeeLimitMsat: noFeeLimitMsat,
MaxParts: 1,
}
sendAndAssertFailure(
t, net.Alice,
sendReq, lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS,
ht.SendPaymentAssertFail(
alice, sendReq,
lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, //nolint:lll
)
assertLastHTLCError(
t, net.Alice,
lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS,
ht.AssertLastHTLCError(
alice, lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS,
)
// assertAliceAndBob is a helper closure that asserts Alice and Bob
// each has one forward and one forward fail event, and Bob has the
// final htlc fail event.
assertAliceAndBob := func() {
ht.AssertHtlcEventTypes(
aliceEvents, routerrpc.HtlcEvent_SEND,
lntemp.HtlcEventForward,
)
ht.AssertHtlcEventTypes(
aliceEvents, routerrpc.HtlcEvent_SEND,
lntemp.HtlcEventForwardFail,
)
ht.AssertHtlcEventTypes(
bobEvents, routerrpc.HtlcEvent_FORWARD,
lntemp.HtlcEventForward,
)
ht.AssertHtlcEventTypes(
bobEvents, routerrpc.HtlcEvent_FORWARD,
lntemp.HtlcEventForwardFail,
)
ht.AssertHtlcEventTypes(
bobEvents, routerrpc.HtlcEvent_UNKNOWN,
lntemp.HtlcEventFinal,
)
}
// We expect alice and bob to each have one forward and one forward
// fail event at this stage.
assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_SEND, aliceEvents)
assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_FORWARD, bobEvents)
assertAliceAndBob()
// Carol should have a link failure because the htlc failed on her
// incoming link.
assertLinkFailure(
t, routerrpc.HtlcEvent_RECEIVE,
routerrpc.FailureDetail_UNKNOWN_INVOICE, carolEvents,
event := ht.AssertHtlcEventType(
carolEvents, routerrpc.HtlcEvent_RECEIVE,
)
assertLinkFailure(event, routerrpc.FailureDetail_UNKNOWN_INVOICE)
// There's also a final htlc event that gives the final outcome of the
// htlc.
ht.AssertHtlcEventTypes(
carolEvents, routerrpc.HtlcEvent_UNKNOWN, lntemp.HtlcEventFinal,
)
// The balances of all parties should be the same as initially since
@ -230,31 +201,34 @@ out:
sendReq = &routerrpc.SendPaymentRequest{
PaymentHash: carolInvoice.RHash,
Dest: carol.PubKey[:],
Amt: int64(htlcAmt.ToSatoshis()), // 10k satoshis are expected.
// 10k satoshis are expected.
Amt: int64(htlcAmt.ToSatoshis()),
FinalCltvDelta: int32(carolPayReq.CltvExpiry),
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
MaxParts: 1,
}
sendAndAssertFailure(
t, net.Alice,
sendReq, lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS,
ht.SendPaymentAssertFail(
alice, sendReq,
lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, //nolint:lll
)
assertLastHTLCError(
t, net.Alice,
lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS,
ht.AssertLastHTLCError(
alice, lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS,
)
// We expect alice and bob to each have one forward and one forward
// fail event at this stage.
assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_SEND, aliceEvents)
assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_FORWARD, bobEvents)
assertAliceAndBob()
// Carol should have a link failure because the htlc failed on her
// incoming link.
assertLinkFailure(
t, routerrpc.HtlcEvent_RECEIVE,
routerrpc.FailureDetail_INVOICE_UNDERPAID, carolEvents,
event = ht.AssertHtlcEventType(carolEvents, routerrpc.HtlcEvent_RECEIVE)
assertLinkFailure(event, routerrpc.FailureDetail_INVOICE_UNDERPAID)
// There's also a final htlc event that gives the final outcome of the
// htlc.
ht.AssertHtlcEventTypes(
carolEvents, routerrpc.HtlcEvent_UNKNOWN, lntemp.HtlcEventFinal,
)
// The balances of all parties should be the same as initially since
@ -284,29 +258,34 @@ out:
invoiceReq = &lnrpc.Invoice{
Value: toSend,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
carolInvoice2, err := carol.AddInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to generate carol invoice: %v", err)
}
carolInvoice2 := carol.RPC.AddInvoice(invoiceReq)
sendAndAssertSuccess(
t, net.Bob, &routerrpc.SendPaymentRequest{
req := &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice2.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
MaxParts: 1,
},
)
}
ht.SendPaymentAndAssertStatus(bob, req, lnrpc.Payment_SUCCEEDED)
// For each send bob makes, we need to check that bob has a
// forward and settle event for his send, and carol has a
// settle event for her receive.
assertHtlcEvents(
t, 1, 0, 1, routerrpc.HtlcEvent_SEND, bobEvents,
// settle event and a final htlc event for her receive.
ht.AssertHtlcEventTypes(
bobEvents, routerrpc.HtlcEvent_SEND,
lntemp.HtlcEventForward,
)
assertHtlcEvents(
t, 0, 0, 1, routerrpc.HtlcEvent_RECEIVE, carolEvents,
ht.AssertHtlcEventTypes(
bobEvents, routerrpc.HtlcEvent_SEND,
lntemp.HtlcEventSettle,
)
ht.AssertHtlcEventTypes(
carolEvents, routerrpc.HtlcEvent_RECEIVE,
lntemp.HtlcEventSettle,
)
ht.AssertHtlcEventTypes(
carolEvents, routerrpc.HtlcEvent_UNKNOWN,
lntemp.HtlcEventFinal,
)
amtSent += toSend
@ -318,11 +297,7 @@ out:
invoiceReq = &lnrpc.Invoice{
Value: 100000,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
carolInvoice3, err := carol.AddInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to generate carol invoice: %v", err)
}
carolInvoice3 := carol.RPC.AddInvoice(invoiceReq)
sendReq = &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice3.PaymentRequest,
@ -330,30 +305,37 @@ out:
FeeLimitMsat: noFeeLimitMsat,
MaxParts: 1,
}
sendAndAssertFailure(
t, net.Alice,
sendReq, lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE,
ht.SendPaymentAssertFail(
alice, sendReq,
lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE,
)
assertLastHTLCError(
t, net.Alice, lnrpc.Failure_TEMPORARY_CHANNEL_FAILURE,
ht.AssertLastHTLCError(
alice, lnrpc.Failure_TEMPORARY_CHANNEL_FAILURE,
)
// Alice should have a forwarding event and a forwarding failure.
assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_SEND, aliceEvents)
ht.AssertHtlcEventTypes(
aliceEvents, routerrpc.HtlcEvent_SEND,
lntemp.HtlcEventForward,
)
ht.AssertHtlcEventTypes(
aliceEvents, routerrpc.HtlcEvent_SEND,
lntemp.HtlcEventForwardFail,
)
// Bob should have a link failure because the htlc failed on his
// outgoing link.
assertLinkFailure(
t, routerrpc.HtlcEvent_FORWARD,
routerrpc.FailureDetail_INSUFFICIENT_BALANCE, bobEvents,
event = ht.AssertHtlcEventType(bobEvents, routerrpc.HtlcEvent_FORWARD)
assertLinkFailure(event, routerrpc.FailureDetail_INSUFFICIENT_BALANCE)
// There's also a final htlc event that gives the final outcome of the
// htlc.
ht.AssertHtlcEventTypes(
bobEvents, routerrpc.HtlcEvent_UNKNOWN, lntemp.HtlcEventFinal,
)
// Generate a new invoice to avoid paying the same invoice twice.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
carolInvoice, err = carol.AddInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to generate carol invoice: %v", err)
}
carolInvoice = carol.RPC.AddInvoice(invoiceReq)
// For our final test, we'll ensure that if a target link isn't
// available for whatever reason then the payment fails accordingly.
@ -361,77 +343,48 @@ out:
// We'll attempt to complete the original invoice we created with Carol
// above, but before we do so, Carol will go offline, resulting in a
// failed payment.
shutdownAndAssert(net, t, carol)
ht.Shutdown(carol)
// Reset mission control to forget the temporary channel failure above.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
_, err = net.Alice.RouterClient.ResetMissionControl(
ctxt, &routerrpc.ResetMissionControlRequest{},
)
if err != nil {
t.Fatalf("unable to reset mission control: %v", err)
}
alice.RPC.ResetMissionControl()
sendAndAssertFailure(
t, net.Alice,
&routerrpc.SendPaymentRequest{
req := &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
MaxParts: 1,
},
lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE,
}
ht.SendPaymentAssertFail(
alice, req, lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE,
)
assertLastHTLCError(t, net.Alice, lnrpc.Failure_UNKNOWN_NEXT_PEER)
ht.AssertLastHTLCError(alice, lnrpc.Failure_UNKNOWN_NEXT_PEER)
// Alice should have a forwarding event and subsequent fail.
assertHtlcEvents(t, 1, 1, 0, routerrpc.HtlcEvent_SEND, aliceEvents)
ht.AssertHtlcEventTypes(
aliceEvents, routerrpc.HtlcEvent_SEND,
lntemp.HtlcEventForward,
)
ht.AssertHtlcEventTypes(
aliceEvents, routerrpc.HtlcEvent_SEND,
lntemp.HtlcEventForwardFail,
)
// Bob should have a link failure because he could not find the next
// peer.
assertLinkFailure(
t, routerrpc.HtlcEvent_FORWARD,
routerrpc.FailureDetail_NO_DETAIL, bobEvents,
event = ht.AssertHtlcEventType(bobEvents, routerrpc.HtlcEvent_FORWARD)
assertLinkFailure(event, routerrpc.FailureDetail_NO_DETAIL)
// There's also a final htlc event that gives the final outcome of the
// htlc.
ht.AssertHtlcEventTypes(
bobEvents, routerrpc.HtlcEvent_UNKNOWN, lntemp.HtlcEventFinal,
)
// Finally, immediately close the channel. This function will also
// block until the channel is closed and will additionally assert the
// relevant channel closing post conditions.
closeChannelAndAssert(t, net, net.Alice, chanPointAlice, false)
ht.CloseChannel(alice, chanPointAlice)
// Force close Bob's final channel.
closeChannelAndAssert(t, net, net.Bob, chanPointBob, true)
// Cleanup by mining the force close and sweep transaction.
cleanupForceClose(t, net, net.Bob, chanPointBob)
}
// assertLinkFailure checks that the stream provided has a single link failure
// with the failure detail provided.
func assertLinkFailure(t *harnessTest,
eventType routerrpc.HtlcEvent_EventType,
failureDetail routerrpc.FailureDetail,
client routerrpc.Router_SubscribeHtlcEventsClient) {
event := assertEventAndType(t, eventType, client)
linkFail, ok := event.Event.(*routerrpc.HtlcEvent_LinkFailEvent)
if !ok {
t.Fatalf("expected forwarding failure, got: %T", linkFail)
}
if linkFail.LinkFailEvent.FailureDetail != failureDetail {
t.Fatalf("expected: %v, got: %v", failureDetail,
linkFail.LinkFailEvent.FailureDetail)
}
event = assertEventAndType(t, routerrpc.HtlcEvent_UNKNOWN, client)
finalHtlc, ok := event.Event.(*routerrpc.HtlcEvent_FinalHtlcEvent)
if !ok {
t.Fatalf("expected final htlc, got: %T", event.Event)
}
if finalHtlc.FinalHtlcEvent.Settled {
t.Fatalf("expected final fail")
}
ht.ForceCloseChannel(bob, chanPointBob)
}

View file

@ -1,146 +1,70 @@
package itest
import (
"context"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/stretchr/testify/require"
)
func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testMultiHopPayments(ht *lntemp.HarnessTest) {
const chanAmt = btcutil.Amount(100000)
var networkChans []*lnrpc.ChannelPoint
// Open a channel with 100k satoshis between Alice and Bob with Alice
// being the sole funder of the channel.
chanPointAlice := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
Amt: chanAmt,
},
)
networkChans = append(networkChans, chanPointAlice)
aliceChanTXID, err := lnrpc.GetChanPointFundingTxid(chanPointAlice)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
aliceFundPoint := wire.OutPoint{
Hash: *aliceChanTXID,
Index: chanPointAlice.OutputIndex,
}
// As preliminary setup, we'll create two new nodes: Carol and Dave,
// such that we now have a 4 node, 3 channel topology. Dave will make a
// channel with Alice, and Carol with Dave. After this setup, the
// network topology should now look like:
// Carol -> Dave -> Alice -> Bob
//
// First, we'll create Dave and establish a channel to Alice. Dave will
// be running an older node that requires the legacy onion payload.
alice, bob := ht.Alice, ht.Bob
daveArgs := []string{"--protocol.legacy.onion"}
dave := net.NewNode(t.t, "Dave", daveArgs)
defer shutdownAndAssert(net, t, dave)
dave := ht.NewNode("Dave", daveArgs)
carol := ht.NewNode("Carol", nil)
net.ConnectNodes(t.t, dave, net.Alice)
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, dave)
// Subscribe to the htlc events early so we don't miss any.
aliceEvents := alice.RPC.SubscribeHtlcEvents()
bobEvents := bob.RPC.SubscribeHtlcEvents()
carolEvents := carol.RPC.SubscribeHtlcEvents()
daveEvents := dave.RPC.SubscribeHtlcEvents()
chanPointDave := openChannelAndAssert(
t, net, dave, net.Alice,
lntest.OpenChannelParams{
Amt: chanAmt,
},
// Once subscribed, the first event will be UNKNOWN.
ht.AssertHtlcEventType(aliceEvents, routerrpc.HtlcEvent_UNKNOWN)
ht.AssertHtlcEventType(bobEvents, routerrpc.HtlcEvent_UNKNOWN)
ht.AssertHtlcEventType(carolEvents, routerrpc.HtlcEvent_UNKNOWN)
ht.AssertHtlcEventType(daveEvents, routerrpc.HtlcEvent_UNKNOWN)
// Connect the nodes.
ht.ConnectNodes(dave, alice)
ht.ConnectNodes(carol, dave)
// Open a channel with 100k satoshis between Alice and Bob with Alice
// being the sole funder of the channel.
chanPointAlice := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: chanAmt},
)
// We'll create Dave and establish a channel to Alice. Dave will be
// running an older node that requires the legacy onion payload.
ht.FundCoins(btcutil.SatoshiPerBitcoin, dave)
chanPointDave := ht.OpenChannel(
dave, alice, lntemp.OpenChannelParams{Amt: chanAmt},
)
networkChans = append(networkChans, chanPointDave)
daveChanTXID, err := lnrpc.GetChanPointFundingTxid(chanPointDave)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
daveFundPoint := wire.OutPoint{
Hash: *daveChanTXID,
Index: chanPointDave.OutputIndex,
}
// Next, we'll create Carol and establish a channel from her to
// Dave.
carol := net.NewNode(t.t, "Carol", nil)
defer shutdownAndAssert(net, t, carol)
net.ConnectNodes(t.t, carol, dave)
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, carol)
chanPointCarol := openChannelAndAssert(
t, net, carol, dave,
lntest.OpenChannelParams{
Amt: chanAmt,
},
ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
chanPointCarol := ht.OpenChannel(
carol, dave, lntemp.OpenChannelParams{Amt: chanAmt},
)
networkChans = append(networkChans, chanPointCarol)
carolChanTXID, err := lnrpc.GetChanPointFundingTxid(chanPointCarol)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
carolFundPoint := wire.OutPoint{
Hash: *carolChanTXID,
Index: chanPointCarol.OutputIndex,
}
// Wait for all nodes to have seen all channels.
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave}
nodeNames := []string{"Alice", "Bob", "Carol", "Dave"}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
point := wire.OutPoint{
Hash: *txid,
Index: chanPoint.OutputIndex,
}
err = node.WaitForNetworkChannelOpen(chanPoint)
if err != nil {
t.Fatalf("%s(%d): timeout waiting for "+
"channel(%s) open: %v", nodeNames[i],
node.NodeID, point, err)
}
}
}
// Create 5 invoices for Bob, which expect a payment from Carol for 1k
// satoshis with a different preimage each time.
const numPayments = 5
const paymentAmt = 1000
payReqs, _, _, err := createPayReqs(
net.Bob, paymentAmt, numPayments,
)
if err != nil {
t.Fatalf("unable to create pay reqs: %v", err)
}
// We'll wait for all parties to recognize the new channels within the
// network.
err = dave.WaitForNetworkChannelOpen(chanPointDave)
if err != nil {
t.Fatalf("dave didn't advertise his channel: %v", err)
}
err = carol.WaitForNetworkChannelOpen(chanPointCarol)
if err != nil {
t.Fatalf("carol didn't advertise her channel in time: %v",
err)
}
time.Sleep(time.Millisecond * 50)
payReqs, _, _ := ht.CreatePayReqs(bob, paymentAmt, numPayments)
// Set the fee policies of the Alice -> Bob and the Dave -> Alice
// channel edges to relatively large non default values. This makes it
@ -149,61 +73,21 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
const aliceBaseFeeSat = 1
const aliceFeeRatePPM = 100000
updateChannelPolicy(
t, net.Alice, chanPointAlice, aliceBaseFeeSat*1000,
aliceFeeRatePPM, chainreg.DefaultBitcoinTimeLockDelta, maxHtlc,
carol,
ht, alice, chanPointAlice, aliceBaseFeeSat*1000,
aliceFeeRatePPM, chainreg.DefaultBitcoinTimeLockDelta,
maxHtlc, carol,
)
const daveBaseFeeSat = 5
const daveFeeRatePPM = 150000
updateChannelPolicy(
t, dave, chanPointDave, daveBaseFeeSat*1000, daveFeeRatePPM,
ht, dave, chanPointDave, daveBaseFeeSat*1000, daveFeeRatePPM,
chainreg.DefaultBitcoinTimeLockDelta, maxHtlc, carol,
)
// Before we start sending payments, subscribe to htlc events for each
// node.
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
aliceEvents, err := net.Alice.RouterClient.SubscribeHtlcEvents(
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
)
if err != nil {
t.Fatalf("could not subscribe events: %v", err)
}
assertSubscribed(t, aliceEvents)
bobEvents, err := net.Bob.RouterClient.SubscribeHtlcEvents(
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
)
if err != nil {
t.Fatalf("could not subscribe events: %v", err)
}
assertSubscribed(t, bobEvents)
carolEvents, err := carol.RouterClient.SubscribeHtlcEvents(
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
)
if err != nil {
t.Fatalf("could not subscribe events: %v", err)
}
assertSubscribed(t, carolEvents)
daveEvents, err := dave.RouterClient.SubscribeHtlcEvents(
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
)
if err != nil {
t.Fatalf("could not subscribe events: %v", err)
}
assertSubscribed(t, daveEvents)
// Using Carol as the source, pay to the 5 invoices from Bob created
// above.
err = completePaymentRequests(carol, carol.RouterClient, payReqs, true)
if err != nil {
t.Fatalf("unable to send payments: %v", err)
}
ht.CompletePaymentRequests(carol, payReqs)
// At this point all the channels within our proto network should be
// shifted by 5k satoshis in the direction of Bob, the sink within the
@ -215,10 +99,10 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
// The final node bob expects to get paid five times 1000 sat.
expectedAmountPaidAtoB := int64(numPayments * paymentAmt)
assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Bob,
aliceFundPoint, int64(0), expectedAmountPaidAtoB)
assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Alice,
aliceFundPoint, expectedAmountPaidAtoB, int64(0))
ht.AssertAmountPaid("Alice(local) => Bob(remote)", bob,
chanPointAlice, int64(0), expectedAmountPaidAtoB)
ht.AssertAmountPaid("Alice(local) => Bob(remote)", alice,
chanPointAlice, expectedAmountPaidAtoB, int64(0))
// To forward a payment of 1000 sat, Alice is charging a fee of
// 1 sat + 10% = 101 sat.
@ -229,10 +113,10 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
// Dave needs to pay what Alice pays plus Alice's fee.
expectedAmountPaidDtoA := expectedAmountPaidAtoB + expectedFeeAlice
assertAmountPaid(t, "Dave(local) => Alice(remote)", net.Alice,
daveFundPoint, int64(0), expectedAmountPaidDtoA)
assertAmountPaid(t, "Dave(local) => Alice(remote)", dave,
daveFundPoint, expectedAmountPaidDtoA, int64(0))
ht.AssertAmountPaid("Dave(local) => Alice(remote)", alice,
chanPointDave, int64(0), expectedAmountPaidDtoA)
ht.AssertAmountPaid("Dave(local) => Alice(remote)", dave,
chanPointDave, expectedAmountPaidDtoA, int64(0))
// To forward a payment of 1101 sat, Dave is charging a fee of
// 5 sat + 15% = 170.15 sat. This is rounded down in rpcserver to 170.
@ -244,10 +128,10 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
// Carol needs to pay what Dave pays plus Dave's fee.
expectedAmountPaidCtoD := expectedAmountPaidDtoA + expectedFeeDave
assertAmountPaid(t, "Carol(local) => Dave(remote)", dave,
carolFundPoint, int64(0), expectedAmountPaidCtoD)
assertAmountPaid(t, "Carol(local) => Dave(remote)", carol,
carolFundPoint, expectedAmountPaidCtoD, int64(0))
ht.AssertAmountPaid("Carol(local) => Dave(remote)", dave,
chanPointCarol, int64(0), expectedAmountPaidCtoD)
ht.AssertAmountPaid("Carol(local) => Dave(remote)", carol,
chanPointCarol, expectedAmountPaidCtoD, int64(0))
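To make the fee math in the comments above concrete, here is a small, runnable sketch of the usual base-fee-plus-proportional-rate formula (amounts in msat, rate in parts per million); the truncation back to whole satoshis is what rounds Dave's 170.15 sat down to 170. The helper name is purely illustrative:
package main

import "fmt"

// forwardFeeMsat mirrors the standard forwarding-fee formula:
// fee = base_fee + amt * fee_rate_ppm / 1_000_000, with all amounts in msat.
func forwardFeeMsat(amtMsat, baseFeeMsat, feeRatePPM int64) int64 {
	return baseFeeMsat + amtMsat*feeRatePPM/1_000_000
}

func main() {
	const numPayments, paymentAmtSat = 5, 1_000

	// Alice: 1 sat base fee, 100_000 ppm (10%) => 101 sat per payment.
	aliceFeeSat := forwardFeeMsat(paymentAmtSat*1_000, 1_000, 100_000) / 1_000

	// Dave forwards the payment plus Alice's fee: 5 sat base, 150_000 ppm
	// (15%) => 170_150 msat, truncated to 170 sat when expressed in sats.
	daveFeeSat := forwardFeeMsat(
		(paymentAmtSat+aliceFeeSat)*1_000, 5_000, 150_000,
	) / 1_000

	// Over the 5 payments: 505 sat earned by Alice, 850 sat by Dave.
	fmt.Println(numPayments*aliceFeeSat, numPayments*daveFeeSat)
}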
// Now that we know all the balances have been settled out properly,
// we'll ensure that our internal record keeping for completed circuits
@ -256,206 +140,92 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
// First, check that the FeeReport response shows the proper fees
// accrued over each time range. Dave should've earned 170 satoshi for
// each of the forwarded payments.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
feeReport, err := dave.FeeReport(ctxt, &lnrpc.FeeReportRequest{})
require.NoError(t.t, err)
require.EqualValues(t.t, expectedFeeDave, feeReport.DayFeeSum)
require.EqualValues(t.t, expectedFeeDave, feeReport.WeekFeeSum)
require.EqualValues(t.t, expectedFeeDave, feeReport.MonthFeeSum)
ht.AssertFeeReport(
dave, expectedFeeDave, expectedFeeDave, expectedFeeDave,
)
// Next, ensure that if we issue the vanilla query for the forwarding
// history, it returns 5 values, and each entry is formatted properly.
// From Dave's perspective he receives a payment from Carol and
// forwards it to Alice. So let's ensure that the forwarding history
// returns Carol's peer alias as inbound and Alice's alias as outbound.
info, err := carol.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
require.NoError(t.t, err)
info := carol.RPC.GetInfo()
carolAlias := info.Alias
info, err = net.Alice.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
require.NoError(t.t, err)
info = alice.RPC.GetInfo()
aliceAlias := info.Alias
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
fwdingHistory, err := dave.ForwardingHistory(
ctxt, &lnrpc.ForwardingHistoryRequest{},
)
require.NoError(t.t, err)
require.Len(t.t, fwdingHistory.ForwardingEvents, numPayments)
fwdingHistory := dave.RPC.ForwardingHistory(nil)
require.Len(ht, fwdingHistory.ForwardingEvents, numPayments)
expectedForwardingFee := uint64(expectedFeeDave / numPayments)
for _, event := range fwdingHistory.ForwardingEvents {
// Each event should show a fee of 170 satoshi.
require.Equal(t.t, expectedForwardingFee, event.Fee)
require.Equal(ht, expectedForwardingFee, event.Fee)
// Check that peer aliases are empty since the
// ForwardingHistoryRequest did not specify the PeerAliasLookup
// flag.
require.Empty(t.t, event.PeerAliasIn)
require.Empty(t.t, event.PeerAliasOut)
require.Empty(ht, event.PeerAliasIn)
require.Empty(ht, event.PeerAliasOut)
}
// Look up the forwarding history again, but this time also look up the
// peers' alias names.
fwdingHistory, err = dave.ForwardingHistory(
ctxt, &lnrpc.ForwardingHistoryRequest{
fwdingHistory = dave.RPC.ForwardingHistory(
&lnrpc.ForwardingHistoryRequest{
PeerAliasLookup: true,
},
)
require.NoError(t.t, err)
require.Len(t.t, fwdingHistory.ForwardingEvents, numPayments)
require.Len(ht, fwdingHistory.ForwardingEvents, numPayments)
for _, event := range fwdingHistory.ForwardingEvents {
// Each event should show a fee of 170 satoshi.
require.Equal(t.t, expectedForwardingFee, event.Fee)
require.Equal(ht, expectedForwardingFee, event.Fee)
// Check that peer aliases adhere to payment flow, namely
// Carol->Dave->Alice.
require.Equal(t.t, carolAlias, event.PeerAliasIn)
require.Equal(t.t, aliceAlias, event.PeerAliasOut)
require.Equal(ht, carolAlias, event.PeerAliasIn)
require.Equal(ht, aliceAlias, event.PeerAliasOut)
}
// We expect Carol to have successful forwards and settles for
// her sends.
assertHtlcEvents(
t, numPayments, 0, numPayments, routerrpc.HtlcEvent_SEND,
carolEvents,
ht.AssertHtlcEvents(
carolEvents, numPayments, 0, numPayments,
routerrpc.HtlcEvent_SEND,
)
// Dave and Alice should both have forwards and settles for
// their role as forwarding nodes.
assertHtlcEvents(
t, numPayments, 0, numPayments, routerrpc.HtlcEvent_FORWARD,
daveEvents,
ht.AssertHtlcEvents(
daveEvents, numPayments, 0, numPayments,
routerrpc.HtlcEvent_FORWARD,
)
assertHtlcEvents(
t, numPayments, 0, numPayments, routerrpc.HtlcEvent_FORWARD,
aliceEvents,
ht.AssertHtlcEvents(
aliceEvents, numPayments, 0, numPayments,
routerrpc.HtlcEvent_FORWARD,
)
// Bob should only have settle events for his receives.
assertHtlcEvents(
t, 0, 0, numPayments, routerrpc.HtlcEvent_RECEIVE, bobEvents,
ht.AssertHtlcEvents(
bobEvents, 0, 0, numPayments, routerrpc.HtlcEvent_RECEIVE,
)
closeChannelAndAssert(t, net, net.Alice, chanPointAlice, false)
closeChannelAndAssert(t, net, dave, chanPointDave, false)
closeChannelAndAssert(t, net, carol, chanPointCarol, false)
// Finally, close all channels.
ht.CloseChannel(alice, chanPointAlice)
ht.CloseChannel(dave, chanPointDave)
ht.CloseChannel(carol, chanPointCarol)
}
// assertHtlcEvents consumes events from a client and ensures that they are of
// the expected type and contain the expected number of forwards, forward
// failures and settles.
func assertHtlcEvents(t *harnessTest, fwdCount, fwdFailCount, settleCount int,
userType routerrpc.HtlcEvent_EventType,
client routerrpc.Router_SubscribeHtlcEventsClient) {
var forwards, forwardFails, settles, finalSettles, finalFails int
var finalFailCount, finalSettleCount int
if userType != routerrpc.HtlcEvent_SEND {
finalFailCount = fwdFailCount
finalSettleCount = settleCount
}
numEvents := fwdCount + fwdFailCount + settleCount +
finalFailCount + finalSettleCount
for i := 0; i < numEvents; i++ {
event, err := client.Recv()
if err != nil {
t.Fatalf("could not get event")
}
expectedEventType := userType
switch e := event.Event.(type) {
case *routerrpc.HtlcEvent_ForwardEvent:
forwards++
case *routerrpc.HtlcEvent_ForwardFailEvent:
forwardFails++
case *routerrpc.HtlcEvent_SettleEvent:
settles++
case *routerrpc.HtlcEvent_FinalHtlcEvent:
if e.FinalHtlcEvent.Settled {
finalSettles++
} else {
finalFails++
}
expectedEventType = routerrpc.HtlcEvent_UNKNOWN
default:
t.Fatalf("unexpected event: %T", event.Event)
}
if event.EventType != expectedEventType {
t.Fatalf("expected: %v, got: %v", expectedEventType,
event.EventType)
}
}
if forwards != fwdCount {
t.Fatalf("expected: %v forwards, got: %v", fwdCount, forwards)
}
if forwardFails != fwdFailCount {
t.Fatalf("expected: %v forward fails, got: %v", fwdFailCount,
forwardFails)
}
if finalFails != finalFailCount {
t.Fatalf("expected: %v final fails, got: %v", finalFailCount,
finalFails)
}
if settles != settleCount {
t.Fatalf("expected: %v settles, got: %v", settleCount, settles)
}
if finalSettles != finalSettleCount {
t.Fatalf("expected: %v settles, got: %v", finalSettleCount,
finalSettles)
}
}
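As a worked example of the counting logic in this helper for the five payments in this test: SEND subscribers see only forwards and settles, while FORWARD and RECEIVE subscribers also receive one final htlc event per settled payment. A tiny, runnable sketch of that arithmetic:
package main

import "fmt"

func main() {
	const numPayments = 5

	// SEND subscribers (Carol) see a forward and a settle per payment,
	// but no final htlc events.
	carolEvents := numPayments + numPayments

	// FORWARD subscribers (Dave, Alice) additionally see one final settle
	// per payment; RECEIVE subscribers (Bob) see a settle plus a final
	// settle per payment.
	daveEvents := numPayments + numPayments + numPayments
	bobEvents := numPayments + numPayments

	fmt.Println(carolEvents, daveEvents, bobEvents) // 10 15 10
}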
// assertEventAndType reads an event from the stream provided and ensures that
// it is associated with the correct user related type - a user initiated send,
// a receive to our node or a forward through our node. Note that this event
// type is different from the htlc event type (forward, link failure etc).
func assertEventAndType(t *harnessTest, eventType routerrpc.HtlcEvent_EventType,
client routerrpc.Router_SubscribeHtlcEventsClient) *routerrpc.HtlcEvent {
event, err := client.Recv()
if err != nil {
t.Fatalf("could not get event")
}
if event.EventType != eventType {
t.Fatalf("expected: %v, got: %v", eventType,
event.EventType)
}
return event
}
func assertSubscribed(t *harnessTest,
client routerrpc.Router_SubscribeHtlcEventsClient) {
event, err := client.Recv()
require.NoError(t.t, err)
require.NotNil(t.t, event.GetSubscribedEvent())
}
// updateChannelPolicy updates the channel policy of node to the
// given fees and timelock delta. This function blocks until
// listenerNode has received the policy update.
func updateChannelPolicy(t *harnessTest, node *lntest.HarnessNode,
chanPoint *lnrpc.ChannelPoint, baseFee int64, feeRate int64,
timeLockDelta uint32, maxHtlc uint64, listenerNode *lntest.HarnessNode) {
ctxb := context.Background()
// updateChannelPolicy updates the channel policy of node to the given fees and
// timelock delta. This function blocks until listenerNode has received the
// policy update.
//
// NOTE: only used in current test.
func updateChannelPolicy(ht *lntemp.HarnessTest, hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint, baseFee int64,
feeRate int64, timeLockDelta uint32,
maxHtlc uint64, listenerNode *node.HarnessNode) {
expectedPolicy := &lnrpc.RoutingPolicy{
FeeBaseMsat: baseFee,
@ -475,14 +245,10 @@ func updateChannelPolicy(t *harnessTest, node *lntest.HarnessNode,
MaxHtlcMsat: maxHtlc,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
if _, err := node.UpdateChannelPolicy(ctxt, updateFeeReq); err != nil {
t.Fatalf("unable to update chan policy: %v", err)
}
hn.RPC.UpdateChannelPolicy(updateFeeReq)
// Wait for listener node to receive the channel update from node.
assertChannelPolicyUpdate(
t.t, listenerNode, node.PubKeyStr,
expectedPolicy, chanPoint, false,
ht.AssertChannelPolicyUpdate(
listenerNode, hn, expectedPolicy, chanPoint, false,
)
}

View file

@ -3,10 +3,8 @@
package itest
import (
"github.com/lightningnetwork/lnd/lntest"
)
import "github.com/lightningnetwork/lnd/lntemp"
// testEtcdFailover is an empty itest when LND is not compiled with etcd
// support.
func testEtcdFailover(net *lntest.NetworkHarness, ht *harnessTest) {}
func testEtcdFailover(ht *lntemp.HarnessTest) {}

View file

@ -4,8 +4,6 @@ import (
"bytes"
"context"
"fmt"
"strings"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
@ -15,6 +13,7 @@ import (
"github.com/lightningnetwork/lnd/lnrpc/chainrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lnwallet"
@ -233,17 +232,17 @@ func runCPFP(net *lntest.NetworkHarness, t *harnessTest,
// testAnchorReservedValue tests that we won't allow sending transactions when
// that would take the value we reserve for anchor fee bumping out of our
// wallet.
func testAnchorReservedValue(net *lntest.NetworkHarness, t *harnessTest) {
func testAnchorReservedValue(ht *lntemp.HarnessTest) {
// Start two nodes supporting anchor channels.
args := nodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
alice := net.NewNode(t.t, "Alice", args)
defer shutdownAndAssert(net, t, alice)
bob := net.NewNode(t.t, "Bob", args)
defer shutdownAndAssert(net, t, bob)
// NOTE: we cannot reuse the standby node here as the test requires the
// node to start with no UTXOs.
alice := ht.NewNode("Alice", args)
bob := ht.Bob
ht.RestartNodeWithExtraArgs(bob, args)
ctxb := context.Background()
net.ConnectNodes(t.t, alice, bob)
ht.ConnectNodes(alice, bob)
// Send just enough coins for Alice to open a channel without a change
// output.
@ -252,100 +251,73 @@ func testAnchorReservedValue(net *lntest.NetworkHarness, t *harnessTest) {
feeEst = 8000
)
net.SendCoins(t.t, chanAmt+feeEst, alice)
ht.FundCoins(chanAmt+feeEst, alice)
// Alice tries to open a channel that would spend all the coins in her
// wallet, without a change output. This should not be allowed.
resErr := lnwallet.ErrReservedValueInvalidated.Error()
_, err := net.OpenChannel(
alice, bob, lntest.OpenChannelParams{
ht.OpenChannelAssertErr(
alice, bob, lntemp.OpenChannelParams{
Amt: chanAmt,
},
}, lnwallet.ErrReservedValueInvalidated,
)
if err == nil || !strings.Contains(err.Error(), resErr) {
t.Fatalf("expected failure, got: %v", err)
}
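The rejection above is driven by the wallet's anchor-reserve rule: funding the channel must not leave the wallet below the amount reserved for anchor fee bumping. A rough sketch of that invariant, as an assumption about its shape rather than lnd's actual implementation:
// violatesAnchorReserve is a hypothetical helper illustrating the rule this
// test exercises: if spending spendSat would drop a wallet holding walletSat
// below reserveSat (the anchor fee-bumping reserve), the funding attempt is
// expected to fail with ErrReservedValueInvalidated.
func violatesAnchorReserve(walletSat, spendSat, reserveSat int64) bool {
	return walletSat-spendSat < reserveSat
}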
// Alice opens a smaller channel. This works since it will have a
// change output.
aliceChanPoint1 := openChannelAndAssert(
t, net, alice, bob, lntest.OpenChannelParams{
Amt: chanAmt / 4,
},
chanPoint1 := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: chanAmt / 4},
)
// If Alice tries to open another anchor channel to Bob, Bob should not
// reject it as he is not contributing any funds.
aliceChanPoint2 := openChannelAndAssert(
t, net, alice, bob, lntest.OpenChannelParams{
Amt: chanAmt / 4,
},
chanPoint2 := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: chanAmt / 4},
)
// Similarly, if Alice tries to open a legacy channel to Bob, Bob should
// not reject it as he is not contributing any funds. We'll restart Bob
// to remove his support for anchors.
err = net.RestartNode(bob, nil)
require.NoError(t.t, err)
aliceChanPoint3 := openChannelAndAssert(
t, net, alice, bob, lntest.OpenChannelParams{
Amt: chanAmt / 4,
},
// Similarly, if Alice tries to open a legacy channel to Bob, Bob
// should not reject it as he is not contributing any funds. We'll
// restart Bob to remove his support for anchors.
ht.RestartNode(bob)
// Before opening the channel, make sure the nodes are connected.
ht.EnsureConnected(alice, bob)
chanPoint3 := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: chanAmt / 4},
)
chanPoints := []*lnrpc.ChannelPoint{
aliceChanPoint1, aliceChanPoint2, aliceChanPoint3,
}
for _, chanPoint := range chanPoints {
err = alice.WaitForNetworkChannelOpen(chanPoint)
require.NoError(t.t, err)
err = bob.WaitForNetworkChannelOpen(chanPoint)
require.NoError(t.t, err)
}
chanPoints := []*lnrpc.ChannelPoint{chanPoint1, chanPoint2, chanPoint3}
// Alice tries to send all coins to an internal address. This is
// allowed, since the final wallet balance will still be above the
// reserved value.
addrReq := &lnrpc.NewAddressRequest{
req := &lnrpc.NewAddressRequest{
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := alice.NewAddress(ctxt, addrReq)
require.NoError(t.t, err)
resp := alice.RPC.NewAddress(req)
sweepReq := &lnrpc.SendCoinsRequest{
Addr: resp.Address,
SendAll: true,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
_, err = alice.SendCoins(ctxt, sweepReq)
require.NoError(t.t, err)
alice.RPC.SendCoins(sweepReq)
block := mineBlocks(t, net, 1, 1)[0]
block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
assertNumTxInAndTxOut := func(tx *wire.MsgTx, in, out int) {
require.Len(ht, tx.TxIn, in, "num inputs not matched")
require.Len(ht, tx.TxOut, out, "num outputs not matched")
}
// The sweep transaction should have exactly one input, the change from
// the previous SendCoins call.
sweepTx := block.Transactions[1]
if len(sweepTx.TxIn) != 1 {
t.Fatalf("expected 1 inputs instead have %v", len(sweepTx.TxIn))
}
// It should have a single output.
if len(sweepTx.TxOut) != 1 {
t.Fatalf("expected 1 output instead have %v", len(sweepTx.TxOut))
}
assertNumTxInAndTxOut(sweepTx, 1, 1)
// Wait for Alice to see her balance as confirmed.
waitForConfirmedBalance := func() int64 {
var balance int64
err := wait.NoError(func() error {
req := &lnrpc.WalletBalanceRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
resp, err := alice.WalletBalance(ctxt, req)
if err != nil {
return err
}
resp := alice.RPC.WalletBalance()
if resp.TotalBalance == 0 {
return fmt.Errorf("no balance")
@ -358,93 +330,51 @@ func testAnchorReservedValue(net *lntest.NetworkHarness, t *harnessTest) {
balance = resp.TotalBalance
return nil
}, defaultTimeout)
require.NoError(t.t, err)
require.NoError(ht, err, "timeout checking alice's balance")
return balance
}
_ = waitForConfirmedBalance()
waitForConfirmedBalance()
// Alice tries to send all funds to an external address; the reserved
// value must stay in her wallet.
minerAddr, err := net.Miner.NewAddress()
require.NoError(t.t, err)
minerAddr := ht.Miner.NewMinerAddress()
sweepReq = &lnrpc.SendCoinsRequest{
Addr: minerAddr.String(),
SendAll: true,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
_, err = alice.SendCoins(ctxt, sweepReq)
require.NoError(t.t, err)
alice.RPC.SendCoins(sweepReq)
// We'll mine a block which should include the sweep transaction we
// generated above.
block = mineBlocks(t, net, 1, 1)[0]
block = ht.MineBlocksAndAssertNumTxes(1, 1)[0]
// The sweep transaction should have exactly one input as we only had
// the single output from above in the wallet.
sweepTx = block.Transactions[1]
if len(sweepTx.TxIn) != 1 {
t.Fatalf("expected 1 inputs instead have %v", len(sweepTx.TxIn))
}
// It should have two outputs, one being the miner address, the other
// one being the reserve going back to our wallet.
if len(sweepTx.TxOut) != 2 {
t.Fatalf("expected 2 outputs instead have %v", len(sweepTx.TxOut))
}
assertNumTxInAndTxOut(sweepTx, 1, 2)
// The reserved value is now back in Alice's wallet.
aliceBalance := waitForConfirmedBalance()
// The reserved value should be equal to the required reserve for anchor
// channels.
walletBalanceResp, err := alice.WalletBalance(
ctxb, &lnrpc.WalletBalanceRequest{},
)
require.NoError(t.t, err)
require.Equal(
t.t, aliceBalance, walletBalanceResp.ReservedBalanceAnchorChan,
)
additionalChannels := int64(1)
// Required reserve when additional channels are provided.
requiredReserveResp, err := alice.WalletKitClient.RequiredReserve(
ctxb, &walletrpc.RequiredReserveRequest{
AdditionalPublicChannels: uint32(additionalChannels),
},
)
require.NoError(t.t, err)
additionalReservedValue := btcutil.Amount(additionalChannels *
int64(lnwallet.AnchorChanReservedValue))
totalReserved := btcutil.Amount(aliceBalance) + additionalReservedValue
// The total reserved value should not exceed the maximum value reserved
// for anchor channels.
if totalReserved > lnwallet.MaxAnchorChanReservedValue {
totalReserved = lnwallet.MaxAnchorChanReservedValue
}
require.Equal(
t.t, int64(totalReserved), requiredReserveResp.RequiredReserve,
)
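A minimal sketch of the value asserted against RequiredReserve above, assuming one AnchorChanReservedValue per additional public channel with a hard cap at MaxAnchorChanReservedValue (the helper name is illustrative):
// expectedRequiredReserve mirrors the assertion above: the currently
// reserved balance plus one per-channel unit for each additional public
// channel, clamped at the maximum anchor-channel reserve.
func expectedRequiredReserve(currentReserve, perChanReserve, maxReserve,
	additionalChans int64) int64 {

	total := currentReserve + additionalChans*perChanReserve
	if total > maxReserve {
		total = maxReserve
	}

	return total
}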
// Alice closes her channels, and should now be allowed to send everything
// to an external address.
for _, chanPoint := range chanPoints {
closeChannelAndAssert(t, net, alice, chanPoint, false)
ht.CloseChannel(alice, chanPoint)
}
newBalance := waitForConfirmedBalance()
if newBalance <= aliceBalance {
t.Fatalf("Alice's balance did not increase after channel close")
}
require.Greater(ht, newBalance, aliceBalance,
"Alice's balance did not increase after channel close")
// Assert there are no open or pending channels anymore.
assertNumPendingChannels(t, alice, 0, 0)
assertNodeNumChannels(t, alice, 0)
ht.AssertNumWaitingClose(alice, 0)
ht.AssertNodeNumChannels(alice, 0)
// We'll wait for the balance to reflect that the channel has been
// closed and the funds are in the wallet.
@ -452,32 +382,205 @@ func testAnchorReservedValue(net *lntest.NetworkHarness, t *harnessTest) {
Addr: minerAddr.String(),
SendAll: true,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
_, err = alice.SendCoins(ctxt, sweepReq)
require.NoError(t.t, err)
alice.RPC.SendCoins(sweepReq)
// We'll mine a block which should include the sweep transaction we
// generated above.
block = mineBlocks(t, net, 1, 1)[0]
block = ht.MineBlocksAndAssertNumTxes(1, 1)[0]
// The sweep transaction should have four inputs, the change output from
// the previous sweep, and the outputs from the coop closed channels.
sweepTx = block.Transactions[1]
if len(sweepTx.TxIn) != 4 {
t.Fatalf("expected 4 inputs instead have %v", len(sweepTx.TxIn))
}
// It should have a single output.
if len(sweepTx.TxOut) != 1 {
t.Fatalf("expected 1 output instead have %v", len(sweepTx.TxOut))
assertNumTxInAndTxOut(sweepTx, 4, 1)
}
// testAnchorThirdPartySpend tests that if we force close a channel, but then
// don't sweep the anchor in time and a 3rd party spends it, we remove any
// transactions that are a descendant of that sweep.
func testAnchorThirdPartySpend(ht *lntemp.HarnessTest) {
// First, we'll create two new nodes that both default to anchor
// channels.
//
// NOTE: The itests differ here, as anchors are off by default vs the
// normal lnd binary.
args := nodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
alice := ht.NewNode("Alice", args)
defer ht.Shutdown(alice)
bob := ht.NewNode("Bob", args)
defer ht.Shutdown(bob)
ht.EnsureConnected(alice, bob)
// We'll fund our Alice with coins, as she'll be opening the channel.
// We'll fund her with *just* enough coins to open the channel and
// sweep the anchor.
const (
firstChanSize = 1_000_000
anchorFeeBuffer = 500_000
)
ht.FundCoins(firstChanSize+anchorFeeBuffer, alice)
// Open the channel between the two nodes and wait for it to confirm
// fully.
aliceChanPoint1 := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{
Amt: firstChanSize,
},
)
// Send another UTXO if this is a neutrino backend. When sweeping
// anchors, there are two transactions created, `local_sweep_tx` for
// sweeping Alice's anchor on the local commitment, `remote_sweep_tx`
// for sweeping her anchor on the remote commitment. Whenever the force
// close transaction is published, Alice will always create these two
// transactions to sweep her anchor.
// On the other hand, when creating the sweep txes, the anchor itself
// is not able to cover the fee, so another wallet UTXO is needed. In
// our test case, there's a change output that can be used from the
// above funding process. And it's used by both sweep txes - when `lnd`
// happens to create the `remote_sweep_tx` first, it will receive an
// error since its parent tx, the remote commitment, is not known,
// hence freeing the change output to be used by `local_sweep_tx`.
// For the neutrino client, however, it will consider the transaction which
// sweeps the remote anchor as an orphan tx, and it will neither send
// it to the mempool nor return an error to free the change output.
// Thus, if the change output is already used in `remote_sweep_tx`, we
// won't have a UTXO to create `local_sweep_tx`.
//
// NOTE: the order of the sweep requests for the two anchors cannot be
// guaranteed. If the sweeper happens to sweep the remote anchor first,
// then the test won't pass without the extra UTXO, which is the source
// of the flakiness.
//
// TODO(yy): make a RPC server for sweeper so we can explicitly check
// and control its state.
if ht.IsNeutrinoBackend() {
ht.FundCoins(anchorFeeBuffer, alice)
}
// With the channel open, we'll actually immediately force close it. We
// don't care about network announcements here since there's no routing
// in this test.
ht.CloseChannelAssertPending(alice, aliceChanPoint1, true)
// Now that the channel has been force closed, it should show up in the
// PendingChannels RPC under the waiting close section.
waitingClose := ht.AssertChannelWaitingClose(alice, aliceChanPoint1)
// At this point, the channel is waiting close, and we have both the
// commitment transaction and anchor sweep in the mempool.
const expectedTxns = 2
sweepTxns := ht.Miner.GetNumTxsFromMempool(expectedTxns)
aliceCloseTx := waitingClose.Commitments.LocalTxid
_, aliceAnchor := ht.FindCommitAndAnchor(sweepTxns, aliceCloseTx)
// We'll now mine _only_ the commitment force close transaction, as we
// want the anchor sweep to stay unconfirmed.
forceCloseTxID, _ := chainhash.NewHashFromStr(aliceCloseTx)
commitTxn := ht.Miner.GetRawTransaction(forceCloseTxID)
ht.Miner.MineBlockWithTxes([]*btcutil.Tx{commitTxn})
// With the anchor output located, and the main commitment mined we'll
// instruct the wallet to send all coins in the wallet to a new address
// (to the miner), including unconfirmed change.
minerAddr := ht.Miner.NewMinerAddress()
sweepReq := &lnrpc.SendCoinsRequest{
Addr: minerAddr.String(),
SendAll: true,
MinConfs: 0,
SpendUnconfirmed: true,
}
sweepAllResp := alice.RPC.SendCoins(sweepReq)
// Both the original anchor sweep transaction, as well as the
// transaction we created to sweep all the coins from Alice's wallet
// should be found in her transaction store.
sweepAllTxID, _ := chainhash.NewHashFromStr(sweepAllResp.Txid)
ht.AssertTransactionInWallet(alice, aliceAnchor.SweepTx.TxHash())
ht.AssertTransactionInWallet(alice, *sweepAllTxID)
// Next, we'll shut down Alice, and allow 16 blocks to pass so that the
// anchor output can be swept by anyone. Rather than use the normal API
// call, we'll generate a series of _empty_ blocks here.
aliceRestart := ht.SuspendNode(alice)
const anchorCsv = 16
ht.MineEmptyBlocks(anchorCsv)
// Before we sweep the anchor, we'll restart Alice.
require.NoErrorf(ht, aliceRestart(), "unable to restart alice")
// Now that the channel has been closed, and Alice has an unconfirmed
// transaction spending the output produced by her anchor sweep, we'll
// mine a transaction that double spends the output.
thirdPartyAnchorSweep := genAnchorSweep(ht, aliceAnchor, anchorCsv)
ht.Miner.MineBlockWithTxes([]*btcutil.Tx{thirdPartyAnchorSweep})
// At this point, we should no longer find Alice's transaction that
// tried to sweep the anchor in her wallet.
ht.AssertTransactionNotInWallet(alice, aliceAnchor.SweepTx.TxHash())
// In addition, the transaction she sent to sweep all her coins to the
// miner also should no longer be found.
ht.AssertTransactionNotInWallet(alice, *sweepAllTxID)
// The anchor should now show as being "lost", while the force close
// response is still present.
assertAnchorOutputLost(ht, alice, aliceChanPoint1)
// At this point Alice's CSV output should already be fully spent and
// the channel marked as being resolved. We mine a block first, as so
// far we've been generating custom blocks this whole time.
commitSweepOp := wire.OutPoint{
Hash: *forceCloseTxID,
Index: 1,
}
ht.Miner.AssertOutpointInMempool(commitSweepOp)
ht.MineBlocks(1)
ht.AssertNumWaitingClose(alice, 0)
}
// assertAnchorOutputLost asserts that the anchor output for the given channel
// has the state of being lost.
func assertAnchorOutputLost(ht *lntemp.HarnessTest, hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint) {
cp := ht.OutPointFromChannelPoint(chanPoint)
expected := lnrpc.PendingChannelsResponse_ForceClosedChannel_LOST
err := wait.NoError(func() error {
resp := hn.RPC.PendingChannels()
channels := resp.PendingForceClosingChannels
for _, c := range channels {
// Not the wanted channel, skipped.
if c.Channel.ChannelPoint != cp.String() {
continue
}
// Found the channel, check the anchor state.
if c.Anchor == expected {
return nil
}
return fmt.Errorf("unexpected anchor state, want %v, "+
"got %v", expected, c.Anchor)
}
return fmt.Errorf("channel not found using cp=%v", cp)
}, defaultTimeout)
require.NoError(ht, err, "anchor doesn't show as being lost")
}
// genAnchorSweep generates a "3rd party" anchor sweeping from an existing one.
// In practice, we just re-use the existing witness, and tack on our own
// output producing a 1-in-1-out transaction.
func genAnchorSweep(t *harnessTest, net *lntest.NetworkHarness,
aliceAnchor *sweptOutput, anchorCsv uint32) *btcutil.Tx {
func genAnchorSweep(ht *lntemp.HarnessTest,
aliceAnchor *lntemp.SweptOutput, anchorCsv uint32) *btcutil.Tx {
// At this point, we have the transaction that Alice used to try to
// sweep her anchor. As this is actually just something anyone can
@ -491,7 +594,8 @@ func genAnchorSweep(t *harnessTest, net *lntest.NetworkHarness,
}
}
t.Fatalf("anchor op not found")
require.FailNow(ht, "anchor op not found")
return wire.TxIn{}
}()
@ -500,14 +604,9 @@ func genAnchorSweep(t *harnessTest, net *lntest.NetworkHarness,
aliceAnchorTxIn.Witness[0] = nil
aliceAnchorTxIn.Sequence = anchorCsv
minerAddr, err := net.Miner.NewAddress()
if err != nil {
t.Fatalf("unable to get miner addr: %v", err)
}
minerAddr := ht.Miner.NewMinerAddress()
addrScript, err := txscript.PayToAddrScript(minerAddr)
if err != nil {
t.Fatalf("unable to gen addr script: %v", err)
}
require.NoError(ht, err, "unable to gen addr script")
// Now that we have the txIn, we can just make a new transaction that
// uses a different script for the output.
@ -520,191 +619,3 @@ func genAnchorSweep(t *harnessTest, net *lntest.NetworkHarness,
return btcutil.NewTx(tx)
}
// testAnchorThirdPartySpend tests that if we force close a channel, but then
// don't sweep the anchor in time and a 3rd party spends it, we remove any
// transactions that are a descendant of that sweep.
func testAnchorThirdPartySpend(net *lntest.NetworkHarness, t *harnessTest) {
// First, we'll create two new nodes that both default to anchor
// channels.
//
// NOTE: The itests differ here, as anchors are off by default vs the
// normal lnd binary.
args := nodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
alice := net.NewNode(t.t, "Alice", args)
defer shutdownAndAssert(net, t, alice)
bob := net.NewNode(t.t, "Bob", args)
defer shutdownAndAssert(net, t, bob)
ctxb := context.Background()
net.ConnectNodes(t.t, alice, bob)
// We'll fund our Alice with coins, as she'll be opening the channel.
// We'll fund her with *just* enough coins to open the channel.
const (
firstChanSize = 1_000_000
anchorFeeBuffer = 500_000
)
net.SendCoins(t.t, firstChanSize, alice)
// We'll give Alice another spare UTXO as well so she can use it to
// help sweep all coins.
net.SendCoins(t.t, anchorFeeBuffer, alice)
// Open the channel between the two nodes and wait for it to confirm
// fully.
aliceChanPoint1 := openChannelAndAssert(
t, net, alice, bob, lntest.OpenChannelParams{
Amt: firstChanSize,
},
)
// With the channel open, we'll actually immediately force close it. We
// don't care about network announcements here since there's no routing
// in this test.
_, _, err := net.CloseChannel(alice, aliceChanPoint1, true)
if err != nil {
t.Fatalf("unable to execute force channel closure: %v", err)
}
// Now that the channel has been force closed, it should show up in the
// PendingChannels RPC under the waiting close section.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := alice.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
err = checkNumWaitingCloseChannels(pendingChanResp, 1)
if err != nil {
t.Fatalf(err.Error())
}
// Get the normal channel outpoint so we can track it in the set of
// channels that are waiting to be closed.
fundingTxID, err := lnrpc.GetChanPointFundingTxid(aliceChanPoint1)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
chanPoint := wire.OutPoint{
Hash: *fundingTxID,
Index: aliceChanPoint1.OutputIndex,
}
waitingClose, err := findWaitingCloseChannel(pendingChanResp, &chanPoint)
if err != nil {
t.Fatalf(err.Error())
}
// At this point, the channel is waiting close, and we have both the
// commitment transaction and anchor sweep in the mempool.
const expectedTxns = 2
sweepTxns, err := getNTxsFromMempool(
net.Miner.Client, expectedTxns, minerMempoolTimeout,
)
require.NoError(t.t, err, "no sweep txns in miner mempool")
aliceCloseTx := waitingClose.Commitments.LocalTxid
_, aliceAnchor := findCommitAndAnchor(t, net, sweepTxns, aliceCloseTx)
// We'll now mine _only_ the commitment force close transaction, as we
// want the anchor sweep to stay unconfirmed.
var emptyTime time.Time
forceCloseTxID, _ := chainhash.NewHashFromStr(aliceCloseTx)
commitTxn, err := net.Miner.Client.GetRawTransaction(
forceCloseTxID,
)
if err != nil {
t.Fatalf("unable to get transaction: %v", err)
}
_, err = net.Miner.GenerateAndSubmitBlock(
[]*btcutil.Tx{commitTxn}, -1, emptyTime,
)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// With the anchor output located, and the main commitment mined we'll
// instruct the wallet to send all coins in the wallet to a new address
// (to the miner), including unconfirmed change.
minerAddr, err := net.Miner.NewAddress()
if err != nil {
t.Fatalf("unable to create new miner addr: %v", err)
}
sweepReq := &lnrpc.SendCoinsRequest{
Addr: minerAddr.String(),
SendAll: true,
MinConfs: 0,
SpendUnconfirmed: true,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
sweepAllResp, err := alice.SendCoins(ctxt, sweepReq)
if err != nil {
t.Fatalf("unable to sweep coins: %v", err)
}
// Both the original anchor sweep transaction, as well as the
// transaction we created to sweep all the coins from Alice's wallet
// should be found in her transaction store.
sweepAllTxID, _ := chainhash.NewHashFromStr(sweepAllResp.Txid)
assertTransactionInWallet(t.t, alice, aliceAnchor.SweepTx.TxHash())
assertTransactionInWallet(t.t, alice, *sweepAllTxID)
// Next, we'll shut down Alice, and allow 16 blocks to pass so that the
// anchor output can be swept by anyone. Rather than use the normal API
// call, we'll generate a series of _empty_ blocks here.
aliceRestart, err := net.SuspendNode(alice)
if err != nil {
t.Fatalf("unable to shutdown alice: %v", err)
}
const anchorCsv = 16
for i := 0; i < anchorCsv; i++ {
_, err := net.Miner.GenerateAndSubmitBlock(nil, -1, emptyTime)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
}
// Before we sweep the anchor, we'll restart Alice.
if err := aliceRestart(); err != nil {
t.Fatalf("unable to restart alice: %v", err)
}
// Now that the channel has been closed, and Alice has an unconfirmed
// transaction spending the output produced by her anchor sweep, we'll
// mine a transaction that double spends the output.
thirdPartyAnchorSweep := genAnchorSweep(t, net, aliceAnchor, anchorCsv)
_, err = net.Miner.GenerateAndSubmitBlock(
[]*btcutil.Tx{thirdPartyAnchorSweep}, -1, emptyTime,
)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// At this point, we should no longer find Alice's transaction that
// tried to sweep the anchor in her wallet.
assertTransactionNotInWallet(t.t, alice, aliceAnchor.SweepTx.TxHash())
// In addition, the transaction she sent to sweep all her coins to the
// miner also should no longer be found.
assertTransactionNotInWallet(t.t, alice, *sweepAllTxID)
// The anchor should now show as being "lost", while the force close
// response is still present.
assertAnchorOutputLost(t, alice, chanPoint)
// At this point Alice's CSV output should already be fully spent and
// the channel marked as being resolved. We mine a block first, as so
// far we've been generating custom blocks this whole time.
commitSweepOp := wire.OutPoint{
Hash: *forceCloseTxID,
Index: 1,
}
assertSpendingTxInMempool(
t, net.Miner.Client, minerMempoolTimeout, commitSweepOp,
)
_, err = net.Miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
assertNumPendingChannels(t, alice, 0, 0)
}

View file

@ -6,12 +6,12 @@ import (
"time"
"github.com/btcsuite/btcd/btcjson"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/integration/rpctest"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/funding"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
@ -20,112 +20,83 @@ import (
// testOpenChannelAfterReorg tests that in the case where we have an open
// channel where the funding tx gets reorged out, the channel will no
// longer be present in the node's routing table.
func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
func testOpenChannelAfterReorg(ht *lntemp.HarnessTest) {
// Skip test for neutrino, as we cannot disconnect the miner at will.
// TODO(halseth): remove when either can disconnect at will, or restart
// node with connection to new miner.
if net.BackendCfg.Name() == lntest.NeutrinoBackendName {
t.Skipf("skipping reorg test for neutrino backend")
if ht.IsNeutrinoBackend() {
ht.Skipf("skipping reorg test for neutrino backend")
}
var (
ctxb = context.Background()
temp = "temp"
)
temp := "temp"
// Set up a new miner that we can use to cause a reorg.
tempLogDir := ".tempminerlogs"
logFilename := "output-open_channel_reorg-temp_miner.log"
tempMiner, err := lntest.NewTempMiner(tempLogDir, logFilename)
require.NoError(t.t, err, "failed to create temp miner")
defer func() {
require.NoError(
t.t, tempMiner.Stop(),
"failed to clean up temp miner",
tempMiner := lntemp.NewTempMiner(
ht.Context(), ht.T, tempLogDir, logFilename,
)
}()
defer tempMiner.Stop()
// Setup the temp miner
require.NoError(
t.t, tempMiner.SetUp(false, 0), "unable to set up mining node",
)
require.NoError(ht, tempMiner.SetUp(false, 0),
"unable to set up mining node")
miner := ht.Miner
alice, bob := ht.Alice, ht.Bob
// We start by connecting the new miner to our original miner,
// such that it will sync to our original chain.
err = net.Miner.Client.Node(
err := miner.Client.Node(
btcjson.NConnect, tempMiner.P2PAddress(), &temp,
)
if err != nil {
t.Fatalf("unable to remove node: %v", err)
}
nodeSlice := []*rpctest.Harness{net.Miner.Harness, tempMiner.Harness}
if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
t.Fatalf("unable to join node on blocks: %v", err)
}
require.NoError(ht, err, "unable to connect miners")
nodeSlice := []*rpctest.Harness{miner.Harness, tempMiner.Harness}
err = rpctest.JoinNodes(nodeSlice, rpctest.Blocks)
require.NoError(ht, err, "unable to join node on blocks")
// The two miners should be on the same blockheight.
assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 0)
assertMinerBlockHeightDelta(ht, miner, tempMiner, 0)
// We disconnect the two miners, such that we can mine two different
// chains and can cause a reorg later.
err = net.Miner.Client.Node(
err = miner.Client.Node(
btcjson.NDisconnect, tempMiner.P2PAddress(), &temp,
)
if err != nil {
t.Fatalf("unable to remove node: %v", err)
}
require.NoError(ht, err, "unable to disconnect miners")
// Create a new channel that requires 1 conf before it's considered
// open, then broadcast the funding transaction.
chanAmt := funding.MaxBtcFundingAmount
pushAmt := btcutil.Amount(0)
pendingUpdate, err := net.OpenPendingChannel(
net.Alice, net.Bob, chanAmt, pushAmt,
)
if err != nil {
t.Fatalf("unable to open channel: %v", err)
params := lntemp.OpenChannelParams{
Amt: funding.MaxBtcFundingAmount,
Private: true,
}
pendingUpdate := ht.OpenChannelAssertPending(alice, bob, params)
// Wait for miner to have seen the funding tx. The temporary miner is
// disconnected, and won't see the transaction.
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
if err != nil {
t.Fatalf("failed to find funding tx in mempool: %v", err)
}
ht.Miner.AssertNumTxsInMempool(1)
// At this point, the channel's funding transaction will have been
// broadcast, but not confirmed, and the channel should be pending.
assertNumOpenChannelsPending(t, net.Alice, net.Bob, 1)
ht.AssertNodesNumPendingOpenChannels(alice, bob, 1)
fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid)
if err != nil {
t.Fatalf("unable to convert funding txid into chainhash.Hash:"+
" %v", err)
}
require.NoError(ht, err, "convert funding txid into chainhash failed")
// We now cause a fork, by letting our original miner mine 10 blocks,
// and our new miner mine 15. This will also confirm our pending
// channel on the original miner's chain, which should be considered
// open.
block := mineBlocks(t, net, 10, 1)[0]
assertTxInBlock(t, block, fundingTxID)
if _, err := tempMiner.Client.Generate(15); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
block := ht.MineBlocks(10)[0]
ht.Miner.AssertTxInBlock(block, fundingTxID)
_, err = tempMiner.Client.Generate(15)
require.NoError(ht, err, "unable to generate blocks")
// Ensure the chain lengths are what we expect, with the temp miner
// being 5 blocks ahead.
assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 5)
// Wait for Alice to sync to the original miner's chain.
_, minerHeight, err := net.Miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}
err = waitForNodeBlockHeight(net.Alice, minerHeight)
if err != nil {
t.Fatalf("unable to sync to chain: %v", err)
}
assertMinerBlockHeightDelta(ht, miner, tempMiner, 5)
chanPoint := &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
@ -135,121 +106,57 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
}
// Ensure channel is no longer pending.
assertNumOpenChannelsPending(t, net.Alice, net.Bob, 0)
ht.AssertNodesNumPendingOpenChannels(alice, bob, 0)
// Wait for Alice and Bob to recognize and advertise the new channel
// generated above.
err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
if err != nil {
t.Fatalf("alice didn't advertise channel before "+
"timeout: %v", err)
}
err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
if err != nil {
t.Fatalf("bob didn't advertise channel before "+
"timeout: %v", err)
}
ht.AssertTopologyChannelOpen(alice, chanPoint)
ht.AssertTopologyChannelOpen(bob, chanPoint)
// Alice should now have 1 edge in her graph.
req := &lnrpc.ChannelGraphRequest{
IncludeUnannounced: true,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
chanGraph, err := net.Alice.DescribeGraph(ctxt, req)
if err != nil {
t.Fatalf("unable to query for alice's routing table: %v", err)
}
numEdges := len(chanGraph.Edges)
if numEdges != 1 {
t.Fatalf("expected to find one edge in the graph, found %d",
numEdges)
}
ht.AssertNumEdges(alice, 1, true)
// Now we disconnect Alice's chain backend from the original miner, and
// connect the two miners together. Since the temporary miner knows
// about a longer chain, both miners should sync to that chain.
err = net.BackendCfg.DisconnectMiner()
if err != nil {
t.Fatalf("unable to remove node: %v", err)
}
ht.DisconnectMiner()
// Connecting to the temporary miner should now cause our original
// chain to be re-orged out.
err = net.Miner.Client.Node(
btcjson.NConnect, tempMiner.P2PAddress(), &temp,
)
if err != nil {
t.Fatalf("unable to remove node: %v", err)
}
err = miner.Client.Node(btcjson.NConnect, tempMiner.P2PAddress(), &temp)
require.NoError(ht, err, "unable to connect temp miner")
nodes := []*rpctest.Harness{tempMiner.Harness, net.Miner.Harness}
if err := rpctest.JoinNodes(nodes, rpctest.Blocks); err != nil {
t.Fatalf("unable to join node on blocks: %v", err)
}
nodes := []*rpctest.Harness{tempMiner.Harness, miner.Harness}
err = rpctest.JoinNodes(nodes, rpctest.Blocks)
require.NoError(ht, err, "unable to join node on blocks")
// Once again they should be on the same chain.
assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 0)
assertMinerBlockHeightDelta(ht, miner, tempMiner, 0)
// Now we disconnect the two miners, and connect our original miner to
// our chain backend once again.
err = net.Miner.Client.Node(
err = miner.Client.Node(
btcjson.NDisconnect, tempMiner.P2PAddress(), &temp,
)
if err != nil {
t.Fatalf("unable to remove node: %v", err)
}
require.NoError(ht, err, "unable to disconnect temp miner")
err = net.BackendCfg.ConnectMiner()
if err != nil {
t.Fatalf("unable to remove node: %v", err)
}
ht.ConnectMiner()
// This should have caused a reorg, and Alice should sync to the longer
// chain, where the funding transaction is not confirmed.
_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}
err = waitForNodeBlockHeight(net.Alice, tempMinerHeight)
if err != nil {
t.Fatalf("unable to sync to chain: %v", err)
}
require.NoError(ht, err, "unable to get current blockheight")
ht.WaitForNodeBlockHeight(alice, tempMinerHeight)
// Since the funding tx was reorged out, Alice should now have no edges
// in her graph.
req = &lnrpc.ChannelGraphRequest{
IncludeUnannounced: true,
}
var predErr error
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanGraph, err = net.Alice.DescribeGraph(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query for "+
"alice's routing table: %v", err)
return false
}
numEdges = len(chanGraph.Edges)
if numEdges != 0 {
predErr = fmt.Errorf("expected to find "+
"no edge in the graph, found %d",
numEdges)
return false
}
return true
}, defaultTimeout)
if err != nil {
t.Fatalf(predErr.Error())
}
ht.AssertNumEdges(alice, 0, true)
// Clean up by mining the funding tx again, then closing the channel.
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, fundingTxID)
block = ht.MineBlocksAndAssertNumTxes(1, 1)[0]
ht.Miner.AssertTxInBlock(block, fundingTxID)
closeReorgedChannelAndAssert(t, net, net.Alice, chanPoint, false)
ht.CloseChannel(alice, chanPoint)
}
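The reorg choreography above is spread across several hunks. As a reference, here is a condensed sketch of forcing a reorg with two rpctest harnesses, using the same btcjson/rpctest calls as the test; the lnd-side backend disconnects and the block-height assertions are omitted for brevity:

// Sketch only: mine competing chains on two rpctest harnesses so that the
// shorter one is reorged out on reconnection.
func forceReorg(miner, tempMiner *rpctest.Harness) error {
	temp := "temp"

	// Disconnect the miners so they can mine competing chains.
	err := miner.Client.Node(
		btcjson.NDisconnect, tempMiner.P2PAddress(), &temp,
	)
	if err != nil {
		return err
	}

	// The temporary miner builds the longer chain.
	if _, err := miner.Client.Generate(10); err != nil {
		return err
	}
	if _, err := tempMiner.Client.Generate(15); err != nil {
		return err
	}

	// Reconnect and sync on blocks: the original miner's last 10 blocks
	// are reorged out in favor of the longer chain.
	err = miner.Client.Node(
		btcjson.NConnect, tempMiner.P2PAddress(), &temp,
	)
	if err != nil {
		return err
	}

	return rpctest.JoinNodes(
		[]*rpctest.Harness{miner, tempMiner}, rpctest.Blocks,
	)
}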
// testOpenChannelFeePolicy checks if different channel fee scenarios
@ -579,3 +486,33 @@ func runBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
), "verifying alice close updates",
)
}
// assertMinerBlockHeightDelta ensures that tempMiner is 'delta' blocks ahead
// of miner.
func assertMinerBlockHeightDelta(ht *lntemp.HarnessTest,
miner, tempMiner *lntemp.HarnessMiner, delta int32) {
// Ensure the chain lengths are what we expect.
err := wait.NoError(func() error {
_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
if err != nil {
return fmt.Errorf("unable to get current "+
"blockheight %v", err)
}
_, minerHeight, err := miner.Client.GetBestBlock()
if err != nil {
return fmt.Errorf("unable to get current "+
"blockheight %v", err)
}
if tempMinerHeight != minerHeight+delta {
return fmt.Errorf("expected new miner(%d) to be %d "+
"blocks ahead of original miner(%d)",
tempMinerHeight, delta, minerHeight)
}
return nil
}, defaultTimeout)
require.NoError(ht, err, "failed to assert block height delta")
}

View file

@ -1,7 +1,6 @@
package itest
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
@ -31,11 +30,18 @@ func testListPayments(ht *lntemp.HarnessTest) {
alice, bob, lntemp.OpenChannelParams{Amt: chanAmt},
)
// Get the number of invoices Bob already has.
//
// TODO(yy): we can remove this check once the `DeleteAllInvoices` rpc
// is added.
invResp := bob.RPC.ListInvoices(nil)
numOldInvoices := len(invResp.Invoices)
// Now that the channel is open, create an invoice for Bob which
// expects a payment of 1000 satoshis from Alice paid via a particular
// preimage.
const paymentAmt = 1000
preimage := bytes.Repeat([]byte("B"), 32)
preimage := ht.Random32Bytes()
invoice := &lnrpc.Invoice{
Memo: "testing",
RPreimage: preimage,
@ -43,6 +49,10 @@ func testListPayments(ht *lntemp.HarnessTest) {
}
invoiceResp := bob.RPC.AddInvoice(invoice)
// Check that Bob has added the invoice.
numInvoices := numOldInvoices + 1
ht.AssertNumInvoices(bob, 1)
// With the invoice for Bob added, send a payment towards Alice paying
// to the above generated invoice.
payReqs := []string{invoiceResp.PaymentRequest}
@ -113,8 +123,8 @@ func testListPayments(ht *lntemp.HarnessTest) {
invReq := &lnrpc.ListInvoiceRequest{
CreationDateStart: 1227035905,
}
invResp := bob.RPC.ListInvoices(invReq)
require.Len(ht, invResp.Invoices, 1)
invResp = bob.RPC.ListInvoices(invReq)
require.Len(ht, invResp.Invoices, numInvoices)
// Using an end date from a long time ago should return nothing.
invReq = &lnrpc.ListInvoiceRequest{
@ -135,7 +145,7 @@ func testListPayments(ht *lntemp.HarnessTest) {
CreationDateEnd: 5392552705,
}
invResp = bob.RPC.ListInvoices(invReq)
require.Len(ht, invResp.Invoices, 1)
require.Len(ht, invResp.Invoices, numInvoices)
// Delete all payments from Alice. DB should have no payments.
alice.RPC.DeleteAllPayments()
@ -152,7 +162,7 @@ func testListPayments(ht *lntemp.HarnessTest) {
time.Sleep(2 * time.Second)
// Close the channel.
defer ht.CloseChannel(alice, chanPoint)
ht.CloseChannel(alice, chanPoint)
}
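The CreationDateStart/CreationDateEnd filters exercised above are plain Unix timestamps. As a usage sketch, reusing the test's bob handle with illustrative values:

// List only the invoices Bob created within the last 24 hours.
now := time.Now()
recentReq := &lnrpc.ListInvoiceRequest{
	CreationDateStart: uint64(now.Add(-24 * time.Hour).Unix()),
	CreationDateEnd:   uint64(now.Unix()),
}
recentInvoices := bob.RPC.ListInvoices(recentReq)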
// testPaymentFollowingChannelOpen tests that the channel transition from

View file

@ -5,6 +5,7 @@ import (
"context"
"crypto/rand"
"fmt"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/ecdsa"
@ -20,6 +21,7 @@ import (
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/signrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntest"
"github.com/stretchr/testify/require"
)
@ -255,69 +257,56 @@ func runPsbtChanFunding(net *lntest.NetworkHarness, t *harnessTest, carol,
// and dave by using a Partially Signed Bitcoin Transaction that funds the
// channel multisig funding output and is fully funded by an external third
// party.
func testPsbtChanFundingExternal(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testPsbtChanFundingExternal(ht *lntemp.HarnessTest) {
const chanSize = funding.MaxBtcFundingAmount
// Everything we do here should be done within a second or two, so we
// can just keep a single timeout context around for all calls.
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
// First, we'll create two new nodes that we'll use to open channels
// between for this test. Both these nodes have an empty wallet as Alice
// will be funding the channel.
carol := net.NewNode(t.t, "carol", nil)
defer shutdownAndAssert(net, t, carol)
dave := net.NewNode(t.t, "dave", nil)
defer shutdownAndAssert(net, t, dave)
carol := ht.NewNode("carol", nil)
dave := ht.NewNode("dave", nil)
// Before we start the test, we'll ensure both sides are connected so
// the funding flow can be properly executed.
net.EnsureConnected(t.t, carol, dave)
net.EnsureConnected(t.t, carol, net.Alice)
alice := ht.Alice
ht.EnsureConnected(carol, dave)
ht.EnsureConnected(carol, alice)
// At this point, we can begin our PSBT channel funding workflow. We'll
// start by generating a pending channel ID externally that will be used
// to track this new funding type.
var pendingChanID [32]byte
_, err := rand.Read(pendingChanID[:])
require.NoError(t.t, err)
pendingChanID := ht.Random32Bytes()
// We'll also test batch funding of two channels so we need another ID.
var pendingChanID2 [32]byte
_, err = rand.Read(pendingChanID2[:])
require.NoError(t.t, err)
pendingChanID2 := ht.Random32Bytes()
// Now that we have the pending channel ID, Carol will open the channel
// by specifying a PSBT shim. We use the NoPublish flag here to avoid
// publishing the whole batch TX too early.
chanUpdates, tempPsbt, err := openChannelPsbt(
ctxt, carol, dave, lntest.OpenChannelParams{
chanUpdates, tempPsbt := ht.OpenChannelPsbt(
carol, dave, lntemp.OpenChannelParams{
Amt: chanSize,
FundingShim: &lnrpc.FundingShim{
Shim: &lnrpc.FundingShim_PsbtShim{
PsbtShim: &lnrpc.PsbtShim{
PendingChanId: pendingChanID[:],
PendingChanId: pendingChanID,
NoPublish: true,
},
},
},
},
)
require.NoError(t.t, err)
// Let's add a second channel to the batch. This time between Carol and
// Alice. We will publish the batch TX once this channel funding is
// complete.
chanUpdates2, psbtBytes2, err := openChannelPsbt(
ctxt, carol, net.Alice, lntest.OpenChannelParams{
chanUpdates2, psbtBytes2 := ht.OpenChannelPsbt(
carol, alice, lntemp.OpenChannelParams{
Amt: chanSize,
FundingShim: &lnrpc.FundingShim{
Shim: &lnrpc.FundingShim_PsbtShim{
PsbtShim: &lnrpc.PsbtShim{
PendingChanId: pendingChanID2[:],
PendingChanId: pendingChanID2,
NoPublish: true,
BasePsbt: tempPsbt,
},
@ -325,7 +314,6 @@ func testPsbtChanFundingExternal(net *lntest.NetworkHarness, t *harnessTest) {
},
},
)
require.NoError(t.t, err)
// We'll now ask Alice's wallet to fund the PSBT for us. This will
// return a packet with inputs and outputs set but without any witness
@ -338,8 +326,7 @@ func testPsbtChanFundingExternal(net *lntest.NetworkHarness, t *harnessTest) {
SatPerVbyte: 2,
},
}
fundResp, err := net.Alice.WalletKitClient.FundPsbt(ctxt, fundReq)
require.NoError(t.t, err)
fundResp := alice.RPC.FundPsbt(fundReq)
// We have a PSBT that has no witness data yet, which is exactly what we
// need for the next step: Verify the PSBT with the funding intents.
@ -349,92 +336,77 @@ func testPsbtChanFundingExternal(net *lntest.NetworkHarness, t *harnessTest) {
// direct communication with Carol and won't send the signed TX to her
// before broadcasting it. So we cannot call the finalize step but
// instead just tell lnd to wait for a TX to be published/confirmed.
_, err = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
carol.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
PsbtVerify: &lnrpc.FundingPsbtVerify{
PendingChanId: pendingChanID[:],
PendingChanId: pendingChanID,
FundedPsbt: fundResp.FundedPsbt,
SkipFinalize: true,
},
},
})
require.NoError(t.t, err)
_, err = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
carol.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
PsbtVerify: &lnrpc.FundingPsbtVerify{
PendingChanId: pendingChanID2[:],
PendingChanId: pendingChanID2,
FundedPsbt: fundResp.FundedPsbt,
SkipFinalize: true,
},
},
})
require.NoError(t.t, err)
// Consume the "channel pending" update. This waits until the funding
// transaction was fully compiled for both channels.
updateResp, err := receiveChanUpdate(ctxt, chanUpdates)
require.NoError(t.t, err)
updateResp := ht.ReceiveOpenChannelUpdate(chanUpdates)
upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
require.True(t.t, ok)
require.True(ht, ok)
chanPoint := &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
FundingTxidBytes: upd.ChanPending.Txid,
},
OutputIndex: upd.ChanPending.OutputIndex,
}
updateResp2, err := receiveChanUpdate(ctxt, chanUpdates2)
require.NoError(t.t, err)
updateResp2 := ht.ReceiveOpenChannelUpdate(chanUpdates2)
upd2, ok := updateResp2.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
require.True(t.t, ok)
require.True(ht, ok)
chanPoint2 := &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
FundingTxidBytes: upd2.ChanPending.Txid,
},
OutputIndex: upd2.ChanPending.OutputIndex,
}
numPending, err := numOpenChannelsPending(ctxt, carol)
require.NoError(t.t, err)
require.Equal(t.t, 2, numPending)
ht.AssertNumPendingOpenChannels(carol, 2)
// Now we'll ask Alice's wallet to sign the PSBT so we can finish the
// funding flow.
finalizeReq := &walletrpc.FinalizePsbtRequest{
FundedPsbt: fundResp.FundedPsbt,
}
finalizeRes, err := net.Alice.WalletKitClient.FinalizePsbt(
ctxt, finalizeReq,
)
require.NoError(t.t, err)
finalizeRes := alice.RPC.FinalizePsbt(finalizeReq)
// No transaction should have been published yet.
mempool, err := net.Miner.Client.GetRawMempool()
require.NoError(t.t, err)
require.Equal(t.t, 0, len(mempool))
ht.Miner.AssertNumTxsInMempool(0)
// Great, now let's publish the final raw transaction.
var finalTx wire.MsgTx
err = finalTx.Deserialize(bytes.NewReader(finalizeRes.RawFinalTx))
require.NoError(t.t, err)
err := finalTx.Deserialize(bytes.NewReader(finalizeRes.RawFinalTx))
require.NoError(ht, err)
txHash := finalTx.TxHash()
_, err = net.Miner.Client.SendRawTransaction(&finalTx, false)
require.NoError(t.t, err)
_, err = ht.Miner.Client.SendRawTransaction(&finalTx, false)
require.NoError(ht, err)
// Now we can mine a block to get the transaction confirmed, then wait
// for the new channel to be propagated through the network.
block := mineBlocks(t, net, 6, 1)[0]
assertTxInBlock(t, block, &txHash)
err = carol.WaitForNetworkChannelOpen(chanPoint)
require.NoError(t.t, err)
err = carol.WaitForNetworkChannelOpen(chanPoint2)
require.NoError(t.t, err)
block := ht.MineBlocksAndAssertNumTxes(6, 1)[0]
ht.Miner.AssertTxInBlock(block, &txHash)
ht.AssertTopologyChannelOpen(carol, chanPoint)
ht.AssertTopologyChannelOpen(carol, chanPoint2)
// With the channel open, ensure that it is counted towards Carol's
// total channel balance.
balReq := &lnrpc.ChannelBalanceRequest{}
balRes, err := carol.ChannelBalance(ctxt, balReq)
require.NoError(t.t, err)
require.NotEqual(t.t, int64(0), balRes.LocalBalance.Sat)
balRes := carol.RPC.ChannelBalance()
require.NotZero(ht, balRes.LocalBalance.Sat)
// Next, to make sure the channel functions as normal, we'll make some
// payments within the channel.
@ -443,88 +415,88 @@ func testPsbtChanFundingExternal(net *lntest.NetworkHarness, t *harnessTest) {
Memo: "new chans",
Value: int64(payAmt),
}
resp, err := dave.AddInvoice(ctxt, invoice)
require.NoError(t.t, err)
err = completePaymentRequests(
carol, carol.RouterClient, []string{resp.PaymentRequest}, true,
)
require.NoError(t.t, err)
resp := dave.RPC.AddInvoice(invoice)
ht.CompletePaymentRequests(carol, []string{resp.PaymentRequest})
// TODO(yy): remove the sleep once the following bug is fixed. When the
// payment is reported as settled by Carol, it's expected the
// commitment dance is finished and all subsequent states have been
// updated. Yet we'd receive the error `cannot co-op close channel with
// active htlcs` or `link failed to shutdown` if we close the channel.
// We need to investigate the order of settling the payments and
// updating commitments to understand and fix it.
time.Sleep(2 * time.Second)
// To conclude, we'll close the newly created channel between Carol and
// Dave. This function will also block until the channels are closed and
// will additionally assert the relevant channel closing post
// conditions.
closeChannelAndAssert(t, net, carol, chanPoint, false)
closeChannelAndAssert(t, net, carol, chanPoint2, false)
ht.CloseChannel(carol, chanPoint)
ht.CloseChannel(carol, chanPoint2)
}
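Several of the new harness calls above, such as ReceiveOpenChannelUpdate, wrap reading the OpenChannel update stream. A hedged sketch of that consumption, assuming the stream returned by OpenChannelPsbt is the standard lnrpc open-channel client (the real helper additionally enforces a timeout):

// Sketch: wait for the "channel pending" update on an open-channel stream.
func waitForChanPending(
	stream lnrpc.Lightning_OpenChannelClient) (*lnrpc.PendingUpdate,
	error) {

	// Block until the next update arrives on the stream.
	update, err := stream.Recv()
	if err != nil {
		return nil, err
	}

	// For a freshly funded channel the first update is "channel pending".
	pending, ok := update.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
	if !ok {
		return nil, fmt.Errorf("expected ChanPending, got %T",
			update.Update)
	}

	return pending.ChanPending, nil
}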
// testPsbtChanFundingSingleStep checks whether PSBT funding works also when the
// wallet of both nodes are empty and one of them uses PSBT and an external
// testPsbtChanFundingSingleStep checks whether PSBT funding works also when
// the wallets of both nodes are empty and one of them uses PSBT and an external
// wallet to fund the channel while creating reserve output in the same
// transaction.
func testPsbtChanFundingSingleStep(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
func testPsbtChanFundingSingleStep(ht *lntemp.HarnessTest) {
const chanSize = funding.MaxBtcFundingAmount
// Everything we do here should be done within a second or two, so we
// can just keep a single timeout context around for all calls.
ctxt, cancel := context.WithTimeout(ctxb, 2*defaultTimeout)
defer cancel()
args := nodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
// First, we'll create two new nodes that we'll use to open channels
// between for this test. But in this case both nodes have an empty
// wallet.
carol := net.NewNode(t.t, "carol", args)
defer shutdownAndAssert(net, t, carol)
carol := ht.NewNode("carol", args)
defer ht.Shutdown(carol)
dave := net.NewNode(t.t, "dave", args)
defer shutdownAndAssert(net, t, dave)
dave := ht.NewNode("dave", args)
defer ht.Shutdown(dave)
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, net.Alice)
alice := ht.Alice
ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
// Get new address for anchor reserve.
reserveAddrReq := &lnrpc.NewAddressRequest{
req := &lnrpc.NewAddressRequest{
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
}
addrResp, err := carol.NewAddress(ctxb, reserveAddrReq)
require.NoError(t.t, err)
reserveAddr, err := btcutil.DecodeAddress(addrResp.Address, harnessNetParams)
require.NoError(t.t, err)
addrResp := carol.RPC.NewAddress(req)
reserveAddr, err := btcutil.DecodeAddress(
addrResp.Address, harnessNetParams,
)
require.NoError(ht, err)
reserveAddrScript, err := txscript.PayToAddrScript(reserveAddr)
require.NoError(t.t, err)
require.NoError(ht, err)
// Before we start the test, we'll ensure both sides are connected so
// the funding flow can be properly executed.
net.EnsureConnected(t.t, carol, dave)
ht.EnsureConnected(carol, dave)
// At this point, we can begin our PSBT channel funding workflow. We'll
// start by generating a pending channel ID externally that will be used
// to track this new funding type.
var pendingChanID [32]byte
_, err = rand.Read(pendingChanID[:])
require.NoError(t.t, err)
pendingChanID := ht.Random32Bytes()
// Now that we have the pending channel ID, Carol will open the channel
// by specifying a PSBT shim.
chanUpdates, tempPsbt, err := openChannelPsbt(
ctxt, carol, dave, lntest.OpenChannelParams{
chanUpdates, tempPsbt := ht.OpenChannelPsbt(
carol, dave, lntemp.OpenChannelParams{
Amt: chanSize,
FundingShim: &lnrpc.FundingShim{
Shim: &lnrpc.FundingShim_PsbtShim{
PsbtShim: &lnrpc.PsbtShim{
PendingChanId: pendingChanID[:],
PendingChanId: pendingChanID,
NoPublish: false,
},
},
},
},
)
require.NoError(t.t, err)
decodedPsbt, err := psbt.NewFromRawBytes(bytes.NewReader(tempPsbt), false)
require.NoError(t.t, err)
decodedPsbt, err := psbt.NewFromRawBytes(
bytes.NewReader(tempPsbt), false,
)
require.NoError(ht, err)
reserveTxOut := wire.TxOut{
Value: 10000,
@ -538,7 +510,7 @@ func testPsbtChanFundingSingleStep(net *lntest.NetworkHarness, t *harnessTest) {
var psbtBytes bytes.Buffer
err = decodedPsbt.Serialize(&psbtBytes)
require.NoError(t.t, err)
require.NoError(ht, err)
fundReq := &walletrpc.FundPsbtRequest{
Template: &walletrpc.FundPsbtRequest_Psbt{
@ -548,55 +520,45 @@ func testPsbtChanFundingSingleStep(net *lntest.NetworkHarness, t *harnessTest) {
SatPerVbyte: 2,
},
}
fundResp, err := net.Alice.WalletKitClient.FundPsbt(ctxt, fundReq)
require.NoError(t.t, err)
fundResp := alice.RPC.FundPsbt(fundReq)
// Make sure the wallets are actually empty
unspentCarol, err := carol.ListUnspent(ctxb, &lnrpc.ListUnspentRequest{})
require.NoError(t.t, err)
require.Len(t.t, unspentCarol.Utxos, 0)
unspentDave, err := dave.ListUnspent(ctxb, &lnrpc.ListUnspentRequest{})
require.NoError(t.t, err)
require.Len(t.t, unspentDave.Utxos, 0)
ht.AssertNumUTXOsUnconfirmed(alice, 0)
ht.AssertNumUTXOsUnconfirmed(dave, 0)
// We have a PSBT that has no witness data yet, which is exactly what we
// need for the next step: Verify the PSBT with the funding intents.
_, err = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
carol.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
PsbtVerify: &lnrpc.FundingPsbtVerify{
PendingChanId: pendingChanID[:],
PendingChanId: pendingChanID,
FundedPsbt: fundResp.FundedPsbt,
},
},
})
require.NoError(t.t, err)
// Now we'll ask Alice's wallet to sign the PSBT so we can finish the
// funding flow.
finalizeReq := &walletrpc.FinalizePsbtRequest{
FundedPsbt: fundResp.FundedPsbt,
}
finalizeRes, err := net.Alice.WalletKitClient.FinalizePsbt(ctxt, finalizeReq)
require.NoError(t.t, err)
finalizeRes := alice.RPC.FinalizePsbt(finalizeReq)
// We've signed our PSBT now, let's pass it to the intent again.
_, err = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
carol.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
PsbtFinalize: &lnrpc.FundingPsbtFinalize{
PendingChanId: pendingChanID[:],
PendingChanId: pendingChanID,
SignedPsbt: finalizeRes.SignedPsbt,
},
},
})
require.NoError(t.t, err)
// Consume the "channel pending" update. This waits until the funding
// transaction was fully compiled.
updateResp, err := receiveChanUpdate(ctxt, chanUpdates)
require.NoError(t.t, err)
updateResp := ht.ReceiveOpenChannelUpdate(chanUpdates)
upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
require.True(t.t, ok)
require.True(ht, ok)
chanPoint := &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
FundingTxidBytes: upd.ChanPending.Txid,
@ -606,13 +568,12 @@ func testPsbtChanFundingSingleStep(net *lntest.NetworkHarness, t *harnessTest) {
var finalTx wire.MsgTx
err = finalTx.Deserialize(bytes.NewReader(finalizeRes.RawFinalTx))
require.NoError(t.t, err)
require.NoError(ht, err)
txHash := finalTx.TxHash()
block := mineBlocks(t, net, 6, 1)[0]
assertTxInBlock(t, block, &txHash)
err = carol.WaitForNetworkChannelOpen(chanPoint)
require.NoError(t.t, err)
block := ht.MineBlocksAndAssertNumTxes(6, 1)[0]
ht.Miner.AssertTxInBlock(block, &txHash)
ht.AssertTopologyChannelOpen(carol, chanPoint)
// Next, to make sure the channel functions as normal, we'll make some
// payments within the channel.
@ -621,19 +582,23 @@ func testPsbtChanFundingSingleStep(net *lntest.NetworkHarness, t *harnessTest) {
Memo: "new chans",
Value: int64(payAmt),
}
resp, err := dave.AddInvoice(ctxt, invoice)
require.NoError(t.t, err)
err = completePaymentRequests(
carol, carol.RouterClient, []string{resp.PaymentRequest},
true,
)
require.NoError(t.t, err)
resp := dave.RPC.AddInvoice(invoice)
ht.CompletePaymentRequests(carol, []string{resp.PaymentRequest})
// TODO(yy): remove the sleep once the following bug is fixed. When the
// payment is reported as settled by Carol, it's expected the
// commitment dance is finished and all subsequent states have been
// updated. Yet we'd receive the error `cannot co-op close channel with
// active htlcs` or `link failed to shutdown` if we close the channel.
// We need to investigate the order of settling the payments and
// updating commitments to understand and fix it.
time.Sleep(2 * time.Second)
// To conclude, we'll close the newly created channel between Carol and
// Dave. This function will also block until the channel is closed and
// will additionally assert the relevant channel closing post
// conditions.
closeChannelAndAssert(t, net, carol, chanPoint, false)
ht.CloseChannel(carol, chanPoint)
}
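For readers following the PSBT flow across the hunks above, here is a condensed sketch of the two FundingStateStep transitions the initiator drives; pendingChanID, fundedPsbt and signedPsbt come from the surrounding flow, and the batch-funding test replaces the finalize step with SkipFinalize plus an external broadcast:

// Step 1: hand the funded-but-unsigned PSBT to the pending funding intent
// so lnd can verify the expected channel output is present.
carol.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
	Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
		PsbtVerify: &lnrpc.FundingPsbtVerify{
			PendingChanId: pendingChanID,
			FundedPsbt:    fundedPsbt,
		},
	},
})

// Step 2: once the PSBT is signed, finalize so lnd can extract and
// publish the funding transaction and move the channel to pending-open.
carol.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
	Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
		PsbtFinalize: &lnrpc.FundingPsbtFinalize{
			PendingChanId: pendingChanID,
			SignedPsbt:    signedPsbt,
		},
	},
})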
// testSignPsbt tests that the SignPsbt RPC works correctly.

View file

@ -5,131 +5,69 @@ import (
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
// testResHandoff tests that the contractcourt is able to properly hand-off
// resolution messages to the switch.
func testResHandoff(net *lntest.NetworkHarness, t *harnessTest) {
func testResHandoff(ht *lntemp.HarnessTest) {
const (
chanAmt = btcutil.Amount(1000000)
paymentAmt = 50000
)
ctxb := context.Background()
alice, bob := ht.Alice, ht.Bob
// First we'll create a channel between Alice and Bob.
net.EnsureConnected(t.t, net.Alice, net.Bob)
ht.EnsureConnected(alice, bob)
chanPointAlice := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
Amt: chanAmt,
},
)
defer closeChannelAndAssert(t, net, net.Alice, chanPointAlice, false)
// Wait for Alice and Bob to receive the channel edge from the funding
// manager.
err := net.Alice.WaitForNetworkChannelOpen(chanPointAlice)
require.NoError(t.t, err)
err = net.Bob.WaitForNetworkChannelOpen(chanPointAlice)
require.NoError(t.t, err)
params := lntemp.OpenChannelParams{Amt: chanAmt}
chanPointAlice := ht.OpenChannel(alice, bob, params)
// Create a new node Carol that will be in hodl mode. This is used to
// trigger the behavior of checkRemoteDanglingActions in the
// contractcourt. This will cause Bob to fail the HTLC back to Alice.
carol := net.NewNode(t.t, "Carol", []string{"--hodl.commit"})
defer shutdownAndAssert(net, t, carol)
carol := ht.NewNode("Carol", []string{"--hodl.commit"})
defer ht.Shutdown(carol)
// Connect Bob to Carol.
net.ConnectNodes(t.t, net.Bob, carol)
ht.ConnectNodes(bob, carol)
// Open a channel between Bob and Carol.
chanPointCarol := openChannelAndAssert(
t, net, net.Bob, carol,
lntest.OpenChannelParams{
Amt: chanAmt,
},
)
// Wait for Bob and Carol to receive the channel edge from the funding
// manager.
err = net.Bob.WaitForNetworkChannelOpen(chanPointCarol)
require.NoError(t.t, err)
err = carol.WaitForNetworkChannelOpen(chanPointCarol)
require.NoError(t.t, err)
chanPointCarol := ht.OpenChannel(bob, carol, params)
// Wait for Alice to see the channel edge in the graph.
err = net.Alice.WaitForNetworkChannelOpen(chanPointCarol)
require.NoError(t.t, err)
ht.AssertTopologyChannelOpen(alice, chanPointCarol)
// We'll create an invoice for Carol that Alice will attempt to pay.
// Since Carol is in hodl.commit mode, she won't send back any commit
// sigs.
carolPayReqs, _, _, err := createPayReqs(
carol, paymentAmt, 1,
)
require.NoError(t.t, err)
carolPayReqs, _, _ := ht.CreatePayReqs(carol, paymentAmt, 1)
// Alice will now attempt to fulfill the invoice.
err = completePaymentRequests(
net.Alice, net.Alice.RouterClient, carolPayReqs, false,
)
require.NoError(t.t, err)
ht.CompletePaymentRequestsNoWait(alice, carolPayReqs, chanPointAlice)
// Wait until Carol has received the Add, CommitSig from Bob, and has
// responded with a RevokeAndAck. We expect NumUpdates to be 1 meaning
// Carol's CommitHeight is 1.
err = wait.Predicate(func() bool {
carolInfo, err := getChanInfo(carol)
if err != nil {
return false
}
return carolInfo.NumUpdates == 1
}, defaultTimeout)
require.NoError(t.t, err)
ht.AssertChannelCommitHeight(carol, chanPointCarol, 1)
// Before we shut down Alice, we'll assert that she only has 1 update.
err = wait.Predicate(func() bool {
aliceInfo, err := getChanInfo(net.Alice)
if err != nil {
return false
}
return aliceInfo.NumUpdates == 1
}, defaultTimeout)
require.NoError(t.t, err)
ht.AssertChannelCommitHeight(alice, chanPointAlice, 1)
// We'll shut down Alice so that Bob can't connect to her.
restartAlice, err := net.SuspendNode(net.Alice)
require.NoError(t.t, err)
restartAlice := ht.SuspendNode(alice)
// Bob will now force close his channel with Carol such that resolution
// messages are created and forwarded backwards to Alice.
_, _, err = net.CloseChannel(net.Bob, chanPointCarol, true)
require.NoError(t.t, err)
ht.CloseChannelAssertPending(bob, chanPointCarol, true)
// The channel should be listed in the PendingChannels result.
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
ht.AssertNumWaitingClose(bob, 1)
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := net.Bob.PendingChannels(
ctxt, pendingChansRequest,
)
require.NoError(t.t, err)
require.NoError(t.t, checkNumWaitingCloseChannels(pendingChanResp, 1))
// We'll mine a block to confirm the force close transaction and to
// advance Bob's contract state with Carol to StateContractClosed.
mineBlocks(t, net, 1, 1)
// Mine a block to confirm the closing tx.
ht.MineBlocks(1)
// We sleep here so we can be sure that the hand-off has occurred from
// Bob's contractcourt to Bob's htlcswitch. This sleep could be removed
@ -137,64 +75,25 @@ func testResHandoff(net *lntest.NetworkHarness, t *harnessTest) {
// querying the state of resolution messages.
time.Sleep(10 * time.Second)
// Mine blocks until Bob has no waiting close channels. This tests
// that the circuit-deletion logic is skipped if a resolution message
// Mine blocks until Bob has no waiting close channels. This tests that
// the circuit-deletion logic is skipped if a resolution message
// exists.
for {
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)
pendingChanResp, err = net.Bob.PendingChannels(
ctxt, pendingChansRequest,
)
require.NoError(t.t, err)
isErr := checkNumForceClosedChannels(pendingChanResp, 0)
if isErr == nil {
break
}
time.Sleep(150 * time.Millisecond)
}
ht.CleanupForceClose(bob, chanPointCarol)
// We will now restart Bob so that we can test whether the resolution
// messages are re-forwarded on start-up.
restartBob, err := net.SuspendNode(net.Bob)
require.NoError(t.t, err)
err = restartBob()
require.NoError(t.t, err)
ht.RestartNode(bob)
// We'll now also restart Alice and connect her with Bob.
err = restartAlice()
require.NoError(t.t, err)
require.NoError(ht, restartAlice())
net.EnsureConnected(t.t, net.Alice, net.Bob)
ht.EnsureConnected(alice, bob)
// We'll assert that Alice has received the failure resolution
// message.
err = wait.Predicate(func() bool {
aliceInfo, err := getChanInfo(net.Alice)
if err != nil {
return false
}
return aliceInfo.NumUpdates == 2
}, defaultTimeout)
require.NoError(t.t, err)
// We'll assert that Alice has received the failure resolution message.
ht.AssertChannelCommitHeight(alice, chanPointAlice, 2)
// Assert that Alice's payment failed.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
paymentsResp, err := net.Alice.ListPayments(
ctxt, &lnrpc.ListPaymentsRequest{
IncludeIncomplete: true,
},
)
require.NoError(t.t, err)
require.Equal(t.t, 1, len(paymentsResp.Payments))
ht.AssertFirstHTLCError(alice, lnrpc.Failure_PERMANENT_CHANNEL_FAILURE)
htlcs := paymentsResp.Payments[0].Htlcs
require.Equal(t.t, 1, len(htlcs))
require.Equal(t.t, lnrpc.HTLCAttempt_FAILED, htlcs[0].Status)
ht.CloseChannel(alice, chanPointAlice)
}
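The AssertFirstHTLCError call above condenses the manual ListPayments inspection from the removed code. A hedged sketch of that inspection, assuming the lntemp RPC wrapper exposes ListPayments in the same style as the other calls used here:

// Alice should have exactly one payment, whose first HTLC attempt failed
// with a permanent channel failure.
payments := alice.RPC.ListPayments(&lnrpc.ListPaymentsRequest{
	IncludeIncomplete: true,
})
require.Len(ht, payments.Payments, 1)

htlc := payments.Payments[0].Htlcs[0]
require.Equal(ht, lnrpc.HTLCAttempt_FAILED, htlc.Status)
require.Equal(
	ht, lnrpc.Failure_PERMANENT_CHANNEL_FAILURE, htlc.Failure.Code,
)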

View file

@ -2,7 +2,6 @@ package itest
import (
"bytes"
"context"
"crypto/tls"
"encoding/base64"
"encoding/hex"
@ -24,8 +23,8 @@ import (
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lnrpc/verrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/stretchr/testify/assert"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/stretchr/testify/require"
)
@ -57,19 +56,21 @@ var (
// testRestAPI tests that the most important features of the REST API work
// correctly.
func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
func testRestAPI(ht *lntemp.HarnessTest) {
testCases := []struct {
name string
run func(*testing.T, *lntest.HarnessNode, *lntest.HarnessNode)
run func(*testing.T, *node.HarnessNode, *node.HarnessNode)
}{{
name: "simple GET",
run: func(t *testing.T, a, b *lntest.HarnessNode) {
run: func(t *testing.T, a, b *node.HarnessNode) {
t.Helper()
// Check that the parsing into the response proto
// message works.
resp := &lnrpc.GetInfoResponse{}
err := invokeGET(a, "/v1/getinfo", resp)
require.Nil(t, err, "getinfo")
assert.Equal(t, "#3399ff", resp.Color, "node color")
require.Equal(t, "#3399ff", resp.Color, "node color")
// Make sure we get the correct field names (snake
// case).
@ -77,20 +78,22 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
a, "/v1/getinfo", "GET", nil, nil,
)
require.Nil(t, err, "getinfo")
assert.Contains(
require.Contains(
t, string(resp2), "best_header_timestamp",
"getinfo",
)
},
}, {
name: "simple POST and GET with query param",
run: func(t *testing.T, a, b *lntest.HarnessNode) {
run: func(t *testing.T, a, b *node.HarnessNode) {
t.Helper()
// Add an invoice, testing POST in the process.
req := &lnrpc.Invoice{Value: 1234}
resp := &lnrpc.AddInvoiceResponse{}
err := invokePOST(a, "/v1/invoices", req, resp)
require.Nil(t, err, "add invoice")
assert.Equal(t, 32, len(resp.RHash), "invoice rhash")
require.Equal(t, 32, len(resp.RHash), "invoice rhash")
// Make sure we can call a GET endpoint with a hex
// encoded URL part.
@ -98,11 +101,14 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
resp2 := &lnrpc.Invoice{}
err = invokeGET(a, url, resp2)
require.Nil(t, err, "query invoice")
assert.Equal(t, int64(1234), resp2.Value, "invoice amt")
require.Equal(t, int64(1234), resp2.Value,
"invoice amt")
},
}, {
name: "GET with base64 encoded byte slice in path",
run: func(t *testing.T, a, b *lntest.HarnessNode) {
run: func(t *testing.T, a, b *node.HarnessNode) {
t.Helper()
url := "/v2/router/mc/probability/%s/%s/%d"
url = fmt.Sprintf(
url, urlEnc.EncodeToString(a.PubKey[:]),
@ -115,38 +121,37 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
},
}, {
name: "GET with map type query param",
run: func(t *testing.T, a, b *lntest.HarnessNode) {
// Get a new wallet address from Alice.
ctxb := context.Background()
newAddrReq := &lnrpc.NewAddressRequest{
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
}
addrRes, err := a.NewAddress(ctxb, newAddrReq)
require.Nil(t, err, "get address")
run: func(t *testing.T, a, b *node.HarnessNode) {
t.Helper()
// Use a fake address.
addr := "bcrt1qlutnwklt4u2548cufrjmsjclewugr9lcpnkzag"
// Create the full URL with the map query param.
url := "/v1/transactions/fee?target_conf=%d&" +
"AddrToAmount[%s]=%d"
url = fmt.Sprintf(url, 2, addrRes.Address, 50000)
url = fmt.Sprintf(url, 2, addr, 50000)
resp := &lnrpc.EstimateFeeResponse{}
err = invokeGET(a, url, resp)
err := invokeGET(a, url, resp)
require.Nil(t, err, "estimate fee")
assert.Greater(t, resp.FeeSat, int64(253), "fee")
require.Greater(t, resp.FeeSat, int64(253), "fee")
},
}, {
name: "sub RPC servers REST support",
run: func(t *testing.T, a, b *lntest.HarnessNode) {
run: func(t *testing.T, a, b *node.HarnessNode) {
t.Helper()
// Query autopilot status.
res1 := &autopilotrpc.StatusResponse{}
err := invokeGET(a, "/v2/autopilot/status", res1)
require.Nil(t, err, "autopilot status")
assert.Equal(t, false, res1.Active, "autopilot status")
require.Equal(t, false, res1.Active, "autopilot status")
// Query the version RPC.
res2 := &verrpc.Version{}
err = invokeGET(a, "/v2/versioner/version", res2)
require.Nil(t, err, "version")
assert.Greater(
require.Greater(
t, res2.AppMinor, uint32(0), "lnd minor version",
)
@ -157,11 +162,13 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
a, "/v2/wallet/address/next", req1, res3,
)
require.Nil(t, err, "address")
assert.NotEmpty(t, res3.Addr, "address")
require.NotEmpty(t, res3.Addr, "address")
},
}, {
name: "CORS headers",
run: func(t *testing.T, a, b *lntest.HarnessNode) {
run: func(t *testing.T, a, b *node.HarnessNode) {
t.Helper()
// Alice allows all origins. Make sure we get the same
// value back in the CORS header that we send in the
// Origin header.
@ -171,12 +178,12 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
a, "/v1/getinfo", "OPTIONS", nil, reqHeaders,
)
require.Nil(t, err, "getinfo")
assert.Equal(
require.Equal(
t, "https://foo.bar:9999",
resHeaders.Get("Access-Control-Allow-Origin"),
"CORS header",
)
assert.Equal(t, 0, len(body))
require.Equal(t, 0, len(body))
// Make sure that we don't get a value set for Bob which
// doesn't allow any CORS origin.
@ -184,17 +191,17 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
b, "/v1/getinfo", "OPTIONS", nil, reqHeaders,
)
require.Nil(t, err, "getinfo")
assert.Equal(
require.Equal(
t, "",
resHeaders.Get("Access-Control-Allow-Origin"),
"CORS header",
)
assert.Equal(t, 0, len(body))
require.Equal(t, 0, len(body))
},
}}
wsTestCases := []struct {
name string
run func(ht *harnessTest, net *lntest.NetworkHarness)
run func(ht *lntemp.HarnessTest)
}{{
name: "websocket subscription",
run: wsTestCaseSubscription,
@ -212,39 +219,33 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
// Make sure Alice allows all CORS origins. Bob will keep the default.
// We also make sure the ping/pong messages are sent very often, so we
// can test them without waiting half a minute.
net.Alice.Cfg.ExtraArgs = append(
net.Alice.Cfg.ExtraArgs, "--restcors=\"*\"",
alice, bob := ht.Alice, ht.Bob
alice.Cfg.ExtraArgs = append(
alice.Cfg.ExtraArgs, "--restcors=\"*\"",
fmt.Sprintf("--ws-ping-interval=%s", pingInterval),
fmt.Sprintf("--ws-pong-wait=%s", pongWait),
)
err := net.RestartNode(net.Alice, nil)
if err != nil {
ht.t.Fatalf("Could not restart Alice to set CORS config: %v",
err)
}
ht.RestartNode(alice)
for _, tc := range testCases {
tc := tc
ht.t.Run(tc.name, func(t *testing.T) {
tc.run(t, net.Alice, net.Bob)
ht.Run(tc.name, func(t *testing.T) {
tc.run(t, alice, bob)
})
}
for _, tc := range wsTestCases {
tc := tc
ht.t.Run(tc.name, func(t *testing.T) {
ht := &harnessTest{
t: t, testCase: ht.testCase, lndHarness: net,
}
tc.run(ht, net)
ht.Run(tc.name, func(t *testing.T) {
st := ht.Subtest(t)
tc.run(st)
})
}
}
func wsTestCaseSubscription(ht *harnessTest, net *lntest.NetworkHarness) {
func wsTestCaseSubscription(ht *lntemp.HarnessTest) {
// Find out the current best block so we can subscribe to the next one.
hash, height, err := net.Miner.Client.GetBestBlock()
require.Nil(ht.t, err, "get best block")
hash, height := ht.Miner.GetBestBlock()
// Create a new subscription to get block epoch events.
req := &chainrpc.BlockEpoch{
@ -252,11 +253,11 @@ func wsTestCaseSubscription(ht *harnessTest, net *lntest.NetworkHarness) {
Height: uint32(height),
}
url := "/v2/chainnotifier/register/blocks"
c, err := openWebSocket(net.Alice, url, "POST", req, nil)
require.Nil(ht.t, err, "websocket")
c, err := openWebSocket(ht.Alice, url, "POST", req, nil)
require.NoError(ht, err, "websocket")
defer func() {
err := c.WriteMessage(websocket.CloseMessage, closeMsg)
require.NoError(ht.t, err)
require.NoError(ht, err)
_ = c.Close()
}()
@ -300,30 +301,25 @@ func wsTestCaseSubscription(ht *harnessTest, net *lntest.NetworkHarness) {
}()
// Mine a block and make sure we get a message for it.
blockHashes, err := net.Miner.Client.Generate(1)
require.Nil(ht.t, err, "generate blocks")
assert.Equal(ht.t, 1, len(blockHashes), "num blocks")
blockHashes := ht.Miner.GenerateBlocks(1)
select {
case msg := <-msgChan:
assert.Equal(
ht.t, blockHashes[0].CloneBytes(), msg.Hash,
require.Equal(
ht, blockHashes[0].CloneBytes(), msg.Hash,
"block hash",
)
case err := <-errChan:
ht.t.Fatalf("Received error from WS: %v", err)
ht.Fatalf("Received error from WS: %v", err)
case <-timeout:
ht.t.Fatalf("Timeout before message was received")
ht.Fatalf("Timeout before message was received")
}
}
func wsTestCaseSubscriptionMacaroon(ht *harnessTest,
net *lntest.NetworkHarness) {
func wsTestCaseSubscriptionMacaroon(ht *lntemp.HarnessTest) {
// Find out the current best block so we can subscribe to the next one.
hash, height, err := net.Miner.Client.GetBestBlock()
require.Nil(ht.t, err, "get best block")
hash, height := ht.Miner.GetBestBlock()
// Create a new subscription to get block epoch events.
req := &chainrpc.BlockEpoch{
@ -335,22 +331,23 @@ func wsTestCaseSubscriptionMacaroon(ht *harnessTest,
// This time we send the macaroon in the special header
// Sec-Websocket-Protocol which is the only header field available to
// browsers when opening a WebSocket.
mac, err := net.Alice.ReadMacaroon(
net.Alice.AdminMacPath(), defaultTimeout,
alice := ht.Alice
mac, err := alice.ReadMacaroon(
alice.Cfg.AdminMacPath, defaultTimeout,
)
require.NoError(ht.t, err, "read admin mac")
require.NoError(ht, err, "read admin mac")
macBytes, err := mac.MarshalBinary()
require.NoError(ht.t, err, "marshal admin mac")
require.NoError(ht, err, "marshal admin mac")
customHeader := make(http.Header)
customHeader.Set(lnrpc.HeaderWebSocketProtocol, fmt.Sprintf(
"Grpc-Metadata-Macaroon+%s", hex.EncodeToString(macBytes),
))
c, err := openWebSocket(net.Alice, url, "POST", req, customHeader)
require.Nil(ht.t, err, "websocket")
c, err := openWebSocket(alice, url, "POST", req, customHeader)
require.Nil(ht, err, "websocket")
defer func() {
err := c.WriteMessage(websocket.CloseMessage, closeMsg)
require.NoError(ht.t, err)
require.NoError(ht, err)
_ = c.Close()
}()
@ -394,52 +391,49 @@ func wsTestCaseSubscriptionMacaroon(ht *harnessTest,
}()
// Mine a block and make sure we get a message for it.
blockHashes, err := net.Miner.Client.Generate(1)
require.Nil(ht.t, err, "generate blocks")
assert.Equal(ht.t, 1, len(blockHashes), "num blocks")
blockHashes := ht.Miner.GenerateBlocks(1)
select {
case msg := <-msgChan:
assert.Equal(
ht.t, blockHashes[0].CloneBytes(), msg.Hash,
require.Equal(
ht, blockHashes[0].CloneBytes(), msg.Hash,
"block hash",
)
case err := <-errChan:
ht.t.Fatalf("Received error from WS: %v", err)
ht.Fatalf("Received error from WS: %v", err)
case <-timeout:
ht.t.Fatalf("Timeout before message was received")
ht.Fatalf("Timeout before message was received")
}
}
func wsTestCaseBiDirectionalSubscription(ht *harnessTest,
net *lntest.NetworkHarness) {
func wsTestCaseBiDirectionalSubscription(ht *lntemp.HarnessTest) {
initialRequest := &lnrpc.ChannelAcceptResponse{}
url := "/v1/channels/acceptor"
// This time we send the macaroon in the special header
// Sec-Websocket-Protocol which is the only header field available to
// browsers when opening a WebSocket.
mac, err := net.Alice.ReadMacaroon(
net.Alice.AdminMacPath(), defaultTimeout,
alice := ht.Alice
mac, err := alice.ReadMacaroon(
alice.Cfg.AdminMacPath, defaultTimeout,
)
require.NoError(ht.t, err, "read admin mac")
require.NoError(ht, err, "read admin mac")
macBytes, err := mac.MarshalBinary()
require.NoError(ht.t, err, "marshal admin mac")
require.NoError(ht, err, "marshal admin mac")
customHeader := make(http.Header)
customHeader.Set(lnrpc.HeaderWebSocketProtocol, fmt.Sprintf(
"Grpc-Metadata-Macaroon+%s", hex.EncodeToString(macBytes),
))
conn, err := openWebSocket(
net.Alice, url, "POST", initialRequest, customHeader,
alice, url, "POST", initialRequest, customHeader,
)
require.Nil(ht.t, err, "websocket")
require.Nil(ht, err, "websocket")
defer func() {
err := conn.WriteMessage(websocket.CloseMessage, closeMsg)
_ = conn.Close()
require.NoError(ht.t, err)
require.NoError(ht, err)
}()
// Buffer the message channel to make sure we're always blocking on
@ -528,29 +522,30 @@ func wsTestCaseBiDirectionalSubscription(ht *harnessTest,
// Before we start opening channels, make sure the two nodes are
// connected.
net.EnsureConnected(ht.t, net.Alice, net.Bob)
bob := ht.Bob
ht.EnsureConnected(alice, bob)
// Open 3 channels to make sure multiple requests and responses can be
// sent over the web socket.
const numChannels = 3
for i := 0; i < numChannels; i++ {
openChannelAndAssert(
ht, net, net.Bob, net.Alice,
lntest.OpenChannelParams{Amt: 500000},
chanPoint := ht.OpenChannel(
bob, alice, lntemp.OpenChannelParams{Amt: 500000},
)
defer ht.CloseChannel(bob, chanPoint)
select {
case <-msgChan:
case err := <-errChan:
ht.t.Fatalf("Received error from WS: %v", err)
ht.Fatalf("Received error from WS: %v", err)
case <-timeout:
ht.t.Fatalf("Timeout before message was received")
ht.Fatalf("Timeout before message was received")
}
}
}
func wsTestPingPongTimeout(ht *harnessTest, net *lntest.NetworkHarness) {
func wsTestPingPongTimeout(ht *lntemp.HarnessTest) {
initialRequest := &lnrpc.InvoiceSubscription{
AddIndex: 1, SettleIndex: 1,
}
@ -559,25 +554,26 @@ func wsTestPingPongTimeout(ht *harnessTest, net *lntest.NetworkHarness) {
// This time we send the macaroon in the special header
// Sec-Websocket-Protocol which is the only header field available to
// browsers when opening a WebSocket.
mac, err := net.Alice.ReadMacaroon(
net.Alice.AdminMacPath(), defaultTimeout,
alice := ht.Alice
mac, err := alice.ReadMacaroon(
alice.Cfg.AdminMacPath, defaultTimeout,
)
require.NoError(ht.t, err, "read admin mac")
require.NoError(ht, err, "read admin mac")
macBytes, err := mac.MarshalBinary()
require.NoError(ht.t, err, "marshal admin mac")
require.NoError(ht, err, "marshal admin mac")
customHeader := make(http.Header)
customHeader.Set(lnrpc.HeaderWebSocketProtocol, fmt.Sprintf(
"Grpc-Metadata-Macaroon+%s", hex.EncodeToString(macBytes),
))
conn, err := openWebSocket(
net.Alice, url, "GET", initialRequest, customHeader,
alice, url, "GET", initialRequest, customHeader,
)
require.Nil(ht.t, err, "websocket")
require.Nil(ht, err, "websocket")
defer func() {
err := conn.WriteMessage(websocket.CloseMessage, closeMsg)
_ = conn.Close()
require.NoError(ht.t, err)
require.NoError(ht, err)
}()
// We want to be able to read invoices for a long time, making sure we
@ -650,27 +646,26 @@ func wsTestPingPongTimeout(ht *harnessTest, net *lntest.NetworkHarness) {
// Let's create five invoices and wait for them to arrive. We'll wait
// for at least one ping/pong cycle between each invoice.
ctxb := context.Background()
const numInvoices = 5
const value = 123
const memo = "websocket"
for i := 0; i < numInvoices; i++ {
_, err := net.Alice.AddInvoice(ctxb, &lnrpc.Invoice{
invoice := &lnrpc.Invoice{
Value: value,
Memo: memo,
})
require.NoError(ht.t, err)
}
alice.RPC.AddInvoice(invoice)
select {
case streamMsg := <-invoices:
require.Equal(ht.t, int64(value), streamMsg.Value)
require.Equal(ht.t, memo, streamMsg.Memo)
require.Equal(ht, int64(value), streamMsg.Value)
require.Equal(ht, memo, streamMsg.Memo)
case err := <-errChan:
require.Fail(ht.t, "Error reading invoice: %v", err)
require.Fail(ht, "Error reading invoice: %v", err)
case <-timeout:
require.Fail(ht.t, "No invoice msg received in time")
require.Fail(ht, "No invoice msg received in time")
}
// Let's wait for at least a whole ping/pong cycle to happen, so
@ -683,7 +678,7 @@ func wsTestPingPongTimeout(ht *harnessTest, net *lntest.NetworkHarness) {
// invokeGET calls the given URL with the GET method and appropriate macaroon
// header fields then tries to unmarshal the response into the given response
// proto message.
func invokeGET(node *lntest.HarnessNode, url string, resp proto.Message) error {
func invokeGET(node *node.HarnessNode, url string, resp proto.Message) error {
_, rawResp, err := makeRequest(node, url, "GET", nil, nil)
if err != nil {
return err
@ -695,7 +690,7 @@ func invokeGET(node *lntest.HarnessNode, url string, resp proto.Message) error {
// invokePOST calls the given URL with the POST method, request body and
// appropriate macaroon header fields then tries to unmarshal the response into
// the given response proto message.
func invokePOST(node *lntest.HarnessNode, url string, req,
func invokePOST(node *node.HarnessNode, url string, req,
resp proto.Message) error {
// Marshal the request to JSON using the jsonpb marshaler to get correct
@ -715,7 +710,7 @@ func invokePOST(node *lntest.HarnessNode, url string, req,
// makeRequest calls the given URL with the given method, request body and
// appropriate macaroon header fields and returns the raw response body.
func makeRequest(node *lntest.HarnessNode, url, method string,
func makeRequest(node *node.HarnessNode, url, method string,
request io.Reader, additionalHeaders http.Header) (http.Header, []byte,
error) {
@ -748,7 +743,7 @@ func makeRequest(node *lntest.HarnessNode, url, method string,
// openWebSocket opens a new WebSocket connection to the given URL with the
// appropriate macaroon headers and sends the request message over the socket.
func openWebSocket(node *lntest.HarnessNode, url, method string,
func openWebSocket(node *node.HarnessNode, url, method string,
req proto.Message, customHeader http.Header) (*websocket.Conn, error) {
// Prepare our macaroon headers and assemble the full URL from the
@ -785,8 +780,8 @@ func openWebSocket(node *lntest.HarnessNode, url, method string,
// addAdminMacaroon reads the admin macaroon from the node and appends it to
// the HTTP header fields.
func addAdminMacaroon(node *lntest.HarnessNode, header http.Header) error {
mac, err := node.ReadMacaroon(node.AdminMacPath(), defaultTimeout)
func addAdminMacaroon(node *node.HarnessNode, header http.Header) error {
mac, err := node.ReadMacaroon(node.Cfg.AdminMacPath, defaultTimeout)
if err != nil {
return err
}
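For context on what makeRequest and addAdminMacaroon wrap, here is a standalone sketch of calling lnd's REST API from Go. The Grpc-Metadata-macaroon header with a hex-encoded macaroon is the usual lnd REST convention, and TLS verification is skipped the same way the itest helpers do for the self-signed certificate; the function name and parameters are illustrative:

// Sketch: GET /v1/getinfo with the admin macaroon attached.
func restGetInfo(baseURL, macaroonHex string) ([]byte, error) {
	// The itest nodes use a self-signed TLS certificate, so skip
	// verification for this sketch.
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}

	req, err := http.NewRequest("GET", baseURL+"/v1/getinfo", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Grpc-Metadata-macaroon", macaroonHex)

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	return io.ReadAll(resp.Body)
}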

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -8,7 +8,8 @@ import (
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/macaroons"
"github.com/lightningnetwork/lnd/zpay32"
"github.com/stretchr/testify/require"
@ -18,133 +19,149 @@ import (
// testRPCMiddlewareInterceptor tests that the RPC middleware interceptor can
// be used correctly and in a safe way.
func testRPCMiddlewareInterceptor(net *lntest.NetworkHarness, t *harnessTest) {
func testRPCMiddlewareInterceptor(ht *lntemp.HarnessTest) {
// Let's first enable the middleware interceptor.
net.Alice.Cfg.ExtraArgs = append(
net.Alice.Cfg.ExtraArgs, "--rpcmiddleware.enable",
)
err := net.RestartNode(net.Alice, nil)
require.NoError(t.t, err)
//
// NOTE: we cannot use standby nodes here as the test messes with the
// middleware interceptor. Thus we also skip calling the cleanup of
// each of the following subtests because no standby nodes are used.
alice := ht.NewNode("alice", []string{"--rpcmiddleware.enable"})
bob := ht.NewNode("bob", nil)
// Let's set up a channel between Alice and Bob, just to get some useful
// data to inspect when doing RPC calls to Alice later.
net.EnsureConnected(t.t, net.Alice, net.Bob)
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, net.Alice)
_ = openChannelAndAssert(
t, net, net.Alice, net.Bob, lntest.OpenChannelParams{
Amt: 1_234_567,
},
)
ht.EnsureConnected(alice, bob)
ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
ht.OpenChannel(alice, bob, lntemp.OpenChannelParams{Amt: 1_234_567})
// Load or bake the macaroons that the simulated users will use to
// access the RPC.
readonlyMac, err := net.Alice.ReadMacaroon(
net.Alice.ReadMacPath(), defaultTimeout,
readonlyMac, err := alice.ReadMacaroon(
alice.Cfg.ReadMacPath, defaultTimeout,
)
require.NoError(t.t, err)
adminMac, err := net.Alice.ReadMacaroon(
net.Alice.AdminMacPath(), defaultTimeout,
require.NoError(ht, err)
adminMac, err := alice.ReadMacaroon(
alice.Cfg.AdminMacPath, defaultTimeout,
)
require.NoError(t.t, err)
require.NoError(ht, err)
customCaveatReadonlyMac, err := macaroons.SafeCopyMacaroon(readonlyMac)
require.NoError(t.t, err)
require.NoError(ht, err)
addConstraint := macaroons.CustomConstraint(
"itest-caveat", "itest-value",
)
require.NoError(t.t, addConstraint(customCaveatReadonlyMac))
require.NoError(ht, addConstraint(customCaveatReadonlyMac))
customCaveatAdminMac, err := macaroons.SafeCopyMacaroon(adminMac)
require.NoError(t.t, err)
require.NoError(t.t, addConstraint(customCaveatAdminMac))
require.NoError(ht, err)
require.NoError(ht, addConstraint(customCaveatAdminMac))
// Run all sub-tests now. We can't run anything in parallel because that
// would cause the main test function to exit and the nodes being
// cleaned up.
t.t.Run("registration restrictions", func(tt *testing.T) {
middlewareRegistrationRestrictionTests(tt, net.Alice)
ht.Run("registration restrictions", func(tt *testing.T) {
middlewareRegistrationRestrictionTests(tt, alice)
})
t.t.Run("read-only intercept", func(tt *testing.T) {
ht.Run("read-only intercept", func(tt *testing.T) {
registration := registerMiddleware(
tt, net.Alice, &lnrpc.MiddlewareRegistration{
MiddlewareName: "itest-interceptor",
tt, alice, &lnrpc.MiddlewareRegistration{
MiddlewareName: "itest-interceptor-1",
ReadOnlyMode: true,
}, true,
)
defer registration.cancel()
middlewareInterceptionTest(
tt, net.Alice, net.Bob, registration, readonlyMac,
tt, alice, bob, registration, readonlyMac,
customCaveatReadonlyMac, true,
)
})
// We've manually disconnected Bob from Alice in the previous test, make
// sure they're connected again.
net.EnsureConnected(t.t, net.Alice, net.Bob)
t.t.Run("encumbered macaroon intercept", func(tt *testing.T) {
//
// NOTE: we may get an error here saying "interceptor RPC client quit"
// as it takes some time for the interceptor to fully quit. Thus we
// restart the node here to make sure the old interceptor is removed
// from registration.
ht.RestartNode(alice)
ht.EnsureConnected(alice, bob)
ht.Run("encumbered macaroon intercept", func(tt *testing.T) {
registration := registerMiddleware(
tt, net.Alice, &lnrpc.MiddlewareRegistration{
MiddlewareName: "itest-interceptor",
tt, alice, &lnrpc.MiddlewareRegistration{
MiddlewareName: "itest-interceptor-2",
CustomMacaroonCaveatName: "itest-caveat",
}, true,
)
defer registration.cancel()
middlewareInterceptionTest(
tt, net.Alice, net.Bob, registration,
tt, alice, bob, registration,
customCaveatReadonlyMac, readonlyMac, false,
)
})
// Next, run the response manipulation tests.
net.EnsureConnected(t.t, net.Alice, net.Bob)
t.t.Run("read-only not allowed to manipulate", func(tt *testing.T) {
//
// NOTE: we may get an error here saying "interceptor RPC client quit"
// as it takes some time for the interceptor to fully quit. Thus we
// restart the node here to make sure the old interceptor is removed
// from registration.
ht.RestartNode(alice)
ht.EnsureConnected(alice, bob)
ht.Run("read-only not allowed to manipulate", func(tt *testing.T) {
registration := registerMiddleware(
tt, net.Alice, &lnrpc.MiddlewareRegistration{
MiddlewareName: "itest-interceptor",
tt, alice, &lnrpc.MiddlewareRegistration{
MiddlewareName: "itest-interceptor-3",
ReadOnlyMode: true,
}, true,
)
defer registration.cancel()
middlewareRequestManipulationTest(
tt, net.Alice, registration, adminMac, true,
tt, alice, registration, adminMac, true,
)
middlewareResponseManipulationTest(
tt, net.Alice, net.Bob, registration, readonlyMac, true,
tt, alice, bob, registration, readonlyMac, true,
)
})
net.EnsureConnected(t.t, net.Alice, net.Bob)
t.t.Run("encumbered macaroon manipulate", func(tt *testing.T) {
// NOTE: we may get an error here saying "interceptor RPC client quit"
// as it takes some time for the interceptor to fully quit. Thus we
// restart the node here to make sure the old interceptor is removed
// from registration.
ht.RestartNode(alice)
ht.EnsureConnected(alice, bob)
ht.Run("encumbered macaroon manipulate", func(tt *testing.T) {
registration := registerMiddleware(
tt, net.Alice, &lnrpc.MiddlewareRegistration{
MiddlewareName: "itest-interceptor",
tt, alice, &lnrpc.MiddlewareRegistration{
MiddlewareName: "itest-interceptor-4",
CustomMacaroonCaveatName: "itest-caveat",
}, true,
)
defer registration.cancel()
middlewareRequestManipulationTest(
tt, net.Alice, registration, customCaveatAdminMac,
false,
tt, alice, registration, customCaveatAdminMac, false,
)
middlewareResponseManipulationTest(
tt, net.Alice, net.Bob, registration,
tt, alice, bob, registration,
customCaveatReadonlyMac, false,
)
})
// And finally make sure mandatory middleware is always checked for any
// RPC request.
t.t.Run("mandatory middleware", func(tt *testing.T) {
middlewareMandatoryTest(tt, net.Alice, net)
ht.Run("mandatory middleware", func(tt *testing.T) {
st := ht.Subtest(tt)
middlewareMandatoryTest(st, alice)
})
}
// middlewareRegistrationRestrictionTests tests all restrictions that apply to
// registering a middleware.
func middlewareRegistrationRestrictionTests(t *testing.T,
node *lntest.HarnessNode) {
node *node.HarnessNode) {
testCases := []struct {
registration *lnrpc.MiddlewareRegistration
@@ -189,10 +206,12 @@ func middlewareRegistrationRestrictionTests(t *testing.T,
// intercepted. It also makes sure that depending on the mode (read-only or
// custom macaroon caveat) a middleware only gets access to the requests it
// should be allowed access to.
func middlewareInterceptionTest(t *testing.T, node *lntest.HarnessNode,
peer *lntest.HarnessNode, registration *middlewareHarness,
userMac *macaroon.Macaroon, disallowedMac *macaroon.Macaroon,
readOnly bool) {
func middlewareInterceptionTest(t *testing.T,
node, peer *node.HarnessNode, registration *middlewareHarness,
userMac *macaroon.Macaroon,
disallowedMac *macaroon.Macaroon, readOnly bool) {
t.Helper()
// Everything we test here should be executed in a matter of
// milliseconds, so we can use one single timeout context for all calls.
@@ -253,10 +272,7 @@ func middlewareInterceptionTest(t *testing.T, node *lntest.HarnessNode,
// Disconnect Bob to trigger a peer event without using Alice's RPC
// interface itself.
_, err = peer.DisconnectPeer(ctxc, &lnrpc.DisconnectPeerRequest{
PubKey: node.PubKeyStr,
})
require.NoError(t, err)
peer.RPC.DisconnectPeer(node.PubKeyStr)
peerEvent, err := resp2.Recv()
require.NoError(t, err)
require.Equal(t, lnrpc.PeerEvent_PEER_OFFLINE, peerEvent.GetType())
@@ -330,10 +346,12 @@ func middlewareInterceptionTest(t *testing.T, node *lntest.HarnessNode,
// middlewareResponseManipulationTest tests that unary and streaming responses
// can be intercepted and also manipulated, at least if the middleware didn't
// register for read-only access.
func middlewareResponseManipulationTest(t *testing.T, node *lntest.HarnessNode,
peer *lntest.HarnessNode, registration *middlewareHarness,
func middlewareResponseManipulationTest(t *testing.T,
node, peer *node.HarnessNode, registration *middlewareHarness,
userMac *macaroon.Macaroon, readOnly bool) {
t.Helper()
// Everything we test here should be executed in a matter of
// milliseconds, so we can use one single timeout context for all calls.
ctxb := context.Background()
@@ -421,10 +439,7 @@ func middlewareResponseManipulationTest(t *testing.T, node *lntest.HarnessNode,
// Disconnect Bob to trigger a peer event without using Alice's RPC
// interface itself.
_, err = peer.DisconnectPeer(ctxc, &lnrpc.DisconnectPeerRequest{
PubKey: node.PubKeyStr,
})
require.NoError(t, err)
peer.RPC.DisconnectPeer(node.PubKeyStr)
peerEvent, err := resp2.Recv()
require.NoError(t, err)
@@ -448,10 +463,12 @@ func middlewareResponseManipulationTest(t *testing.T, node *lntest.HarnessNode,
// middlewareRequestManipulationTest tests that unary and streaming requests
// can be intercepted and also manipulated, at least if the middleware didn't
// register for read-only access.
func middlewareRequestManipulationTest(t *testing.T, node *lntest.HarnessNode,
func middlewareRequestManipulationTest(t *testing.T, node *node.HarnessNode,
registration *middlewareHarness, userMac *macaroon.Macaroon,
readOnly bool) {
t.Helper()
// Everything we test here should be executed in a matter of
// milliseconds, so we can use one single timeout context for all calls.
ctxb := context.Background()
@@ -528,54 +545,44 @@ func middlewareRequestManipulationTest(t *testing.T, node *lntest.HarnessNode,
// middlewareMandatoryTest tests that all RPC requests are blocked if there is
// a mandatory middleware declared that's currently not registered.
func middlewareMandatoryTest(t *testing.T, node *lntest.HarnessNode,
net *lntest.NetworkHarness) {
func middlewareMandatoryTest(ht *lntemp.HarnessTest, node *node.HarnessNode) {
// Let's declare our itest interceptor as mandatory but don't register
// it just yet. That should cause all RPC requests to fail, except for
// the registration itself.
node.Cfg.ExtraArgs = append(
node.Cfg.ExtraArgs,
node.Cfg.SkipUnlock = true
ht.RestartNodeWithExtraArgs(node, []string{
"--noseedbackup", "--rpcmiddleware.enable",
"--rpcmiddleware.addmandatory=itest-interceptor",
)
err := net.RestartNodeNoUnlock(node, nil, false)
require.NoError(t, err)
})
// The "wait for node to start" flag of the above restart does too much
// and has a call to GetInfo built in, which will fail in this special
// test case. So we need to do the wait and client setup manually here.
conn, err := node.ConnectRPC(true)
require.NoError(t, err)
conn, err := node.ConnectRPC()
require.NoError(ht, err)
node.InitRPCClients(conn)
err = node.WaitUntilStateReached(lnrpc.WalletState_RPC_ACTIVE)
require.NoError(t, err)
node.LightningClient = lnrpc.NewLightningClient(conn)
err = node.WaitUntilServerActive()
require.NoError(ht, err)
ctxb := context.Background()
ctxc, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
// Test a unary request first.
_, err = node.ListChannels(ctxc, &lnrpc.ListChannelsRequest{})
require.Error(t, err)
require.Contains(
t, err.Error(), "middleware 'itest-interceptor' is "+
"currently not registered",
)
_, err = node.RPC.LN.ListChannels(ctxc, &lnrpc.ListChannelsRequest{})
require.Contains(ht, err.Error(), "middleware 'itest-interceptor' is "+
"currently not registered")
// Then a streaming one.
stream, err := node.SubscribeInvoices(ctxc, &lnrpc.InvoiceSubscription{})
require.NoError(t, err)
stream := node.RPC.SubscribeInvoices(&lnrpc.InvoiceSubscription{})
_, err = stream.Recv()
require.Error(t, err)
require.Contains(
t, err.Error(), "middleware 'itest-interceptor' is "+
"currently not registered",
)
require.Error(ht, err)
require.Contains(ht, err.Error(), "middleware 'itest-interceptor' is "+
"currently not registered")
// Now let's register the middleware and try again.
registration := registerMiddleware(
t, node, &lnrpc.MiddlewareRegistration{
ht.T, node, &lnrpc.MiddlewareRegistration{
MiddlewareName: "itest-interceptor",
CustomMacaroonCaveatName: "itest-caveat",
}, true,
@@ -584,16 +591,13 @@ func middlewareMandatoryTest(t *testing.T, node *lntest.HarnessNode,
// Both the unary and streaming requests should now be allowed.
time.Sleep(500 * time.Millisecond)
_, err = node.ListChannels(ctxc, &lnrpc.ListChannelsRequest{})
require.NoError(t, err)
_, err = node.SubscribeInvoices(ctxc, &lnrpc.InvoiceSubscription{})
require.NoError(t, err)
node.RPC.ListChannels(&lnrpc.ListChannelsRequest{})
node.RPC.SubscribeInvoices(&lnrpc.InvoiceSubscription{})
// We now shut down the node manually to prevent the test from failing
// because we can't call the stop RPC if we unregister the middleware in
// the defer statement above.
err = net.ShutdownNode(node)
require.NoError(t, err)
// because we can't call the stop RPC if we unregister the middleware
// in the defer statement above.
ht.KillNode(node)
}
// assertInterceptedType makes sure that the intercept message sent by the RPC
@@ -648,35 +652,62 @@ type middlewareHarness struct {
// registerMiddleware creates a new middleware harness and sends the initial
// register message to the RPC server.
func registerMiddleware(t *testing.T, node *lntest.HarnessNode,
func registerMiddleware(t *testing.T, node *node.HarnessNode,
registration *lnrpc.MiddlewareRegistration,
waitForRegister bool) *middlewareHarness {
ctxc, cancel := context.WithCancel(context.Background())
t.Helper()
middlewareStream, err := node.RegisterRPCMiddleware(ctxc)
require.NoError(t, err)
middlewareStream, cancel := node.RPC.RegisterRPCMiddleware()
err = middlewareStream.Send(&lnrpc.RPCMiddlewareResponse{
MiddlewareMessage: &lnrpc.RPCMiddlewareResponse_Register{
errChan := make(chan error)
go func() {
msg := &lnrpc.RPCMiddlewareResponse_Register{
Register: registration,
},
}
err := middlewareStream.Send(&lnrpc.RPCMiddlewareResponse{
MiddlewareMessage: msg,
})
require.NoError(t, err)
if waitForRegister {
// Wait for the registration complete message.
regCompleteMsg, err := middlewareStream.Recv()
require.NoError(t, err)
require.True(t, regCompleteMsg.GetRegComplete())
errChan <- err
}()
select {
case <-time.After(defaultTimeout):
require.Fail(t, "registerMiddleware send timeout")
case err := <-errChan:
require.NoError(t, err, "registerMiddleware send failed")
}
return &middlewareHarness{
mh := &middlewareHarness{
t: t,
cancel: cancel,
stream: middlewareStream,
responsesChan: make(chan *lnrpc.RPCMessage),
}
if !waitForRegister {
return mh
}
// Wait for the registration complete message.
msg := make(chan *lnrpc.RPCMiddlewareRequest)
go func() {
regCompleteMsg, err := middlewareStream.Recv()
require.NoError(t, err, "registerMiddleware recv failed")
msg <- regCompleteMsg
}()
select {
case <-time.After(defaultTimeout):
require.Fail(t, "registerMiddleware recv timeout")
case m := <-msg:
require.True(t, m.GetRegComplete())
}
return mh
}
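The rewritten registration helper above guards both the stream Send and the later Recv by running each blocking call in its own goroutine and racing it against time.After, so a hung stream call fails fast with a clear error instead of stalling the whole itest run. Below is a minimal, self-contained sketch of that pattern; callWithTimeout and the demo calls are illustrative names only, not part of lnd's harness.

package main

import (
	"errors"
	"fmt"
	"time"
)

// callWithTimeout runs the blocking fn in its own goroutine and waits at most
// timeout for it to return. This mirrors the guard applied to Send and Recv
// above; it is an illustrative sketch, not lnd code.
func callWithTimeout(fn func() error, timeout time.Duration) error {
	errChan := make(chan error, 1)
	go func() {
		errChan <- fn()
	}()

	select {
	case <-time.After(timeout):
		return errors.New("call timed out")
	case err := <-errChan:
		return err
	}
}

func main() {
	// A call that returns promptly is reported as-is.
	fmt.Println(callWithTimeout(func() error { return nil }, time.Second))

	// A call that blocks past the deadline surfaces a timeout instead of
	// hanging the caller.
	fmt.Println(callWithTimeout(func() error {
		time.Sleep(2 * time.Second)
		return nil
	}, 100*time.Millisecond))
}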
// interceptUnary intercepts a unary call, optionally requesting to replace the

View file

@@ -2,35 +2,34 @@ package itest
import (
"bytes"
"context"
"encoding/hex"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/record"
"github.com/stretchr/testify/require"
)
func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
// Open a channel with 100k satoshis between Alice and Bob with Alice being
// the sole funder of the channel.
func testSingleHopInvoice(ht *lntemp.HarnessTest) {
// Open a channel with 100k satoshis between Alice and Bob with Alice
// being the sole funder of the channel.
chanAmt := btcutil.Amount(100000)
chanPoint := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
Amt: chanAmt,
},
alice, bob := ht.Alice, ht.Bob
cp := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: chanAmt},
)
// assertAmountPaid is a helper closure that asserts the amount paid by
// Alice and received by Bob are expected.
assertAmountPaid := func(expected int64) {
ht.AssertAmountPaid("alice -> bob", alice, cp, expected, 0)
ht.AssertAmountPaid("bob <- alice", bob, cp, 0, expected)
}
// Now that the channel is open, create an invoice for Bob which
// expects a payment of 1000 satoshis from Alice paid via a particular
// preimage.
@@ -41,61 +40,20 @@ func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) {
RPreimage: preimage,
Value: paymentAmt,
}
invoiceResp, err := net.Bob.AddInvoice(ctxb, invoice)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
// Wait for Alice to recognize and advertise the new channel generated
// above.
err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
if err != nil {
t.Fatalf("alice didn't advertise channel before "+
"timeout: %v", err)
}
err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
if err != nil {
t.Fatalf("bob didn't advertise channel before "+
"timeout: %v", err)
}
invoiceResp := bob.RPC.AddInvoice(invoice)
// With the invoice for Bob added, send a payment towards Alice paying
// to the above generated invoice.
resp := sendAndAssertSuccess(
t, net.Alice, &routerrpc.SendPaymentRequest{
PaymentRequest: invoiceResp.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
if hex.EncodeToString(preimage) != resp.PaymentPreimage {
t.Fatalf("preimage mismatch: expected %v, got %v", preimage,
resp.PaymentPreimage)
}
ht.CompletePaymentRequests(alice, []string{invoiceResp.PaymentRequest})
// Bob's invoice should now be found and marked as settled.
payHash := &lnrpc.PaymentHash{
RHash: invoiceResp.RHash,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
dbInvoice, err := net.Bob.LookupInvoice(ctxt, payHash)
if err != nil {
t.Fatalf("unable to lookup invoice: %v", err)
}
if !dbInvoice.Settled { // nolint:staticcheck
t.Fatalf("bob's invoice should be marked as settled: %v",
spew.Sdump(dbInvoice))
}
dbInvoice := bob.RPC.LookupInvoice(invoiceResp.RHash)
require.Equal(ht, lnrpc.Invoice_SETTLED, dbInvoice.State,
"bob's invoice should be marked as settled")
// With the payment completed all balance related stats should be
// properly updated.
err = wait.NoError(
assertAmountSent(paymentAmt, net.Alice, net.Bob),
3*time.Second,
)
if err != nil {
t.Fatalf(err.Error())
}
assertAmountPaid(paymentAmt)
// Create another invoice for Bob, this time leaving off the preimage
// so one will be randomly generated. We'll test the proper
@@ -104,39 +62,22 @@ func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) {
Memo: "test3",
Value: paymentAmt,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
invoiceResp, err = net.Bob.AddInvoice(ctxt, invoice)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
invoiceResp = bob.RPC.AddInvoice(invoice)
// Next send another payment, but this time using a zpay32 encoded
// invoice rather than manually specifying the payment details.
sendAndAssertSuccess(
t, net.Alice, &routerrpc.SendPaymentRequest{
PaymentRequest: invoiceResp.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
ht.CompletePaymentRequests(alice, []string{invoiceResp.PaymentRequest})
// The second payment should also have succeeded, with the balances
// being updated accordingly.
err = wait.NoError(
assertAmountSent(2*paymentAmt, net.Alice, net.Bob),
3*time.Second,
)
if err != nil {
t.Fatalf(err.Error())
}
assertAmountPaid(paymentAmt * 2)
// Next send a keysend payment.
keySendPreimage := lntypes.Preimage{3, 4, 5, 11}
keySendHash := keySendPreimage.Hash()
sendAndAssertSuccess(
t, net.Alice, &routerrpc.SendPaymentRequest{
Dest: net.Bob.PubKey[:],
req := &routerrpc.SendPaymentRequest{
Dest: bob.PubKey[:],
Amt: paymentAmt,
FinalCltvDelta: 40,
PaymentHash: keySendHash[:],
@@ -145,51 +86,34 @@ func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) {
},
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
}
ht.SendPaymentAssertSettled(alice, req)
// The keysend payment should also have succeeded, with the balances
// being updated accordingly.
err = wait.NoError(
assertAmountSent(3*paymentAmt, net.Alice, net.Bob),
3*time.Second,
)
if err != nil {
t.Fatalf(err.Error())
}
assertAmountPaid(paymentAmt * 3)
// Assert that the invoice has the proper AMP fields set, since the
// legacy keysend payment should have been promoted into an AMP payment
// internally.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
keysendInvoice, err := net.Bob.LookupInvoice(
ctxt, &lnrpc.PaymentHash{
RHash: keySendHash[:],
},
)
require.NoError(t.t, err)
require.Equal(t.t, 1, len(keysendInvoice.Htlcs))
keysendInvoice := bob.RPC.LookupInvoice(keySendHash[:])
require.Len(ht, keysendInvoice.Htlcs, 1)
htlc := keysendInvoice.Htlcs[0]
require.Equal(t.t, uint64(0), htlc.MppTotalAmtMsat)
require.Nil(t.t, htlc.Amp)
require.Zero(ht, htlc.MppTotalAmtMsat)
require.Nil(ht, htlc.Amp)
// Now create an invoice and specify routing hints.
// We will test that the routing hints are encoded properly.
hintChannel := lnwire.ShortChannelID{BlockHeight: 10}
bobPubKey := hex.EncodeToString(net.Bob.PubKey[:])
hints := []*lnrpc.RouteHint{
{
HopHints: []*lnrpc.HopHint{
{
bobPubKey := hex.EncodeToString(bob.PubKey[:])
hint := &lnrpc.HopHint{
NodeId: bobPubKey,
ChanId: hintChannel.ToUint64(),
FeeBaseMsat: 1,
FeeProportionalMillionths: 1000000,
CltvExpiryDelta: 20,
},
},
},
}
hints := []*lnrpc.RouteHint{{HopHints: []*lnrpc.HopHint{hint}}}
invoice = &lnrpc.Invoice{
Memo: "hints",
@@ -197,43 +121,21 @@ func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) {
RouteHints: hints,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
invoiceResp, err = net.Bob.AddInvoice(ctxt, invoice)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
payreq, err := net.Bob.DecodePayReq(ctxt, &lnrpc.PayReqString{PayReq: invoiceResp.PaymentRequest})
if err != nil {
t.Fatalf("failed to decode payment request %v", err)
}
if len(payreq.RouteHints) != 1 {
t.Fatalf("expected one routing hint")
}
invoiceResp = bob.RPC.AddInvoice(invoice)
payreq := bob.RPC.DecodePayReq(invoiceResp.PaymentRequest)
require.Len(ht, payreq.RouteHints, 1, "expected one routing hint")
routingHint := payreq.RouteHints[0]
if len(routingHint.HopHints) != 1 {
t.Fatalf("expected one hop hint")
}
hopHint := routingHint.HopHints[0]
if hopHint.FeeProportionalMillionths != 1000000 {
t.Fatalf("wrong FeeProportionalMillionths %v",
hopHint.FeeProportionalMillionths)
}
if hopHint.NodeId != bobPubKey {
t.Fatalf("wrong NodeId %v",
hopHint.NodeId)
}
if hopHint.ChanId != hintChannel.ToUint64() {
t.Fatalf("wrong ChanId %v",
hopHint.ChanId)
}
if hopHint.FeeBaseMsat != 1 {
t.Fatalf("wrong FeeBaseMsat %v",
hopHint.FeeBaseMsat)
}
if hopHint.CltvExpiryDelta != 20 {
t.Fatalf("wrong CltvExpiryDelta %v",
hopHint.CltvExpiryDelta)
}
require.Len(ht, routingHint.HopHints, 1, "expected one hop hint")
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
hopHint := routingHint.HopHints[0]
require.EqualValues(ht, 1000000, hopHint.FeeProportionalMillionths,
"wrong FeeProportionalMillionths")
require.Equal(ht, bobPubKey, hopHint.NodeId, "wrong NodeId")
require.Equal(ht, hintChannel.ToUint64(), hopHint.ChanId,
"wrong ChanId")
require.EqualValues(ht, 1, hopHint.FeeBaseMsat, "wrong FeeBaseMsat")
require.EqualValues(ht, 20, hopHint.CltvExpiryDelta,
"wrong CltvExpiryDelta")
ht.CloseChannel(alice, cp)
}

View file

@@ -4,50 +4,10 @@
package itest
var allTestCases = []*testCase{
{
name: "open channel reorg test",
test: testOpenChannelAfterReorg,
},
{
name: "single hop invoice",
test: testSingleHopInvoice,
},
{
name: "multi-hop payments",
test: testMultiHopPayments,
},
{
name: "single-hop send to route",
test: testSingleHopSendToRoute,
},
{
name: "multi-hop send to route",
test: testMultiHopSendToRoute,
},
{
name: "send to route error propagation",
test: testSendToRouteErrorPropagation,
},
{
name: "private channels",
test: testPrivateChannels,
},
{
name: "invoice routing hints",
test: testInvoiceRoutingHints,
},
{
name: "multi-hop payments over private channels",
test: testMultiHopOverPrivateChannels,
},
{
name: "multiple channel creation and update subscription",
test: testBasicChannelCreationAndUpdates,
},
{
name: "multi-hop htlc error propagation",
test: testHtlcErrorPropagation,
},
{
name: "derive shared key",
test: testDeriveSharedKey,
@@ -84,76 +44,18 @@ var allTestCases = []*testCase{
name: "switch offline delivery outgoing offline",
test: testSwitchOfflineDeliveryOutgoingOffline,
},
{
// TODO(roasbeef): test always needs to be last as Bob's state
// is borked since we trick him into attempting to cheat Alice?
name: "revoked uncooperative close retribution",
test: testRevokedCloseRetribution,
},
{
name: "revoked uncooperative close retribution zero value remote output",
test: testRevokedCloseRetributionZeroValueRemoteOutput,
},
{
name: "revoked uncooperative close retribution remote hodl",
test: testRevokedCloseRetributionRemoteHodl,
},
{
name: "revoked uncooperative close retribution altruist watchtower",
test: testRevokedCloseRetributionAltruistWatchtower,
},
{
name: "query routes",
test: testQueryRoutes,
},
{
name: "route fee cutoff",
test: testRouteFeeCutoff,
},
{
name: "hold invoice sender persistence",
test: testHoldInvoicePersistence,
},
{
name: "hold invoice force close",
test: testHoldInvoiceForceClose,
},
{
name: "cpfp",
test: testCPFP,
},
{
name: "anchors reserved value",
test: testAnchorReservedValue,
},
{
name: "macaroon authentication",
test: testMacaroonAuthentication,
},
{
name: "bake macaroon",
test: testBakeMacaroon,
},
{
name: "delete macaroon id",
test: testDeleteMacaroonID,
},
{
name: "psbt channel funding",
test: testPsbtChanFunding,
},
{
name: "psbt channel funding external",
test: testPsbtChanFundingExternal,
},
{
name: "sign psbt",
test: testSignPsbt,
},
{
name: "psbt channel funding single step",
test: testPsbtChanFundingSingleStep,
},
{
name: "sendtoroute multi path payment",
test: testSendToRouteMultiPath,
@@ -178,10 +80,6 @@ var allTestCases = []*testCase{
name: "send multi path payment",
test: testSendMultiPathPayment,
},
{
name: "REST API",
test: testRestAPI,
},
{
name: "forward interceptor",
test: testForwardInterceptorBasic,
@@ -190,18 +88,6 @@ var allTestCases = []*testCase{
name: "forward interceptor dedup htlcs",
test: testForwardInterceptorDedupHtlc,
},
{
name: "wumbo channels",
test: testWumboChannels,
},
{
name: "maximum channel size",
test: testMaxChannelSize,
},
{
name: "stateless init",
test: testStatelessInit,
},
{
name: "wallet import account",
test: testWalletImportAccount,
@@ -210,38 +96,14 @@ var allTestCases = []*testCase{
name: "wallet import pubkey",
test: testWalletImportPubKey,
},
{
name: "etcd_failover",
test: testEtcdFailover,
},
{
name: "max htlc pathfind",
test: testMaxHtlcPathfind,
},
{
name: "rpc middleware interceptor",
test: testRPCMiddlewareInterceptor,
},
{
name: "wipe forwarding packages",
test: testWipeForwardingPackages,
},
{
name: "remote signer",
test: testRemoteSigner,
},
{
name: "3rd party anchor spend",
test: testAnchorThirdPartySpend,
},
{
name: "taproot",
test: testTaproot,
},
{
name: "resolution handoff",
test: testResHandoff,
},
{
name: "zero conf channel open",
test: testZeroConfChannelOpen,

View file

@@ -1,14 +1,11 @@
package itest
import (
"context"
"testing"
"time"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/stretchr/testify/require"
)
@@ -22,91 +19,9 @@ type pendingChan *lnrpc.PendingChannelsResponse_PendingChannel
// - Bob force closes the channel Alice->Bob, and checks from both Bob's
// PoV (local force close) and Alice's PoV (remote close) that the forwarding
// packages are wiped.
// - Bob coop closes the channel Bob->Carol, and checks from both Bob PoVs that
// the forwarding packages are wiped.
func testWipeForwardingPackages(net *lntest.NetworkHarness,
t *harnessTest) {
// Setup the test and get the channel points.
pointAB, pointBC, carol, cleanUp := setupFwdPkgTest(net, t)
defer cleanUp()
// Firstly, Bob force closes the channel.
_, _, err := net.CloseChannel(net.Bob, pointAB, true)
require.NoError(t.t, err, "unable to force close channel")
// Now that the channel has been force closed, it should show up in
// bob's PendingChannels RPC under the waiting close section.
pendingChan := assertWaitingCloseChannel(t.t, net.Bob)
// Check that Bob has created forwarding packages. We don't care about the
// exact number here as long as these packages are deleted when the
// channel is closed.
require.NotZero(t.t, pendingChan.NumForwardingPackages)
// Mine 1 block to get the closing transaction confirmed.
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err, "unable to mine blocks")
// Now that the closing transaction is confirmed, the above waiting
// close channel should now become a pending force closed channel.
pendingChan = assertPendingForceClosedChannel(t.t, net.Bob)
// Check the forwarding packages are deleted.
require.Zero(t.t, pendingChan.NumForwardingPackages)
// For Alice, the forwarding packages should have been wiped too.
pendingChanAlice := assertPendingForceClosedChannel(t.t, net.Alice)
require.Zero(t.t, pendingChanAlice.NumForwardingPackages)
// Secondly, Bob coop closes the channel.
_, _, err = net.CloseChannel(net.Bob, pointBC, false)
require.NoError(t.t, err, "unable to coop close channel")
// Now that the channel has been coop closed, it should show up in
// bob's PendingChannels RPC under the waiting close section.
pendingChan = assertWaitingCloseChannel(t.t, net.Bob)
// Check that Bob has created forwarding packages. We don't care about the
// exact number here as long as these packages are deleted when the
// channel is closed.
require.NotZero(t.t, pendingChan.NumForwardingPackages)
// Since it's a coop close, Carol should see the waiting close channel
// too.
pendingChanCarol := assertWaitingCloseChannel(t.t, carol)
require.NotZero(t.t, pendingChanCarol.NumForwardingPackages)
// Mine 1 block to get the closing transaction confirmed.
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err, "unable to mine blocks")
// Now that the closing transaction is confirmed, the above waiting
// close channel should now become a pending closed channel. Note that
// the name PendingForceClosingChannels is a bit confusing; what it
// really contains is channels whose closing tx has been broadcast.
pendingChan = assertPendingForceClosedChannel(t.t, net.Bob)
// Check the forwarding packages are deleted.
require.Zero(t.t, pendingChan.NumForwardingPackages)
// Mine a block to confirm sweep transactions such that they
// don't remain in the mempool for any subsequent tests.
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err, "unable to mine blocks")
}
// setupFwdPkgTest prepares the wipe forwarding packages tests. It creates a
// network topology that has a channel direction: Alice -> Bob -> Carol, sends
// several payments from Alice to Carol, and returns the two channel points(one
// for Alice and Bob, the other for Bob and Carol), the node Carol, and a
// cleanup function to be used when the test finishes.
func setupFwdPkgTest(net *lntest.NetworkHarness,
t *harnessTest) (*lnrpc.ChannelPoint, *lnrpc.ChannelPoint,
*lntest.HarnessNode, func()) {
ctxb := context.Background()
// - Bob coop closes the channel Bob->Carol, and checks from both Bob PoVs
// that the forwarding packages are wiped.
func testWipeForwardingPackages(ht *lntemp.HarnessTest) {
const (
chanAmt = 10e6
paymentAmt = 10e4
@@ -114,114 +29,96 @@ func setupFwdPkgTest(net *lntest.NetworkHarness,
numInvoices = 3
)
// Grab Alice and Bob from harness net.
alice, bob := net.Alice, net.Bob
// Grab Alice and Bob from HarnessTest.
alice, bob := ht.Alice, ht.Bob
// Create a new node Carol, which will create invoices that require
// Alice to pay.
carol := net.NewNode(t.t, "Carol", nil)
carol := ht.NewNode("Carol", nil)
// Connect Bob to Carol.
net.ConnectNodes(t.t, bob, carol)
ht.ConnectNodes(bob, carol)
// Open a channel between Alice and Bob.
chanPointAB := openChannelAndAssert(
t, net, alice, bob, lntest.OpenChannelParams{
Amt: chanAmt,
},
chanPointAB := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{Amt: chanAmt},
)
// Open a channel between Bob and Carol.
chanPointBC := openChannelAndAssert(
t, net, bob, carol, lntest.OpenChannelParams{
Amt: chanAmt,
},
chanPointBC := ht.OpenChannel(
bob, carol, lntemp.OpenChannelParams{Amt: chanAmt},
)
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
// Before we continue, make sure Alice has seen the channel between Bob
// and Carol.
ht.AssertTopologyChannelOpen(alice, chanPointBC)
// Alice sends several payments to Carol through Bob, which triggers
// Bob to create forwarding packages.
for i := 0; i < numInvoices; i++ {
// Add an invoice for Carol.
invoice := &lnrpc.Invoice{Memo: "testing", Value: paymentAmt}
invoiceResp, err := carol.AddInvoice(ctxt, invoice)
require.NoError(t.t, err, "unable to add invoice")
resp := carol.RPC.AddInvoice(invoice)
// Alice sends a payment to Carol through Bob.
sendAndAssertSuccess(
t, net.Alice, &routerrpc.SendPaymentRequest{
PaymentRequest: invoiceResp.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitSat: noFeeLimitMsat,
},
)
ht.CompletePaymentRequests(alice, []string{resp.PaymentRequest})
}
return chanPointAB, chanPointBC, carol, func() {
shutdownAndAssert(net, t, alice)
shutdownAndAssert(net, t, bob)
shutdownAndAssert(net, t, carol)
}
}
// assertWaitingCloseChannel checks there is a single channel that is waiting
// for close and returns the channel found.
func assertWaitingCloseChannel(t *testing.T,
node *lntest.HarnessNode) pendingChan {
ctxb := context.Background()
var channel pendingChan
require.Eventually(t, func() bool {
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
req := &lnrpc.PendingChannelsRequest{}
resp, err := node.PendingChannels(ctxt, req)
// We require the RPC call to succeed and won't retry upon
// an error.
require.NoError(t, err, "unable to query for pending channels")
if err := checkNumWaitingCloseChannels(resp, 1); err != nil {
return false
}
channel = resp.WaitingCloseChannels[0].Channel
return true
}, defaultTimeout, 200*time.Millisecond)
return channel
}
// assertForceClosedChannel checks there is a single channel that is pending
// force closed and returns the channel found.
func assertPendingForceClosedChannel(t *testing.T,
node *lntest.HarnessNode) pendingChan {
ctxb := context.Background()
var channel pendingChan
require.Eventually(t, func() bool {
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
req := &lnrpc.PendingChannelsRequest{}
resp, err := node.PendingChannels(ctxt, req)
// We require the RPC call to succeed and won't retry upon
// an error.
require.NoError(t, err, "unable to query for pending channels")
if err := checkNumForceClosedChannels(resp, 1); err != nil {
return false
}
channel = resp.PendingForceClosingChannels[0].Channel
return true
}, defaultTimeout, 200*time.Millisecond)
return channel
// TODO(yy): remove the sleep once the following bug is fixed.
// When the invoice is reported settled, the commitment dance is not
// yet finished, which can cause an error when closing the channel,
// saying there are active HTLCs. We need to investigate this issue and
// reverse the order: first finish the commitment dance, then report
// the invoice as settled. (A generic polling sketch follows this test.)
time.Sleep(2 * time.Second)
// Firstly, Bob force closes the channel.
ht.CloseChannelAssertPending(bob, chanPointAB, true)
// Now that the channel has been force closed, it should show up in
// bob's PendingChannels RPC under the waiting close section.
pendingAB := ht.AssertChannelWaitingClose(bob, chanPointAB).Channel
// Check that Bob has created forwarding packages. We don't care about the
// exact number here as long as these packages are deleted when the
// channel is closed.
require.NotZero(ht, pendingAB.NumForwardingPackages)
// Secondly, Bob coop closes the channel.
ht.CloseChannelAssertPending(bob, chanPointBC, false)
// Now that the channel has been coop closed, it should show up in
// bob's PendingChannels RPC under the waiting close section.
pendingBC := ht.AssertChannelWaitingClose(bob, chanPointBC).Channel
// Check that Bob has created forwarding packages. We don't care about the
// exact number here as long as these packages are deleted when the
// channel is closed.
require.NotZero(ht, pendingBC.NumForwardingPackages)
// Since it's a coop close, Carol should see the waiting close channel
// too.
pendingBC = ht.AssertChannelWaitingClose(carol, chanPointBC).Channel
require.NotZero(ht, pendingBC.NumForwardingPackages)
// Mine 1 block to get the two closing transactions confirmed.
ht.MineBlocksAndAssertNumTxes(1, 2)
// Now that the closing transaction is confirmed, the above waiting
// close channel should now become a pending force closed channel.
pendingAB = ht.AssertChannelPendingForceClose(bob, chanPointAB).Channel
// Check the forwarding packages are deleted.
require.Zero(ht, pendingAB.NumForwardingPackages)
// For Alice, the forwarding packages should have been wiped too.
pending := ht.AssertChannelPendingForceClose(alice, chanPointAB)
pendingAB = pending.Channel
require.Zero(ht, pendingAB.NumForwardingPackages)
// Mine 1 block to get Alice's sweeping tx confirmed.
ht.MineBlocksAndAssertNumTxes(1, 1)
// Clean up the force closed channel.
ht.CleanupForceClose(bob, chanPointAB)
}
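The fixed two-second sleep above works around the race noted in the TODO: the invoice is reported settled before the commitment dance finishes, so an immediate close can complain about active HTLCs. The usual alternative is to poll the condition with a deadline, which lnd's itests typically do through the lntest/wait helpers; the generic stand-in below uses made-up names and is not the harness API.

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls pred every interval until it returns true or the deadline
// passes, replacing a fixed sleep with a bounded wait on the actual
// condition (here, "the commitment dance has finished"). Illustrative only.
func waitFor(pred func() bool, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for !pred() {
		if time.Now().After(deadline) {
			return errors.New("condition not met before timeout")
		}
		time.Sleep(interval)
	}
	return nil
}

func main() {
	start := time.Now()

	// Pretend the pending HTLC clears after roughly 300ms.
	err := waitFor(func() bool {
		return time.Since(start) > 300*time.Millisecond
	}, 2*time.Second, 50*time.Millisecond)

	fmt.Println("waited:", time.Since(start).Round(10*time.Millisecond), "err:", err)
}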

View file

@@ -1,70 +1,52 @@
package itest
import (
"strings"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/funding"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lnwallet"
)
// testWumboChannels tests that only a node that signals wumbo channel
// acceptances will allow a wumbo channel to be created. Additionally, if a
// node is running with mini channels only enabled, then it should reject any
// inbound wumbo channel requests.
func testWumboChannels(net *lntest.NetworkHarness, t *harnessTest) {
func testWumboChannels(ht *lntemp.HarnessTest) {
// With all the channel types exercised, we'll now make sure the wumbo
// signalling support works properly.
//
// We'll make two new nodes, with one of them signalling support for
// wumbo channels while the other doesn't.
wumboNode := net.NewNode(
t.t, "wumbo", []string{"--protocol.wumbo-channels"},
)
defer shutdownAndAssert(net, t, wumboNode)
miniNode := net.NewNode(t.t, "mini", nil)
defer shutdownAndAssert(net, t, miniNode)
wumboNode := ht.NewNode("wumbo", []string{"--protocol.wumbo-channels"})
miniNode := ht.NewNode("mini", nil)
// We'll send coins to the wumbo node, as it'll be the one imitating
// the channel funding.
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, wumboNode)
ht.FundCoins(btcutil.SatoshiPerBitcoin, wumboNode)
// Next we'll connect both nodes, then attempt to make a wumbo channel
// funding request to the mini node we created above. The wumbo request
// should fail as the node isn't advertising wumbo channels.
net.EnsureConnected(t.t, wumboNode, miniNode)
ht.EnsureConnected(wumboNode, miniNode)
chanAmt := funding.MaxBtcFundingAmount + 1
_, err := net.OpenChannel(
wumboNode, miniNode, lntest.OpenChannelParams{
Amt: chanAmt,
},
)
if err == nil {
t.Fatalf("expected wumbo channel funding to fail")
}
// The test should indicate a failure due to the channel being too
// large.
if !strings.Contains(err.Error(), "exceeds maximum chan size") {
t.Fatalf("channel should be rejected due to size, instead "+
"error was: %v", err)
}
ht.OpenChannelAssertErr(
wumboNode, miniNode, lntemp.OpenChannelParams{Amt: chanAmt},
lnwallet.ErrChanTooLarge(chanAmt, funding.MaxBtcFundingAmount),
)
// We'll now make another wumbo node to accept our wumbo channel
// funding.
wumboNode2 := net.NewNode(
t.t, "wumbo2", []string{"--protocol.wumbo-channels"},
wumboNode2 := ht.NewNode(
"wumbo2", []string{"--protocol.wumbo-channels"},
)
defer shutdownAndAssert(net, t, wumboNode2)
// Creating a wumbo channel between these two nodes should succeed.
net.EnsureConnected(t.t, wumboNode, wumboNode2)
chanPoint := openChannelAndAssert(
t, net, wumboNode, wumboNode2,
lntest.OpenChannelParams{
Amt: chanAmt,
},
ht.EnsureConnected(wumboNode, wumboNode2)
chanPoint := ht.OpenChannel(
wumboNode, wumboNode2, lntemp.OpenChannelParams{Amt: chanAmt},
)
closeChannelAndAssert(t, net, wumboNode, chanPoint, false)
ht.CloseChannel(wumboNode, chanPoint)
}

View file

@@ -798,6 +798,9 @@ func (l *LightningWallet) handleFundingReserveRequest(req *InitFundingReserveMsg
return
}
walletLog.Debugf("Registered funding intent for "+
"PendingChanID: %x", req.PendingChanID)
localFundingAmt = fundingIntent.LocalFundingAmt()
remoteFundingAmt = fundingIntent.RemoteFundingAmt()
}
@@ -891,6 +894,10 @@ func (l *LightningWallet) handleFundingReserveRequest(req *InitFundingReserveMsg
// completed, or canceled.
req.resp <- reservation
req.err <- nil
walletLog.Debugf("Successfully handled funding reservation with "+
"pendingChanID: %x, reservationID: %v",
reservation.pendingChanID, reservation.reservationID)
}
// enforceReservedValue enforces that the wallet, upon a new channel being

View file

@@ -77,6 +77,7 @@ func (b *bandwidthManager) getBandwidth(cid lnwire.ShortChannelID,
if err != nil {
// If the link isn't online, then we'll report that it has
// zero bandwidth.
log.Warnf("ShortChannelID=%v: link not found: %v", cid, err)
return 0
}
@@ -84,12 +85,15 @@ func (b *bandwidthManager) getBandwidth(cid lnwire.ShortChannelID,
// to forward any HTLCs, then we'll treat it as if it isn't online in
// the first place.
if !link.EligibleToForward() {
log.Warnf("ShortChannelID=%v: not eligible to forward", cid)
return 0
}
// If our link isn't currently in a state where it can add another
// outgoing htlc, treat the link as unusable.
if err := link.MayAddOutgoingHtlc(amount); err != nil {
log.Warnf("ShortChannelID=%v: cannot add outgoing htlc: %v",
cid, err)
return 0
}
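The hunk above only adds logging, but it spells out the three reasons getBandwidth reports zero: the link cannot be found, it is not eligible to forward, or it cannot take another outgoing HTLC of that amount. A toy sketch of that guard-clause shape follows; fakeLink and the values are invented for illustration and are not lnd's htlcswitch types.

package main

import "log"

// fakeLink is an invented stand-in for a channel link.
type fakeLink struct {
	eligible  bool
	available int64 // remaining outgoing capacity in satoshis
}

// usableBandwidth mirrors the guard-clause shape of getBandwidth: each early
// return states why zero is reported, which is what the added Warnf calls
// provide in the patch. Illustrative sketch only.
func usableBandwidth(link *fakeLink, amt int64) int64 {
	if link == nil {
		log.Println("link not found, reporting zero bandwidth")
		return 0
	}
	if !link.eligible {
		log.Println("link not eligible to forward, reporting zero bandwidth")
		return 0
	}
	if amt > link.available {
		log.Printf("cannot add outgoing htlc of %d sat, only %d sat available",
			amt, link.available)
		return 0
	}
	return link.available - amt
}

func main() {
	link := &fakeLink{eligible: true, available: 50_000}
	log.Println("bandwidth left:", usableBandwidth(link, 10_000))
	log.Println("bandwidth left:", usableBandwidth(link, 80_000))
}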