lntest+lntemp: refactor testMultiHopHtlcLocalTimeout

Author: yyforyongyu
Date:   2022-07-29 01:24:54 +08:00
Parent: 4a7f45b5d4
Commit: 5a7c6cea02
5 changed files with 206 additions and 121 deletions


@@ -434,7 +434,14 @@ func (h *HarnessTest) SuspendNode(node *node.HarnessNode) func() error {
 	err := node.Stop()
 	require.NoErrorf(h, err, "failed to stop %s", node.Name())
 
-	return func() error { return node.Start(h.runCtx) }
+	// Remove the node from active nodes.
+	delete(h.manager.activeNodes, node.Cfg.NodeID)
+
+	return func() error {
+		h.manager.registerNode(node)
+		return node.Start(h.runCtx)
+	}
 }
 
 // RestartNode restarts a given node and asserts.
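A hedged usage sketch of the updated SuspendNode contract (carol here is a hypothetical node handle, not part of this diff): while suspended, the node is absent from activeNodes, and the returned closure re-registers it before restarting.

	// Suspend carol: she is removed from activeNodes, so helpers that
	// iterate the active nodes (e.g. MineBlocksAssertNodesSync below)
	// won't wait on her while she is offline.
	restartCarol := ht.SuspendNode(carol)

	// ... run assertions that need carol to be down ...

	// The closure re-registers carol before starting her again.
	require.NoError(ht, restartCarol(), "failed to resume carol")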
@@ -1160,3 +1167,23 @@ func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
 	// continue with the tests.
 	h.WaitForBlockchainSync(hn)
 }
+
+// MineBlocksAssertNodesSync mines blocks and asserts all active nodes have
+// synced to the chain. Use this method when more than three blocks are mined
+// to make sure the nodes stay synced.
+//
+// TODO(yy): replace direct mining with this one.
+func (h *HarnessTest) MineBlocksAssertNodesSync(num uint32) {
+	// If we are mining more than three blocks, use the slow mining.
+	if num > 3 {
+		h.Miner.MineBlocksSlow(num)
+	} else {
+		// Mine the blocks.
+		h.Miner.MineBlocks(num)
+	}
+
+	// Make sure all the active nodes are synced.
+	for _, node := range h.manager.activeNodes {
+		h.WaitForBlockchainSync(node)
+	}
+}
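For illustration, this is how the refactored test below drives the new helper: more than three blocks takes the slow-mining path, and every active node is then asserted to be synced.

	// Mine enough blocks to trigger Bob's force close of the channel,
	// letting all active nodes catch up to the new tip.
	numBlocks := padCLTV(
		uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
	)
	ht.MineBlocksAssertNodesSync(numBlocks)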


@@ -3,6 +3,7 @@ package lntemp
 import (
 	"context"
 	"crypto/rand"
+	"encoding/hex"
 	"fmt"
 	"math"
 	"strings"
@@ -961,3 +962,80 @@ func (h *HarnessTest) AssertChannelNumUpdates(hn *node.HarnessNode,
 	}, DefaultTimeout)
 	require.NoError(h, err, "timeout while checking for num of updates")
 }
+
+// AssertNumActiveHtlcs asserts that a given number of HTLCs are seen in the
+// node's channels.
+func (h *HarnessTest) AssertNumActiveHtlcs(hn *node.HarnessNode, num int) {
+	old := hn.State.HTLC
+
+	err := wait.NoError(func() error {
+		// We require the RPC call to succeed and won't wait for it,
+		// as a failure here is unexpected.
+		req := &lnrpc.ListChannelsRequest{}
+		nodeChans := hn.RPC.ListChannels(req)
+
+		total := 0
+		for _, channel := range nodeChans.Channels {
+			total += len(channel.PendingHtlcs)
+		}
+		if total-old != num {
+			return errNumNotMatched(hn.Name(), "active HTLCs",
+				num, total-old, total, old)
+		}
+
+		return nil
+	}, DefaultTimeout)
+
+	require.NoErrorf(h, err, "%s timeout checking num active htlcs",
+		hn.Name())
+}
+
+// AssertActiveHtlcs makes sure the node has the _exact_ HTLCs matching
+// payHashes on _all_ of its channels.
+func (h *HarnessTest) AssertActiveHtlcs(hn *node.HarnessNode,
+	payHashes ...[]byte) {
+
+	err := wait.NoError(func() error {
+		// We require the RPC call to succeed and won't wait for it,
+		// as a failure here is unexpected.
+		req := &lnrpc.ListChannelsRequest{}
+		nodeChans := hn.RPC.ListChannels(req)
+
+		for _, ch := range nodeChans.Channels {
+			// Record all payment hashes active for this channel.
+			htlcHashes := make(map[string]struct{})
+
+			for _, htlc := range ch.PendingHtlcs {
+				h := hex.EncodeToString(htlc.HashLock)
+				_, ok := htlcHashes[h]
+				if ok {
+					return fmt.Errorf("duplicate HashLock")
+				}
+				htlcHashes[h] = struct{}{}
+			}
+
+			// Channel should have exactly the payHashes active.
+			if len(payHashes) != len(htlcHashes) {
+				return fmt.Errorf("node [%s:%x] had %v "+
+					"htlcs active, expected %v",
+					hn.Name(), hn.PubKey[:],
+					len(htlcHashes), len(payHashes))
+			}
+
+			// Make sure all the payHashes are active.
+			for _, payHash := range payHashes {
+				h := hex.EncodeToString(payHash)
+				if _, ok := htlcHashes[h]; ok {
+					continue
+				}
+
+				return fmt.Errorf("node [%s:%x] didn't have "+
+					"the payHash %v active", hn.Name(),
+					hn.PubKey[:], h)
+			}
+		}
+
+		return nil
+	}, DefaultTimeout)
+
+	require.NoError(h, err, "timeout checking active HTLCs")
+}
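A short sketch mirroring the refactored test below: AssertActiveHtlcs pins the exact set of payment hashes on every channel of a node, while AssertNumActiveHtlcs only checks the count against the node's recorded HTLC state.

	// Every hop should carry exactly the two in-flight HTLCs.
	ht.AssertActiveHtlcs(alice, dustPayHash, payHash)
	ht.AssertActiveHtlcs(bob, dustPayHash, payHash)
	ht.AssertActiveHtlcs(carol, dustPayHash, payHash)

	// Once the HTLC timeout tx confirms, Alice should have none left.
	ht.AssertNumActiveHtlcs(alice, 0)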


@@ -327,3 +327,37 @@ func (h *HarnessMiner) MineBlocksSlow(num uint32) []*wire.MsgBlock {
 	return blocks
 }
+
+// AssertOutpointInMempool asserts a given outpoint can be found in the
+// mempool, returning the transaction that spends it.
+func (h *HarnessMiner) AssertOutpointInMempool(op wire.OutPoint) *wire.MsgTx {
+	var msgTx *wire.MsgTx
+
+	err := wait.NoError(func() error {
+		// We require the RPC call to succeed and won't wait for it,
+		// as a failure here is unexpected.
+		mempool := h.GetRawMempool()
+
+		if len(mempool) == 0 {
+			return fmt.Errorf("empty mempool")
+		}
+
+		for _, txid := range mempool {
+			// We require the RPC call to succeed and won't wait
+			// for it, as a failure here is unexpected.
+			tx := h.GetRawTransaction(txid)
+
+			msgTx = tx.MsgTx()
+			for _, txIn := range msgTx.TxIn {
+				if txIn.PreviousOutPoint == op {
+					return nil
+				}
+			}
+		}
+
+		return fmt.Errorf("outpoint %v not found in mempool", op)
+	}, lntest.MinerMempoolTimeout)
+
+	require.NoError(h, err, "timeout checking mempool")
+
+	return msgTx
+}
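As used by the refactored test below: assert that the force-close transaction spending the channel point is in the mempool, then derive follow-up outpoints from the returned transaction.

	// The force close tx must spend Bob's channel outpoint.
	op := ht.OutPointFromChannelPoint(bobChanPoint)
	closeTx := ht.Miner.AssertOutpointInMempool(op)

	// Its outputs can then be tracked as they are swept on-chain.
	htlcOutpoint := wire.OutPoint{Hash: closeTx.TxHash(), Index: 0}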


@@ -1,16 +1,13 @@
 package itest
 
 import (
-	"context"
-	"time"
-
 	"github.com/btcsuite/btcd/btcutil"
 	"github.com/btcsuite/btcd/wire"
 
 	"github.com/lightningnetwork/lnd/lncfg"
 	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
-	"github.com/lightningnetwork/lnd/lntest"
-	"github.com/lightningnetwork/lnd/lntest/wait"
+	"github.com/lightningnetwork/lnd/lntemp"
+	"github.com/lightningnetwork/lnd/lntemp/node"
 	"github.com/stretchr/testify/require"
 )
@@ -19,24 +16,16 @@ import (
 // it using the HTLC timeout transaction. Any dust HTLC's should be immediately
 // canceled backwards. Once the timeout has been reached, then we should sweep
 // it on-chain, and cancel the HTLC backwards.
-func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
-	alice, bob *lntest.HarnessNode, c lnrpc.CommitmentType,
-	zeroConf bool) {
-
-	ctxb := context.Background()
+func testMultiHopHtlcLocalTimeout(ht *lntemp.HarnessTest,
+	alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
 
 	// First, we'll create a three hop network: Alice -> Bob -> Carol, with
 	// Carol refusing to actually settle or directly cancel any HTLC's
 	// herself.
-	aliceChanPoint, bobChanPoint, carol := createThreeHopNetworkOld(
-		t, net, alice, bob, true, c, zeroConf,
+	aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
+		ht, alice, bob, true, c, zeroConf,
 	)
 
-	// Clean up carol's node when the test finishes.
-	defer shutdownAndAssert(net, t, carol)
-
-	time.Sleep(time.Second * 1)
-
 	// Now that our channels are set up, we'll send two HTLC's from Alice
 	// to Carol. The first HTLC will be universally considered "dust",
 	// while the second will be a proper fully valued HTLC.
@@ -46,50 +35,39 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 		finalCltvDelta = 40
 	)
 
-	ctx, cancel := context.WithCancel(ctxb)
-	defer cancel()
-
 	// We'll create two random payment hashes unknown to carol, then send
 	// each of them by manually specifying the HTLC details.
 	carolPubKey := carol.PubKey[:]
-	dustPayHash := makeFakePayHash(t)
-	payHash := makeFakePayHash(t)
-
-	_, err := alice.RouterClient.SendPaymentV2(
-		ctx, &routerrpc.SendPaymentRequest{
-			Dest:           carolPubKey,
-			Amt:            int64(dustHtlcAmt),
-			PaymentHash:    dustPayHash,
-			FinalCltvDelta: finalCltvDelta,
-			TimeoutSeconds: 60,
-			FeeLimitMsat:   noFeeLimitMsat,
-		},
-	)
-	require.NoError(t.t, err)
-
-	_, err = alice.RouterClient.SendPaymentV2(
-		ctx, &routerrpc.SendPaymentRequest{
-			Dest:           carolPubKey,
-			Amt:            int64(htlcAmt),
-			PaymentHash:    payHash,
-			FinalCltvDelta: finalCltvDelta,
-			TimeoutSeconds: 60,
-			FeeLimitMsat:   noFeeLimitMsat,
-		},
-	)
-	require.NoError(t.t, err)
+	dustPayHash := ht.Random32Bytes()
+	payHash := ht.Random32Bytes()
+
+	alice.RPC.SendPayment(&routerrpc.SendPaymentRequest{
+		Dest:           carolPubKey,
+		Amt:            int64(dustHtlcAmt),
+		PaymentHash:    dustPayHash,
+		FinalCltvDelta: finalCltvDelta,
+		TimeoutSeconds: 60,
+		FeeLimitMsat:   noFeeLimitMsat,
+	})
+
+	alice.RPC.SendPayment(&routerrpc.SendPaymentRequest{
+		Dest:           carolPubKey,
+		Amt:            int64(htlcAmt),
+		PaymentHash:    payHash,
+		FinalCltvDelta: finalCltvDelta,
+		TimeoutSeconds: 60,
+		FeeLimitMsat:   noFeeLimitMsat,
+	})
 
 	// Verify that all nodes in the path now have two HTLC's with the
 	// proper parameters.
-	nodes := []*lntest.HarnessNode{alice, bob, carol}
-	err = wait.NoError(func() error {
-		return assertActiveHtlcs(nodes, dustPayHash, payHash)
-	}, defaultTimeout)
-	require.NoError(t.t, err)
+	ht.AssertActiveHtlcs(alice, dustPayHash, payHash)
+	ht.AssertActiveHtlcs(bob, dustPayHash, payHash)
+	ht.AssertActiveHtlcs(carol, dustPayHash, payHash)
 
 	// Increase the fee estimate so that the following force close tx will
 	// be cpfp'ed.
-	net.SetFeeEstimate(30000)
+	ht.SetFeeEstimate(30000)
 
 	// We'll now mine enough blocks to trigger Bob's broadcast of his
 	// commitment transaction due to the fact that the HTLC is about to
@@ -98,8 +76,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 	numBlocks := padCLTV(
 		uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
 	)
-	_, err = net.Miner.Client.Generate(numBlocks)
-	require.NoError(t.t, err)
+	ht.MineBlocksAssertNodesSync(numBlocks)
 
 	// Bob's force close transaction should now be found in the mempool. If
 	// there are anchors, we also expect Bob's anchor sweep.
@@ -108,74 +85,51 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 	if hasAnchors {
 		expectedTxes = 2
 	}
-	_, err = waitForNTxsInMempool(
-		net.Miner.Client, expectedTxes, minerMempoolTimeout,
-	)
-	require.NoError(t.t, err)
+	ht.Miner.AssertNumTxsInMempool(expectedTxes)
 
-	bobFundingTxid, err := lnrpc.GetChanPointFundingTxid(bobChanPoint)
-	require.NoError(t.t, err)
-	bobChanOutpoint := wire.OutPoint{
-		Hash:  *bobFundingTxid,
-		Index: bobChanPoint.OutputIndex,
-	}
-	closeTxid := assertSpendingTxInMempool(
-		t, net.Miner.Client, minerMempoolTimeout, bobChanOutpoint,
-	)
+	op := ht.OutPointFromChannelPoint(bobChanPoint)
+	closeTx := ht.Miner.AssertOutpointInMempool(op)
 
 	// Mine a block to confirm the closing transaction.
-	mineBlocks(t, net, 1, expectedTxes)
+	ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)
 
 	// At this point, Bob should have canceled backwards the dust HTLC
 	// that we sent earlier. This means Alice should now only have a single
 	// HTLC on her channel.
-	nodes = []*lntest.HarnessNode{alice}
-	err = wait.NoError(func() error {
-		return assertActiveHtlcs(nodes, payHash)
-	}, defaultTimeout)
-	require.NoError(t.t, err)
+	ht.AssertActiveHtlcs(alice, payHash)
 
 	// With the closing transaction confirmed, we should expect Bob's HTLC
 	// timeout transaction to be broadcast due to the expiry being reached.
 	// If there are anchors, we also expect Carol's anchor sweep now.
-	_, err = getNTxsFromMempool(
-		net.Miner.Client, expectedTxes, minerMempoolTimeout,
-	)
-	require.NoError(t.t, err)
+	ht.Miner.AssertNumTxsInMempool(expectedTxes)
 
 	// We'll also obtain the expected HTLC timeout transaction hash.
-	htlcOutpoint := wire.OutPoint{Hash: closeTxid, Index: 0}
-	commitOutpoint := wire.OutPoint{Hash: closeTxid, Index: 1}
+	htlcOutpoint := wire.OutPoint{Hash: closeTx.TxHash(), Index: 0}
+	commitOutpoint := wire.OutPoint{Hash: closeTx.TxHash(), Index: 1}
 	if hasAnchors {
 		htlcOutpoint.Index = 2
 		commitOutpoint.Index = 3
 	}
-	htlcTimeoutTxid := assertSpendingTxInMempool(
-		t, net.Miner.Client, minerMempoolTimeout, htlcOutpoint,
-	)
+	htlcTimeoutTxid := ht.Miner.AssertOutpointInMempool(
+		htlcOutpoint,
+	).TxHash()
 
 	// Mine a block to confirm the expected transactions.
-	_ = mineBlocks(t, net, 1, expectedTxes)
+	ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)
 
 	// With Bob's HTLC timeout transaction confirmed, there should be no
 	// active HTLC's on the commitment transaction from Alice -> Bob.
-	err = wait.NoError(func() error {
-		return assertNumActiveHtlcs([]*lntest.HarnessNode{alice}, 0)
-	}, defaultTimeout)
-	require.NoError(t.t, err)
+	ht.AssertNumActiveHtlcs(alice, 0)
 
 	// At this point, Bob should show that the pending HTLC has advanced to
 	// the second stage and is ready to be swept once the timelock is up.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-	pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest)
-	require.NoError(t.t, err)
-	require.Equal(t.t, 1, len(pendingChanResp.PendingForceClosingChannels))
+	pendingChanResp := bob.RPC.PendingChannels()
+	require.Equal(ht, 1, len(pendingChanResp.PendingForceClosingChannels))
 
 	forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-	require.NotZero(t.t, forceCloseChan.LimboBalance)
-	require.Positive(t.t, forceCloseChan.BlocksTilMaturity)
-	require.Equal(t.t, 1, len(forceCloseChan.PendingHtlcs))
-	require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
+	require.NotZero(ht, forceCloseChan.LimboBalance)
+	require.Positive(ht, forceCloseChan.BlocksTilMaturity)
+	require.Equal(ht, 1, len(forceCloseChan.PendingHtlcs))
+	require.Equal(ht, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
 
 	htlcTimeoutOutpoint := wire.OutPoint{Hash: htlcTimeoutTxid, Index: 0}
 	if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
@@ -184,48 +138,40 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 		// CLTV on top of the usual CSV delay on any outputs that he can
 		// sweep back to his wallet.
 		blocksTilMaturity := uint32(forceCloseChan.BlocksTilMaturity)
-		mineBlocks(t, net, blocksTilMaturity, 0)
+		ht.MineBlocksAssertNodesSync(blocksTilMaturity)
 
 		// Check that the sweep spends the expected inputs.
-		_ = assertSpendingTxInMempool(
-			t, net.Miner.Client, minerMempoolTimeout,
-			commitOutpoint, htlcTimeoutOutpoint,
-		)
+		ht.Miner.AssertOutpointInMempool(commitOutpoint)
+		ht.Miner.AssertOutpointInMempool(htlcTimeoutOutpoint)
 	} else {
 		// Since Bob force closed the channel between him and Carol, he
 		// will incur the usual CSV delay on any outputs that he can
 		// sweep back to his wallet. We'll subtract one block from our
 		// current maturity period to assert on the mempool.
-		mineBlocks(t, net, uint32(forceCloseChan.BlocksTilMaturity-1), 0)
+		numBlocks := uint32(forceCloseChan.BlocksTilMaturity - 1)
+		ht.MineBlocksAssertNodesSync(numBlocks)
 
 		// Check that the sweep spends from the mined commitment.
-		_ = assertSpendingTxInMempool(
-			t, net.Miner.Client, minerMempoolTimeout, commitOutpoint,
-		)
+		ht.Miner.AssertOutpointInMempool(commitOutpoint)
 
 		// Mine a block to confirm Bob's commit sweep tx and assert it
 		// was in fact mined.
-		_ = mineBlocks(t, net, 1, 1)[0]
+		ht.Miner.MineBlocksAndAssertNumTxes(1, 1)
 
 		// Mine an additional block to prompt Bob to broadcast their
 		// second layer sweep due to the CSV on the HTLC timeout output.
-		mineBlocks(t, net, 1, 0)
-		_ = assertSpendingTxInMempool(
-			t, net.Miner.Client, minerMempoolTimeout,
-			htlcTimeoutOutpoint,
-		)
+		ht.Miner.MineBlocksAndAssertNumTxes(1, 0)
+		ht.Miner.AssertOutpointInMempool(htlcTimeoutOutpoint)
 	}
 
 	// Next, we'll mine a final block that should confirm the sweeping
 	// transactions left.
-	_, err = net.Miner.Client.Generate(1)
-	require.NoError(t.t, err)
+	ht.MineBlocksAssertNodesSync(1)
 
 	// Once this transaction has been confirmed, Bob should detect that he
 	// no longer has any pending channels.
-	err = waitForNumChannelPendingForceClose(bob, 0, nil)
-	require.NoError(t.t, err)
+	ht.AssertNumPendingForceClose(bob, 0)
 
 	// Coop close channel, expect no anchors.
-	closeChannelAndAssertType(t, net, alice, aliceChanPoint, false, false)
+	ht.CloseChannel(alice, aliceChanPoint)
 }


@@ -27,12 +27,12 @@ func testMultiHopHtlcClaims(ht *lntemp.HarnessTest) {
 	}
 
 	subTests := []testCase{
-		// {
-		// 	// bob: outgoing our commit timeout
-		// 	// carol: incoming their commit watch and see timeout
-		// 	name: "local force close immediate expiry",
-		// 	test: testMultiHopHtlcLocalTimeout,
-		// },
+		{
+			// bob: outgoing our commit timeout
+			// carol: incoming their commit watch and see timeout
+			name: "local force close immediate expiry",
+			test: testMultiHopHtlcLocalTimeout,
+		},
 		// {
 		// 	// bob: outgoing watch and see, they sweep on chain
 		// 	// carol: incoming our commit, know preimage