lntest+itest: add new method CurrentHeight
parent 14e7b134d9
commit f1f341095e

14 changed files with 66 additions and 41 deletions
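The change in a nutshell: call sites that previously paid an RPC round trip to the miner via GetBestBlock() — and discarded the returned block hash — now read a height cached by the harness through the new CurrentHeight() accessor. Since the accessor returns uint32 rather than int32, most uint32(...) casts at the call sites disappear as well. A representative before/after pair, lifted from the hunks below:

	// Before: RPC round trip, hash discarded, cast required.
	_, minerHeight := ht.GetBestBlock()
	thawHeight := uint32(minerHeight + thawHeightDelta)

	// After: cached height, already a uint32.
	minerHeight := ht.CurrentHeight()
	thawHeight := minerHeight + thawHeightDelta
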
@@ -624,8 +624,8 @@ func runChanRestoreScenarioCommitTypes(ht *lntest.HarnessTest,
 	var fundingShim *lnrpc.FundingShim
 	if ct == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
-		_, minerHeight := ht.GetBestBlock()
-		thawHeight := uint32(minerHeight + thawHeightDelta)
+		minerHeight := ht.CurrentHeight()
+		thawHeight := minerHeight + thawHeightDelta
 
 		fundingShim, _ = deriveFundingShim(
 			ht, dave, carol, crs.params.Amt, thawHeight, true, ct,
 		)

@@ -160,7 +160,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 	// Fetch starting height of this test so we can compute the block
 	// heights we expect certain events to take place.
-	_, curHeight := ht.GetBestBlock()
+	curHeight := int32(ht.CurrentHeight())
 
 	// Using the current height of the chain, derive the relevant heights
 	// for incubating two-stage htlcs.

@@ -431,7 +431,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 	ht.MineBlocksAndAssertNumTxes(1, 1)
 
 	// Update current height
-	_, curHeight = ht.GetBestBlock()
+	curHeight = int32(ht.CurrentHeight())
 
 	// checkForceClosedChannelNumHtlcs verifies that a force closed channel
 	// has the proper number of htlcs.

@@ -485,7 +485,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 	// number of blocks we have generated since adding it to the nursery,
 	// and take an additional block off so that we end up one block shy of
 	// the expiry height, and add the block padding.
-	_, currentHeight := ht.GetBestBlock()
+	currentHeight := int32(ht.CurrentHeight())
 	cltvHeightDelta := int(htlcExpiryHeight - uint32(currentHeight) - 1)
 
 	// Advance the blockchain until just before the CLTV expires, nothing

@@ -662,7 +662,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
 
 	// Advance the chain until just before the 2nd-layer CSV delays expire.
 	// For anchor channels this is one block earlier.
-	_, currentHeight = ht.GetBestBlock()
+	currentHeight = int32(ht.CurrentHeight())
 	ht.Logf("current height: %v, htlcCsvMaturityHeight=%v", currentHeight,
 		htlcCsvMaturityHeight)
 	numBlocks := int(htlcCsvMaturityHeight - uint32(currentHeight) - 2)

@@ -316,7 +316,7 @@ func testGraphTopologyNtfns(ht *lntest.HarnessTest, pinned bool) {
 	ht.AssertNumNodeAnns(alice, alice.PubKeyStr, 1)
 	ht.AssertNumNodeAnns(alice, bob.PubKeyStr, 1)
 
-	_, blockHeight := ht.GetBestBlock()
+	blockHeight := ht.CurrentHeight()
 
 	// Now we'll test that updates are properly sent after channels are
 	// closed within the network.

@@ -326,7 +326,7 @@ func testGraphTopologyNtfns(ht *lntest.HarnessTest, pinned bool) {
 	// notification indicating so.
 	closedChan := ht.AssertTopologyChannelClosed(alice, chanPoint)
 
-	require.Equal(ht, uint32(blockHeight+1), closedChan.ClosedHeight,
+	require.Equal(ht, blockHeight+1, closedChan.ClosedHeight,
 		"close heights of channel mismatch")
 
 	fundingTxid := ht.OutPointFromChannelPoint(chanPoint)

@@ -862,7 +862,7 @@ func testChannelFundingPersistence(ht *lntest.HarnessTest) {
 	ht.AssertTxInBlock(block, fundingTxID)
 
 	// Get the height that our transaction confirmed at.
-	_, height := ht.GetBestBlock()
+	height := int32(ht.CurrentHeight())
 
 	// Restart both nodes to test that the appropriate state has been
 	// persisted and that both nodes recover gracefully.

@@ -59,14 +59,14 @@ func testHoldInvoiceForceClose(ht *lntest.HarnessTest) {
 	require.Len(ht, channel.PendingHtlcs, 1)
 	activeHtlc := channel.PendingHtlcs[0]
 
-	_, currentHeight := ht.GetBestBlock()
+	currentHeight := ht.CurrentHeight()
 
 	// Now we will mine blocks until the htlc expires, and wait for each
 	// node to sync to our latest height. Sanity check that we won't
 	// underflow.
-	require.Greater(ht, activeHtlc.ExpirationHeight, uint32(currentHeight),
+	require.Greater(ht, activeHtlc.ExpirationHeight, currentHeight,
 		"expected expiry after current height")
-	blocksTillExpiry := activeHtlc.ExpirationHeight - uint32(currentHeight)
+	blocksTillExpiry := activeHtlc.ExpirationHeight - currentHeight
 
 	// Alice will go to chain with some delta, sanity check that we won't
 	// underflow and subtract this from our mined blocks.

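Why the diff keeps the "sanity check that we won't underflow" even after dropping the casts: with uint32 operands, a subtraction that "goes negative" silently wraps around instead of failing. A minimal self-contained demonstration, with illustrative values only:

	package main

	import "fmt"

	func main() {
		// If the htlc expiry were at or below the current height, the
		// uint32 subtraction would wrap to a huge block count instead
		// of going negative — exactly what require.Greater guards
		// against above.
		var expirationHeight, currentHeight uint32 = 100, 103
		fmt.Println(expirationHeight - currentHeight) // 4294967293
	}
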
@@ -2092,7 +2092,7 @@ func runMultiHopHtlcAggregation(ht *lntest.HarnessTest,
 	numBlocks := uint32(forceCloseChan.BlocksTilMaturity)
 
 	// Add debug log.
-	_, height := ht.GetBestBlock()
+	height := ht.CurrentHeight()
 	bob.AddToLogf("itest: now mine %d blocks at height %d",
 		numBlocks, height)
 	ht.MineEmptyBlocks(int(numBlocks) - 1)

@@ -2232,8 +2232,8 @@ func createThreeHopNetwork(ht *lntest.HarnessTest,
 	var aliceFundingShim *lnrpc.FundingShim
 	var thawHeight uint32
 	if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
-		_, minerHeight := ht.GetBestBlock()
-		thawHeight = uint32(minerHeight + thawHeightDelta)
+		minerHeight := ht.CurrentHeight()
+		thawHeight = minerHeight + thawHeightDelta
 		aliceFundingShim, _ = deriveFundingShim(
 			ht, alice, bob, chanAmt, thawHeight, true, c,
 		)

@@ -2449,10 +2449,10 @@ func runExtraPreimageFromRemoteCommit(ht *lntest.HarnessTest,
 
 	// Get the current height to compute number of blocks to mine to
 	// trigger the htlc timeout resolver from Bob.
-	_, height := ht.GetBestBlock()
+	height := ht.CurrentHeight()
 
 	// We'll now mine enough blocks to trigger Bob's timeout resolver.
-	numBlocks = htlc.ExpirationHeight - uint32(height) -
+	numBlocks = htlc.ExpirationHeight - height -
 		lncfg.DefaultOutgoingBroadcastDelta
 
 	// We should now have Carol's htlc success tx in the mempool.

@@ -2680,12 +2680,12 @@ func runExtraPreimageFromLocalCommit(ht *lntest.HarnessTest,
 
 	// Get the current height to compute number of blocks to mine to
 	// trigger the timeout resolver from Bob.
-	_, height := ht.GetBestBlock()
+	height := ht.CurrentHeight()
 
 	// We'll now mine enough blocks to trigger Bob's htlc timeout resolver
 	// to act. Once his timeout resolver starts, it will extract the
 	// preimage from Carol's direct spend tx found in the mempool.
-	numBlocks = htlc.ExpirationHeight - uint32(height) -
+	numBlocks = htlc.ExpirationHeight - height -
 		lncfg.DefaultOutgoingBroadcastDelta
 
 	// Decrease the fee rate used by the sweeper so Bob's timeout tx will

@@ -486,7 +486,7 @@ func testAnchorThirdPartySpend(ht *lntest.HarnessTest) {
 
 	// We now update the anchor sweep's deadline to be different than the
 	// commit sweep so they can won't grouped together.
-	_, currentHeight := ht.GetBestBlock()
+	currentHeight := int32(ht.CurrentHeight())
 	deadline := int32(commit.DeadlineHeight) - currentHeight
 	require.Positive(ht, deadline)
 	ht.Logf("Found commit deadline %d, anchor deadline %d",

@@ -836,7 +836,7 @@ func testListSweeps(ht *lntest.HarnessTest) {
 	ht.MineEmptyBlocks(1)
 
 	// Get the current block height.
-	_, blockHeight := ht.GetBestBlock()
+	blockHeight := int32(ht.CurrentHeight())
 
 	// Close the second channel and also sweep the funds.
 	ht.ForceCloseChannel(alice, chanPoints[1])

@@ -117,7 +117,7 @@ func testSingleHopSendToRouteCase(ht *lntest.HarnessTest,
 	// Assert Carol and Dave are synced to the chain before proceeding, to
 	// ensure the queried route will have a valid final CLTV once the HTLC
 	// reaches Dave.
-	_, minerHeight := ht.GetBestBlock()
+	minerHeight := int32(ht.CurrentHeight())
 	ht.WaitForNodeBlockHeight(carol, minerHeight)
 	ht.WaitForNodeBlockHeight(dave, minerHeight)
 

@@ -172,7 +172,7 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
 
 	// Remember the force close height so we can calculate the deadline
 	// height.
-	_, forceCloseHeight := ht.GetBestBlock()
+	forceCloseHeight := ht.CurrentHeight()
 
 	// Bob should have two pending sweeps,
 	// - anchor sweeping from his local commitment.

@@ -188,7 +188,7 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
 	sweeps := ht.AssertNumPendingSweeps(bob, 2)
 
 	// The two anchor sweeping should have the same deadline height.
-	deadlineHeight := uint32(forceCloseHeight) + deadlineDeltaAnchor
+	deadlineHeight := forceCloseHeight + deadlineDeltaAnchor
 	require.Equal(ht, deadlineHeight, sweeps[0].DeadlineHeight)
 	require.Equal(ht, deadlineHeight, sweeps[1].DeadlineHeight)
 

@@ -304,7 +304,7 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
 	//
 	// Once out of the above loop, we expect to be 2 blocks before the CPFP
 	// deadline.
-	_, currentHeight := ht.GetBestBlock()
+	currentHeight := ht.CurrentHeight()
 	require.Equal(ht, int(anchorDeadline-2), int(currentHeight))
 
 	// Mine one more block, we'd use up all the CPFP budget.

@@ -512,8 +512,8 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
 	forceCloseHeight := htlc.ExpirationHeight - goToChainDelta
 
 	// Mine till the goToChainHeight is reached.
-	_, currentHeight := ht.GetBestBlock()
-	numBlocks := forceCloseHeight - uint32(currentHeight)
+	currentHeight := ht.CurrentHeight()
+	numBlocks := forceCloseHeight - currentHeight
 	ht.MineEmptyBlocks(int(numBlocks))
 
 	// Assert Bob's force closing tx has been broadcast.

@@ -641,7 +641,7 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
 	//
 	// Once out of the above loop, we expect to be 2 blocks before the CPFP
 	// deadline.
-	_, currentHeight = ht.GetBestBlock()
+	currentHeight = ht.CurrentHeight()
 	require.Equal(ht, int(anchorDeadline-2), int(currentHeight))
 
 	// Mine one more block, we'd use up all the CPFP budget.

@@ -1380,7 +1380,7 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
 	//
 	// TODO(yy): assert they are equal once blocks are synced via
 	// `blockbeat`.
-	_, currentHeight := ht.GetBestBlock()
+	currentHeight := int32(ht.CurrentHeight())
 	actualDeadline := int32(pendingSweepBob.DeadlineHeight) - currentHeight
 	if actualDeadline != int32(deadlineB) {
 		ht.Logf("!!! Found unsynced block between sweeper and "+

@@ -1438,7 +1438,7 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
 	//
 	// TODO(yy): assert they are equal once blocks are synced via
 	// `blockbeat`.
-	_, currentHeight = ht.GetBestBlock()
+	currentHeight = int32(ht.CurrentHeight())
 	actualDeadline = int32(aliceCommit.DeadlineHeight) - currentHeight
 	if actualDeadline != int32(deadlineA) {
 		ht.Logf("!!! Found unsynced block between Alice's sweeper and "+

@@ -1972,7 +1972,7 @@ func runBumpFee(ht *lntest.HarnessTest, alice *node.HarnessNode) {
 
 	// Since the request doesn't specify a deadline, we expect the default
 	// deadline to be used.
-	_, currentHeight := ht.GetBestBlock()
+	currentHeight := int32(ht.CurrentHeight())
 	deadline := uint32(currentHeight + sweep.DefaultDeadlineDelta)
 
 	// Assert the pending sweep is created with the expected values:

@@ -1535,13 +1535,13 @@ func publishTxAndConfirmSweep(ht *lntest.HarnessTest, node *node.HarnessNode,
 	// Before we publish the tx that spends the p2tr transaction, we want to
 	// register a spend listener that we expect to fire after mining the
 	// block.
-	_, currentHeight := ht.GetBestBlock()
+	currentHeight := ht.CurrentHeight()
 
 	// For a Taproot output we cannot leave the outpoint empty. Let's make
 	// sure the API returns the correct error here.
 	req := &chainrpc.SpendRequest{
 		Script:     spendRequest.Script,
-		HeightHint: uint32(currentHeight),
+		HeightHint: currentHeight,
 	}
 	spendClient := node.RPC.RegisterSpendNtfn(req)
 

@@ -1556,7 +1556,7 @@ func publishTxAndConfirmSweep(ht *lntest.HarnessTest, node *node.HarnessNode,
 	req = &chainrpc.SpendRequest{
 		Outpoint:   spendRequest.Outpoint,
 		Script:     spendRequest.Script,
-		HeightHint: uint32(currentHeight),
+		HeightHint: currentHeight,
 	}
 	spendClient = node.RPC.RegisterSpendNtfn(req)
 

@@ -1582,7 +1582,7 @@ func publishTxAndConfirmSweep(ht *lntest.HarnessTest, node *node.HarnessNode,
 	require.NoError(ht, err)
 	spend := spendMsg.GetSpend()
 	require.NotNil(ht, spend)
-	require.Equal(ht, spend.SpendingHeight, uint32(currentHeight+1))
+	require.Equal(ht, spend.SpendingHeight, currentHeight+1)
 }
 
 // confirmAddress makes sure that a transaction in the mempool spends funds to

@@ -1609,11 +1609,11 @@ func confirmAddress(ht *lntest.HarnessTest, hn *node.HarnessNode,
 	addrPkScript, err := txscript.PayToAddrScript(parsedAddr)
 	require.NoError(ht, err)
 
-	_, currentHeight := ht.GetBestBlock()
+	currentHeight := ht.CurrentHeight()
 	req := &chainrpc.ConfRequest{
 		Script:       addrPkScript,
 		Txid:         txid[:],
-		HeightHint:   uint32(currentHeight),
+		HeightHint:   currentHeight,
 		NumConfs:     1,
 		IncludeBlock: true,
 	}

@@ -1628,7 +1628,7 @@ func confirmAddress(ht *lntest.HarnessTest, hn *node.HarnessNode,
 	require.NoError(ht, err)
 	conf := confMsg.GetConf()
 	require.NotNil(ht, conf)
-	require.Equal(ht, conf.BlockHeight, uint32(currentHeight+1))
+	require.Equal(ht, conf.BlockHeight, currentHeight+1)
 	require.NotNil(ht, conf.RawBlock)
 
 	// We should also be able to decode the raw block.

@@ -151,7 +151,7 @@ func TestLightningNetworkDaemon(t *testing.T) {
 		}
 	}
 
-	_, height := harnessTest.GetBestBlock()
+	height := harnessTest.CurrentHeight()
 	t.Logf("=========> tests finished for tranche: %v, tested %d "+
 		"cases, end height: %d\n", trancheIndex, len(testCases), height)
 }

@@ -962,7 +962,7 @@ func testZeroConfReorg(ht *lntest.HarnessTest) {
 	ht.AssertMinerBlockHeightDelta(tempMiner, 1)
 
 	// Wait for Carol to sync to the original miner's chain.
-	_, minerHeight := ht.GetBestBlock()
+	minerHeight := int32(ht.CurrentHeight())
 	ht.WaitForNodeBlockHeight(carol, minerHeight)
 
 	// Now we'll disconnect Carol's chain backend from the original miner

@@ -105,6 +105,9 @@ type HarnessTest struct {
 	// cleaned specifies whether the cleanup has been applied for the
 	// current HarnessTest.
 	cleaned bool
+
+	// currentHeight is the current height of the chain backend.
+	currentHeight uint32
 }
 
 // harnessOpts contains functional option to modify the behavior of the various

@@ -433,7 +436,8 @@ func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
 	st.feeService.Reset()
 
 	// Record block height.
-	_, startHeight := h.GetBestBlock()
+	h.updateCurrentHeight()
+	startHeight := int32(h.CurrentHeight())
 
 	st.Cleanup(func() {
 		_, endHeight := h.GetBestBlock()

@@ -31,6 +31,9 @@ func (h *HarnessTest) Miner() *miner.HarnessMiner {
 func (h *HarnessTest) MineBlocks(num int) {
 	require.Less(h, num, maxBlocksAllowed, "too many blocks to mine")
 
+	// Update the harness's current height.
+	defer h.updateCurrentHeight()
+
 	// Mine num of blocks.
 	for i := 0; i < num; i++ {
 		block := h.miner.MineBlocks(1)[0]

@@ -73,6 +76,9 @@ func (h *HarnessTest) MineBlocks(num int) {
 func (h *HarnessTest) MineEmptyBlocks(num int) []*wire.MsgBlock {
 	require.Less(h, num, maxBlocksAllowed, "too many blocks to mine")
 
+	// Update the harness's current height.
+	defer h.updateCurrentHeight()
+
 	blocks := h.miner.MineEmptyBlocks(num)
 
 	// Finally, make sure all the active nodes are synced.

@@ -90,6 +96,9 @@ func (h *HarnessTest) MineEmptyBlocks(num int) []*wire.MsgBlock {
 func (h *HarnessTest) MineBlocksAndAssertNumTxes(num uint32,
 	numTxs int) []*wire.MsgBlock {
 
+	// Update the harness's current height.
+	defer h.updateCurrentHeight()
+
 	// If we expect transactions to be included in the blocks we'll mine,
 	// we wait here until they are seen in the miner's mempool.
 	txids := h.AssertNumTxsInMempool(numTxs)

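All three mining helpers above install the refresh with defer, so the cached height is updated after the blocks are actually mined, regardless of which path the helper returns through. A small runnable sketch of that ordering, using toy types rather than the lntest API:

	package main

	import "fmt"

	type harness struct{ currentHeight uint32 }

	func (h *harness) updateCurrentHeight() {
		h.currentHeight++ // stand-in for querying the miner's best block
		fmt.Println("cache refreshed at height", h.currentHeight)
	}

	func (h *harness) mineBlocks() {
		// Deferred calls run when mineBlocks returns, after all the
		// mining and sync work below — mirroring the diff's
		// defer h.updateCurrentHeight().
		defer h.updateCurrentHeight()
		fmt.Println("mining and syncing nodes")
	}

	func main() {
		h := &harness{currentHeight: 100}
		h.mineBlocks()
		fmt.Println("CurrentHeight now serves", h.currentHeight)
	}
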
@@ -309,3 +318,15 @@ func (h *HarnessTest) SendRawTransaction(tx *wire.MsgTx,
 
 	return *txid, nil
 }
+
+// CurrentHeight returns the current block height.
+func (h *HarnessTest) CurrentHeight() uint32 {
+	return h.currentHeight
+}
+
+// updateCurrentHeight set the harness's current height to the best known
+// height.
+func (h *HarnessTest) updateCurrentHeight() {
+	_, height := h.GetBestBlock()
+	h.currentHeight = uint32(height)
+}

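One property worth noting, implied by the definitions above: CurrentHeight() serves only the cached value, so blocks produced outside the instrumented helpers (for example, directly through the miner) are not reflected until the next updateCurrentHeight() call — which is presumably why Subtest refreshes explicitly before recording startHeight. A self-contained model of the whole pattern, with toy stand-ins for the miner and harness (not the real lntest types):

	package main

	import "fmt"

	// miner stands in for the chain backend; GetBestBlock is the
	// "expensive" RPC the cache avoids.
	type miner struct{ tip int32 }

	func (m *miner) GetBestBlock() (string, int32) { return "blockhash", m.tip }

	type harness struct {
		miner         *miner
		currentHeight uint32
	}

	func (h *harness) updateCurrentHeight() {
		_, height := h.miner.GetBestBlock()
		h.currentHeight = uint32(height)
	}

	// CurrentHeight returns the cached height without touching the miner.
	func (h *harness) CurrentHeight() uint32 { return h.currentHeight }

	// MineBlocks mines and refreshes the cache on the way out.
	func (h *harness) MineBlocks(num int) {
		defer h.updateCurrentHeight()
		h.miner.tip += int32(num)
	}

	func main() {
		h := &harness{miner: &miner{tip: 100}}
		h.updateCurrentHeight()

		h.MineBlocks(6)
		fmt.Println(h.CurrentHeight()) // 106: helper refreshed the cache

		h.miner.tip++                  // a block mined behind the harness's back...
		fmt.Println(h.CurrentHeight()) // ...still 106 until the next refresh
		h.updateCurrentHeight()
		fmt.Println(h.CurrentHeight()) // 107
	}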