itest: fix test flake in testZeroConfReorg

yyforyongyu 2023-01-31 21:22:54 +08:00
parent eb57de2f0e
commit 0c50d4379f
2 changed files with 31 additions and 28 deletions


@@ -1,7 +1,6 @@
 package itest
 
 import (
-	"context"
 	"testing"
 	"time"
@@ -897,10 +896,7 @@ func testZeroConfReorg(ht *lntest.HarnessTest) {
 		ht.Skipf("skipping zero-conf reorg test for neutrino backend")
 	}
 
-	var (
-		ctxb = context.Background()
-		temp = "temp"
-	)
+	var temp = "temp"
 
 	// Since zero-conf is opt in, the harness nodes provided won't be able
 	// to open zero-conf channels. In that case, we just spin up new nodes.
@@ -949,10 +945,9 @@ func testZeroConfReorg(ht *lntest.HarnessTest) {
 	// We will now attempt to query for the alias SCID in Carol's graph.
 	// We will query for the starting alias, which is exported by the
 	// aliasmgr package.
-	_, err := carol.RPC.LN.GetChanInfo(ctxb, &lnrpc.ChanInfoRequest{
+	carol.RPC.GetChanInfo(&lnrpc.ChanInfoRequest{
 		ChanId: aliasmgr.StartingAlias.ToUint64(),
 	})
-	require.NoError(ht.T, err)
 
 	// Now we will trigger a reorg and we'll assert that the edge still
 	// exists in the graph.
@@ -971,7 +966,7 @@ func testZeroConfReorg(ht *lntest.HarnessTest) {
 
 	// We start by connecting the new miner to our original miner, such
 	// that it will sync to our original chain.
-	err = ht.Miner.Client.Node(
+	err := ht.Miner.Client.Node(
 		btcjson.NConnect, tempMiner.P2PAddress(), &temp,
 	)
 	require.NoError(ht.T, err, "unable to connect node")
@@ -991,18 +986,16 @@ func testZeroConfReorg(ht *lntest.HarnessTest) {
 	require.NoError(ht.T, err, "unable to remove node")
 
 	// We now cause a fork, by letting our original miner mine 1 block and
-	// our new miner will mine 2.
-	ht.MineBlocks(1)
-	_, err = tempMiner.Client.Generate(2)
-	require.NoError(ht.T, err, "unable to generate blocks")
+	// our new miner will mine 2. We also expect the funding transaction to
+	// be mined.
+	ht.MineBlocksAndAssertNumTxes(1, 1)
+	tempMiner.MineEmptyBlocks(2)
 
 	// Ensure the temp miner is one block ahead.
 	assertMinerBlockHeightDelta(ht, ht.Miner, tempMiner, 1)
 
 	// Wait for Carol to sync to the original miner's chain.
-	_, minerHeight, err := ht.Miner.Client.GetBestBlock()
-	require.NoError(ht.T, err, "unable to get current blockheight")
+	_, minerHeight := ht.Miner.GetBestBlock()
 	ht.WaitForNodeBlockHeight(carol, minerHeight)
 
 	// Now we'll disconnect Carol's chain backend from the original miner
@@ -1033,23 +1026,19 @@ func testZeroConfReorg(ht *lntest.HarnessTest) {
 	ht.ConnectMiner()
 
-	// This should have caused a reorg and Alice should sync to the new
+	// This should have caused a reorg and Carol should sync to the new
 	// chain.
-	_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
-	require.NoError(ht.T, err, "unable to get current blockheight")
+	_, tempMinerHeight := tempMiner.GetBestBlock()
 	ht.WaitForNodeBlockHeight(carol, tempMinerHeight)
 
-	err = wait.Predicate(func() bool {
-		_, err = carol.RPC.LN.GetChanInfo(
-			ht.Context(), &lnrpc.ChanInfoRequest{
-				ChanId: aliasmgr.StartingAlias.ToUint64(),
-			})
-		return err == nil
-	}, defaultTimeout)
-	require.NoError(ht.T, err, "carol doesn't have zero-conf edge")
+	// Make sure all active nodes are synced.
+	ht.AssertActiveNodesSynced()
+
+	// Carol should have the channel once synced.
+	carol.RPC.GetChanInfo(&lnrpc.ChanInfoRequest{
+		ChanId: aliasmgr.StartingAlias.ToUint64(),
+	})
 
 	// Mine the zero-conf funding transaction so the test doesn't fail.
-	ht.MineBlocks(1)
+	ht.MineBlocksAndAssertNumTxes(1, 1)
 }
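
For context on the change above: the flake is addressed by replacing the hand-rolled wait.Predicate polling loop around the raw gRPC client with harness helpers that assert success directly, and by asserting that the funding transaction is actually mined (MineBlocksAndAssertNumTxes) instead of mining blocks blindly. Below is a minimal, self-contained sketch of the timeout-and-assert wrapper pattern the commit adopts; call and mustCall are hypothetical stand-ins, not part of the lnd harness.

package main

import (
	"context"
	"fmt"
	"log"
	"time"
)

// call stands in for any context-aware RPC method such as LN.GetChanInfo.
func call(ctx context.Context) (string, error) {
	select {
	case <-time.After(10 * time.Millisecond):
		return "channel-edge", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

// mustCall bounds the call with a timeout and fails hard on error, mirroring
// the HarnessRPC.GetChanInfo wrapper added in the second file below: callers
// no longer poll or check an error, they either get a response or stop.
func mustCall(runCtx context.Context, timeout time.Duration) string {
	ctxt, cancel := context.WithTimeout(runCtx, timeout)
	defer cancel()

	resp, err := call(ctxt)
	if err != nil {
		log.Fatalf("call failed: %v", err)
	}

	return resp
}

func main() {
	fmt.Println(mustCall(context.Background(), time.Second))
}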


@@ -682,3 +682,17 @@ func (h *HarnessRPC) SendCustomMessage(
 
 	return resp
 }
+
+// GetChanInfo makes an RPC call to the node's GetChanInfo and returns the
+// response.
+func (h *HarnessRPC) GetChanInfo(
+	req *lnrpc.ChanInfoRequest) *lnrpc.ChannelEdge {
+
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	resp, err := h.LN.GetChanInfo(ctxt, req)
+	h.NoError(err, "GetChanInfo")
+
+	return resp
+}
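
With the wrapper in place, a call site shrinks to a single assertive call. A hypothetical usage inside a test, assuming carol.RPC is the *HarnessRPC shown above and ht is the *lntest.HarnessTest from the test file:

	// The helper applies DefaultTimeout and fails the test via h.NoError,
	// so there is no error value for the caller to check.
	edge := carol.RPC.GetChanInfo(&lnrpc.ChanInfoRequest{
		ChanId: aliasmgr.StartingAlias.ToUint64(),
	})

	// The returned *lnrpc.ChannelEdge can then be inspected directly.
	ht.Logf("found zero-conf edge with capacity %d", edge.Capacity)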