Mirror of https://github.com/lightningnetwork/lnd.git

commit 68016c0b51 (parent 9cb8b120ba)

lntemp+itest: refactor testOpenChannelAfterReorg
@@ -1709,3 +1709,16 @@ func findSweepInDetails(ht *HarnessTest, sweepTxid string,

 	return false
 }
+
+// ConnectMiner connects the miner with the chain backend in the network.
+func (h *HarnessTest) ConnectMiner() {
+	err := h.manager.chainBackend.ConnectMiner()
+	require.NoError(h, err, "failed to connect miner")
+}
+
+// DisconnectMiner removes the connection between the miner and the chain
+// backend in the network.
+func (h *HarnessTest) DisconnectMiner() {
+	err := h.manager.chainBackend.DisconnectMiner()
+	require.NoError(h, err, "failed to disconnect miner")
+}
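The two helpers above expose the chain backend's miner connection to tests, which is what lets the refactored test below trigger a reorg without reaching into the backend config. A minimal usage sketch, assuming only the helpers added in this hunk; the function name and the elided mining step are illustrative:

package itest

import "github.com/lightningnetwork/lnd/lntemp"

// testReorgSketch shows the intended call pattern: detach the node's chain
// backend from the miner, build a longer competing chain elsewhere, then
// reattach so the backend reorgs onto the longer chain.
func testReorgSketch(ht *lntemp.HarnessTest) {
	// Cut the chain backend off from the original miner.
	ht.DisconnectMiner()

	// ... mine a longer, competing chain on a second miner here ...

	// Reconnect; the backend syncs to whichever chain is now longest.
	ht.ConnectMiner()
}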
@@ -565,8 +565,10 @@ func (h *HarnessTest) AssertStreamChannelCoopClosed(hn *node.HarnessNode,
 	h.AssertNumWaitingClose(hn, 0)

 	// Finally, check that the node's topology graph has seen this channel
-	// closed.
-	h.AssertTopologyChannelClosed(hn, cp)
+	// closed if it's a public channel.
+	if !resp.Channel.Private {
+		h.AssertTopologyChannelClosed(hn, cp)
+	}

 	return closingTxid
 }
@@ -611,8 +613,10 @@ func (h *HarnessTest) AssertStreamChannelForceClosed(hn *node.HarnessNode,
 	h.AssertNumPendingForceClose(hn, 1)

 	// Finally, check that the node's topology graph has seen this channel
-	// closed.
-	h.AssertTopologyChannelClosed(hn, cp)
+	// closed if it's a public channel.
+	if !resp.Channel.Private {
+		h.AssertTopologyChannelClosed(hn, cp)
+	}

 	return closingTxid
 }
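Both close assertions above now skip the topology check when the channel is private, since an unannounced channel never produces a closed-channel update in the graph. A sketch of the caller side under that assumption; OpenChannel stands in for whichever lntemp helper opens and fully confirms the channel and is not part of this diff:

package itest

import (
	"github.com/lightningnetwork/lnd/funding"
	"github.com/lightningnetwork/lnd/lntemp"
)

// closePrivateChannelSketch opens a private channel and cooperatively closes
// it. CloseChannel eventually reaches AssertStreamChannelCoopClosed, where
// the new resp.Channel.Private branch avoids waiting for a graph update that
// would never arrive for an unannounced channel.
func closePrivateChannelSketch(ht *lntemp.HarnessTest) {
	alice, bob := ht.Alice, ht.Bob

	chanPoint := ht.OpenChannel(alice, bob, lntemp.OpenChannelParams{
		Amt:     funding.MaxBtcFundingAmount,
		Private: true,
	})

	ht.CloseChannel(alice, chanPoint)
}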
@@ -1969,3 +1973,22 @@ func (h *HarnessTest) AssertTransactionNotInWallet(hn *node.HarnessNode,

 	require.NoErrorf(h, err, "%s: failed to assert tx not found", hn.Name())
 }
+
+// WaitForNodeBlockHeight queries the node for its current block height until
+// it reaches the passed height.
+func (h *HarnessTest) WaitForNodeBlockHeight(hn *node.HarnessNode,
+	height int32) {
+
+	err := wait.NoError(func() error {
+		info := hn.RPC.GetInfo()
+		if int32(info.BlockHeight) != height {
+			return fmt.Errorf("expected block height to "+
+				"be %v, was %v", height, info.BlockHeight)
+		}
+
+		return nil
+	}, DefaultTimeout)
+
+	require.NoErrorf(h, err, "%s: timeout while waiting for height",
+		hn.Name())
+}
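WaitForNodeBlockHeight polls GetInfo until the node reports the requested height, which is how the refactored test waits for Alice to land on the reorged chain. A small sketch pairing it with the miner's best block; the wrapper name and the lntemp/node import path are assumptions for illustration:

package itest

import (
	"github.com/lightningnetwork/lnd/lntemp"
	"github.com/lightningnetwork/lnd/lntemp/node"
	"github.com/stretchr/testify/require"
)

// syncNodeToMinerSketch blocks until the given node reports the same block
// height as the test's main miner.
func syncNodeToMinerSketch(ht *lntemp.HarnessTest, hn *node.HarnessNode) {
	_, bestHeight, err := ht.Miner.Client.GetBestBlock()
	require.NoError(ht, err, "unable to get miner height")

	ht.WaitForNodeBlockHeight(hn, bestHeight)
}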
@@ -57,6 +57,16 @@ func NewMiner(ctxt context.Context, t *testing.T) *HarnessMiner {
 	return newMiner(ctxt, t, minerLogDir, minerLogFilename)
 }
+
+// NewTempMiner creates a new miner using btcd backend with the specified log
+// file dir and name.
+func NewTempMiner(ctxt context.Context, t *testing.T,
+	tempDir, tempLogFilename string) *HarnessMiner {
+
+	t.Helper()
+
+	return newMiner(ctxt, t, tempDir, tempLogFilename)
+}

 // newMiner creates a new miner using btcd's rpctest.
 func newMiner(ctxb context.Context, t *testing.T, minerDirName,
 	logFilename string) *HarnessMiner {
@@ -690,41 +690,6 @@ func assertChannelPolicy(t *harnessTest, node *lntest.HarnessNode,
 	}
 }

-// assertMinerBlockHeightDelta ensures that tempMiner is 'delta' blocks ahead
-// of miner.
-func assertMinerBlockHeightDelta(t *harnessTest,
-	miner, tempMiner *lntest.HarnessMiner, delta int32) {
-
-	// Ensure the chain lengths are what we expect.
-	var predErr error
-	err := wait.Predicate(func() bool {
-		_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
-		if err != nil {
-			predErr = fmt.Errorf("unable to get current "+
-				"blockheight %v", err)
-			return false
-		}
-
-		_, minerHeight, err := miner.Client.GetBestBlock()
-		if err != nil {
-			predErr = fmt.Errorf("unable to get current "+
-				"blockheight %v", err)
-			return false
-		}
-
-		if tempMinerHeight != minerHeight+delta {
-			predErr = fmt.Errorf("expected new miner(%d) to be %d "+
-				"blocks ahead of original miner(%d)",
-				tempMinerHeight, delta, minerHeight)
-			return false
-		}
-		return true
-	}, defaultTimeout)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
-}
-
 func checkCommitmentMaturity(
 	forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel,
 	maturityHeight uint32, blocksTilMaturity int32) error {
@@ -271,4 +271,8 @@ var allTestCasesTemp = []*lntemp.TestCase{
 		Name:     "3rd party anchor spend",
 		TestFunc: testAnchorThirdPartySpend,
 	},
+	{
+		Name:     "open channel reorg test",
+		TestFunc: testOpenChannelAfterReorg,
+	},
 }
@@ -6,12 +6,12 @@ import (
 	"time"

 	"github.com/btcsuite/btcd/btcjson"
-	"github.com/btcsuite/btcd/btcutil"
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcd/integration/rpctest"
 	"github.com/lightningnetwork/lnd/chainreg"
 	"github.com/lightningnetwork/lnd/funding"
 	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/lightningnetwork/lnd/lntemp"
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/stretchr/testify/require"
@@ -20,112 +20,83 @@ import (
 // testOpenChannelAfterReorg tests that in the case where we have an open
 // channel where the funding tx gets reorged out, the channel will no
 // longer be present in the node's routing table.
-func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
+func testOpenChannelAfterReorg(ht *lntemp.HarnessTest) {
 	// Skip test for neutrino, as we cannot disconnect the miner at will.
 	// TODO(halseth): remove when either can disconnect at will, or restart
 	// node with connection to new miner.
-	if net.BackendCfg.Name() == lntest.NeutrinoBackendName {
-		t.Skipf("skipping reorg test for neutrino backend")
+	if ht.IsNeutrinoBackend() {
+		ht.Skipf("skipping reorg test for neutrino backend")
 	}

-	var (
-		ctxb = context.Background()
-		temp = "temp"
-	)
+	temp := "temp"

 	// Set up a new miner that we can use to cause a reorg.
 	tempLogDir := ".tempminerlogs"
 	logFilename := "output-open_channel_reorg-temp_miner.log"
-	tempMiner, err := lntest.NewTempMiner(tempLogDir, logFilename)
-	require.NoError(t.t, err, "failed to create temp miner")
-	defer func() {
-		require.NoError(
-			t.t, tempMiner.Stop(),
-			"failed to clean up temp miner",
-		)
-	}()
+	tempMiner := lntemp.NewTempMiner(
+		ht.Context(), ht.T, tempLogDir, logFilename,
+	)
+	defer tempMiner.Stop()

 	// Setup the temp miner
-	require.NoError(
-		t.t, tempMiner.SetUp(false, 0), "unable to set up mining node",
-	)
+	require.NoError(ht, tempMiner.SetUp(false, 0),
+		"unable to set up mining node")
+
+	miner := ht.Miner
+	alice, bob := ht.Alice, ht.Bob

 	// We start by connecting the new miner to our original miner,
 	// such that it will sync to our original chain.
-	err = net.Miner.Client.Node(
+	err := miner.Client.Node(
 		btcjson.NConnect, tempMiner.P2PAddress(), &temp,
 	)
-	if err != nil {
-		t.Fatalf("unable to remove node: %v", err)
-	}
-	nodeSlice := []*rpctest.Harness{net.Miner.Harness, tempMiner.Harness}
-	if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
-		t.Fatalf("unable to join node on blocks: %v", err)
-	}
+	require.NoError(ht, err, "unable to connect miners")
+
+	nodeSlice := []*rpctest.Harness{miner.Harness, tempMiner.Harness}
+	err = rpctest.JoinNodes(nodeSlice, rpctest.Blocks)
+	require.NoError(ht, err, "unable to join node on blocks")

 	// The two miners should be on the same blockheight.
-	assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 0)
+	assertMinerBlockHeightDelta(ht, miner, tempMiner, 0)

 	// We disconnect the two miners, such that we can mine two different
 	// chains and can cause a reorg later.
-	err = net.Miner.Client.Node(
+	err = miner.Client.Node(
 		btcjson.NDisconnect, tempMiner.P2PAddress(), &temp,
 	)
-	if err != nil {
-		t.Fatalf("unable to remove node: %v", err)
-	}
+	require.NoError(ht, err, "unable to disconnect miners")

 	// Create a new channel that requires 1 confs before it's considered
 	// open, then broadcast the funding transaction
-	chanAmt := funding.MaxBtcFundingAmount
-	pushAmt := btcutil.Amount(0)
-	pendingUpdate, err := net.OpenPendingChannel(
-		net.Alice, net.Bob, chanAmt, pushAmt,
-	)
-	if err != nil {
-		t.Fatalf("unable to open channel: %v", err)
+	params := lntemp.OpenChannelParams{
+		Amt:     funding.MaxBtcFundingAmount,
+		Private: true,
 	}
+	pendingUpdate := ht.OpenChannelAssertPending(alice, bob, params)

 	// Wait for miner to have seen the funding tx. The temporary miner is
 	// disconnected, and won't see the transaction.
-	_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("failed to find funding tx in mempool: %v", err)
-	}
+	ht.Miner.AssertNumTxsInMempool(1)

 	// At this point, the channel's funding transaction will have been
 	// broadcast, but not confirmed, and the channel should be pending.
-	assertNumOpenChannelsPending(t, net.Alice, net.Bob, 1)
+	ht.AssertNodesNumPendingOpenChannels(alice, bob, 1)

 	fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid)
-	if err != nil {
-		t.Fatalf("unable to convert funding txid into chainhash.Hash:"+
-			" %v", err)
-	}
+	require.NoError(ht, err, "convert funding txid into chainhash failed")

 	// We now cause a fork, by letting our original miner mine 10 blocks,
 	// and our new miner mine 15. This will also confirm our pending
 	// channel on the original miner's chain, which should be considered
 	// open.
-	block := mineBlocks(t, net, 10, 1)[0]
-	assertTxInBlock(t, block, fundingTxID)
-	if _, err := tempMiner.Client.Generate(15); err != nil {
-		t.Fatalf("unable to generate blocks: %v", err)
-	}
+	block := ht.MineBlocks(10)[0]
+	ht.Miner.AssertTxInBlock(block, fundingTxID)
+	_, err = tempMiner.Client.Generate(15)
+	require.NoError(ht, err, "unable to generate blocks")

 	// Ensure the chain lengths are what we expect, with the temp miner
 	// being 5 blocks ahead.
-	assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 5)
+	assertMinerBlockHeightDelta(ht, miner, tempMiner, 5)

-	// Wait for Alice to sync to the original miner's chain.
-	_, minerHeight, err := net.Miner.Client.GetBestBlock()
-	if err != nil {
-		t.Fatalf("unable to get current blockheight %v", err)
-	}
-	err = waitForNodeBlockHeight(net.Alice, minerHeight)
-	if err != nil {
-		t.Fatalf("unable to sync to chain: %v", err)
-	}
-
 	chanPoint := &lnrpc.ChannelPoint{
 		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
@@ -135,121 +106,57 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
 	}

 	// Ensure channel is no longer pending.
-	assertNumOpenChannelsPending(t, net.Alice, net.Bob, 0)
+	ht.AssertNodesNumPendingOpenChannels(alice, bob, 0)

 	// Wait for Alice and Bob to recognize and advertise the new channel
 	// generated above.
-	err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
-	if err != nil {
-		t.Fatalf("alice didn't advertise channel before "+
-			"timeout: %v", err)
-	}
-	err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
-	if err != nil {
-		t.Fatalf("bob didn't advertise channel before "+
-			"timeout: %v", err)
-	}
+	ht.AssertTopologyChannelOpen(alice, chanPoint)
+	ht.AssertTopologyChannelOpen(bob, chanPoint)

 	// Alice should now have 1 edge in her graph.
-	req := &lnrpc.ChannelGraphRequest{
-		IncludeUnannounced: true,
-	}
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	chanGraph, err := net.Alice.DescribeGraph(ctxt, req)
-	if err != nil {
-		t.Fatalf("unable to query for alice's routing table: %v", err)
-	}
-
-	numEdges := len(chanGraph.Edges)
-	if numEdges != 1 {
-		t.Fatalf("expected to find one edge in the graph, found %d",
-			numEdges)
-	}
+	ht.AssertNumEdges(alice, 1, true)

 	// Now we disconnect Alice's chain backend from the original miner, and
 	// connect the two miners together. Since the temporary miner knows
 	// about a longer chain, both miners should sync to that chain.
-	err = net.BackendCfg.DisconnectMiner()
-	if err != nil {
-		t.Fatalf("unable to remove node: %v", err)
-	}
+	ht.DisconnectMiner()

 	// Connecting to the temporary miner should now cause our original
 	// chain to be re-orged out.
-	err = net.Miner.Client.Node(
-		btcjson.NConnect, tempMiner.P2PAddress(), &temp,
-	)
-	if err != nil {
-		t.Fatalf("unable to remove node: %v", err)
-	}
+	err = miner.Client.Node(btcjson.NConnect, tempMiner.P2PAddress(), &temp)
+	require.NoError(ht, err, "unable to connect temp miner")

-	nodes := []*rpctest.Harness{tempMiner.Harness, net.Miner.Harness}
-	if err := rpctest.JoinNodes(nodes, rpctest.Blocks); err != nil {
-		t.Fatalf("unable to join node on blocks: %v", err)
-	}
+	nodes := []*rpctest.Harness{tempMiner.Harness, miner.Harness}
+	err = rpctest.JoinNodes(nodes, rpctest.Blocks)
+	require.NoError(ht, err, "unable to join node on blocks")

 	// Once again they should be on the same chain.
-	assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 0)
+	assertMinerBlockHeightDelta(ht, miner, tempMiner, 0)

 	// Now we disconnect the two miners, and connect our original miner to
 	// our chain backend once again.
-	err = net.Miner.Client.Node(
+	err = miner.Client.Node(
 		btcjson.NDisconnect, tempMiner.P2PAddress(), &temp,
 	)
-	if err != nil {
-		t.Fatalf("unable to remove node: %v", err)
-	}
+	require.NoError(ht, err, "unable to disconnect temp miner")

-	err = net.BackendCfg.ConnectMiner()
-	if err != nil {
-		t.Fatalf("unable to remove node: %v", err)
-	}
+	ht.ConnectMiner()

 	// This should have caused a reorg, and Alice should sync to the longer
 	// chain, where the funding transaction is not confirmed.
 	_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
-	if err != nil {
-		t.Fatalf("unable to get current blockheight %v", err)
-	}
-	err = waitForNodeBlockHeight(net.Alice, tempMinerHeight)
-	if err != nil {
-		t.Fatalf("unable to sync to chain: %v", err)
-	}
+	require.NoError(ht, err, "unable to get current blockheight")
+	ht.WaitForNodeBlockHeight(alice, tempMinerHeight)

 	// Since the fundingtx was reorged out, Alice should now have no edges
 	// in her graph.
-	req = &lnrpc.ChannelGraphRequest{
-		IncludeUnannounced: true,
-	}
-
-	var predErr error
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		chanGraph, err = net.Alice.DescribeGraph(ctxt, req)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for "+
-				"alice's routing table: %v", err)
-			return false
-		}
-
-		numEdges = len(chanGraph.Edges)
-		if numEdges != 0 {
-			predErr = fmt.Errorf("expected to find "+
-				"no edge in the graph, found %d",
-				numEdges)
-			return false
-		}
-		return true
-	}, defaultTimeout)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ht.AssertNumEdges(alice, 0, true)

 	// Cleanup by mining the funding tx again, then closing the channel.
-	block = mineBlocks(t, net, 1, 1)[0]
-	assertTxInBlock(t, block, fundingTxID)
+	block = ht.MineBlocksAndAssertNumTxes(1, 1)[0]
+	ht.Miner.AssertTxInBlock(block, fundingTxID)

-	closeReorgedChannelAndAssert(t, net, net.Alice, chanPoint, false)
+	ht.CloseChannel(alice, chanPoint)
 }

 // testOpenChannelFeePolicy checks if different channel fee scenarios
@@ -579,3 +486,33 @@ func runBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
 		), "verifying alice close updates",
 	)
 }
+
+// assertMinerBlockHeightDelta ensures that tempMiner is 'delta' blocks ahead
+// of miner.
+func assertMinerBlockHeightDelta(ht *lntemp.HarnessTest,
+	miner, tempMiner *lntemp.HarnessMiner, delta int32) {
+
+	// Ensure the chain lengths are what we expect.
+	err := wait.NoError(func() error {
+		_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
+		if err != nil {
+			return fmt.Errorf("unable to get current "+
+				"blockheight %v", err)
+		}
+
+		_, minerHeight, err := miner.Client.GetBestBlock()
+		if err != nil {
+			return fmt.Errorf("unable to get current "+
+				"blockheight %v", err)
+		}
+
+		if tempMinerHeight != minerHeight+delta {
+			return fmt.Errorf("expected new miner(%d) to be %d "+
+				"blocks ahead of original miner(%d)",
+				tempMinerHeight, delta, minerHeight)
+		}
+
+		return nil
+	}, defaultTimeout)
+	require.NoError(ht, err, "failed to assert block height delta")
+}
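Compared with the removed helper, the rewritten assertMinerBlockHeightDelta drops the shared predErr variable and wait.Predicate in favour of returning errors directly from the closure and letting wait.NoError retry until the timeout, then surface the last error. The general pattern looks like this; conditionMet is a hypothetical stand-in for any check, and defaultTimeout is the itest package's existing timeout constant:

package itest

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lntemp"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/stretchr/testify/require"
)

// waitUntilSketch polls conditionMet until it holds or defaultTimeout
// expires, failing the test with the last returned error.
func waitUntilSketch(ht *lntemp.HarnessTest, conditionMet func() bool) {
	err := wait.NoError(func() error {
		if !conditionMet() {
			return fmt.Errorf("condition not met yet")
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "condition never met before timeout")
}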
@@ -4,10 +4,6 @@
 package itest

 var allTestCases = []*testCase{
-	{
-		name: "open channel reorg test",
-		test: testOpenChannelAfterReorg,
-	},
 	{
 		name: "single hop invoice",
 		test: testSingleHopInvoice,