Mirror of https://github.com/lightningnetwork/lnd.git
itest: manage context inside assertions - II
This commit continues moving context management into assertions.
parent 02e4c3ad4c
commit 1629858a3d

15 changed files with 71 additions and 86 deletions
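The pattern applied in every hunk below is the same: assertion and wait helpers now create, scope, and cancel their own context, instead of each call site building one and passing it in. This also removes a subtle leak at the old call sites, which typically wrote ctxt, _ := context.WithTimeout(...) and discarded the cancel function. A minimal, self-contained sketch of the shape (the helper name, timeout value, and check callback here are illustrative, not lnd APIs):

package main

import (
	"context"
	"fmt"
	"time"
)

// defaultTimeout stands in for the harness constant of the same name; the
// value is illustrative only.
const defaultTimeout = 30 * time.Second

// assertWithTimeout shows the refactored shape: the assertion owns its
// context, so the cancel function can never be dropped by a caller.
func assertWithTimeout(check func(ctx context.Context) error) error {
	ctxb := context.Background()
	ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
	defer cancel() // released on every exit path

	return check(ctx)
}

func main() {
	// The call site shrinks to a single line with no context plumbing,
	// mirroring the +/- pairs in the hunks below.
	err := assertWithTimeout(func(ctx context.Context) error {
		select {
		case <-time.After(10 * time.Millisecond):
			return nil // the simulated RPC finished in time
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	fmt.Println("assertion error:", err)
}

With the helper owning the context, defer cancel() always runs, and the many per-call-site ctxt variables in the diff below simply disappear.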
@@ -37,12 +37,16 @@ func AddToNodeLog(t *testing.T,
 // openChannelStream blocks until an OpenChannel request for a channel funding
 // by alice succeeds. If it does, a stream client is returned to receive events
 // about the opening channel.
-func openChannelStream(ctx context.Context, t *harnessTest,
-	net *lntest.NetworkHarness, alice, bob *lntest.HarnessNode,
+func openChannelStream(t *harnessTest, net *lntest.NetworkHarness,
+	alice, bob *lntest.HarnessNode,
 	p lntest.OpenChannelParams) lnrpc.Lightning_OpenChannelClient {
 
 	t.t.Helper()
 
+	ctxb := context.Background()
+	ctx, cancel := context.WithTimeout(ctxb, channelOpenTimeout)
+	defer cancel()
+
 	// Wait until we are able to fund a channel successfully. This wait
 	// prevents us from erroring out when trying to create a channel while
 	// the node is starting up.
@@ -72,7 +76,7 @@ func openChannelAndAssert(t *harnessTest, net *lntest.NetworkHarness,
 	ctx, cancel := context.WithTimeout(ctxb, channelOpenTimeout)
 	defer cancel()
 
-	chanOpenUpdate := openChannelStream(ctx, t, net, alice, bob, p)
+	chanOpenUpdate := openChannelStream(t, net, alice, bob, p)
 
 	// Mine 6 blocks, then wait for Alice's node to notify us that the
 	// channel has been opened. The funding transaction should be found
@@ -265,10 +269,14 @@ func closeChannelAndAssertType(t *harnessTest,
 //
 // NOTE: This method does not verify that the node sends a disable update for
 // the closed channel.
-func closeReorgedChannelAndAssert(ctx context.Context, t *harnessTest,
+func closeReorgedChannelAndAssert(t *harnessTest,
 	net *lntest.NetworkHarness, node *lntest.HarnessNode,
 	fundingChanPoint *lnrpc.ChannelPoint, force bool) *chainhash.Hash {
 
+	ctxb := context.Background()
+	ctx, cancel := context.WithTimeout(ctxb, channelCloseTimeout)
+	defer cancel()
+
 	closeUpdates, _, err := net.CloseChannel(ctx, node, fundingChanPoint, force)
 	require.NoError(t.t, err, "unable to close channel")
 
@@ -399,8 +407,12 @@ func findWaitingCloseChannel(pendingChanResp *lnrpc.PendingChannelsResponse,
 
 // waitForChannelPendingForceClose waits for the node to report that the
 // channel is pending force close, and that the UTXO nursery is aware of it.
-func waitForChannelPendingForceClose(ctx context.Context,
-	node *lntest.HarnessNode, fundingChanPoint *lnrpc.ChannelPoint) error {
+func waitForChannelPendingForceClose(node *lntest.HarnessNode,
+	fundingChanPoint *lnrpc.ChannelPoint) error {
 
+	ctxb := context.Background()
+	ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
+	defer cancel()
+
 	txid, err := lnrpc.GetChanPointFundingTxid(fundingChanPoint)
 	if err != nil {
@@ -443,10 +455,14 @@ type lnrpcForceCloseChannel = lnrpc.PendingChannelsResponse_ForceClosedChannel
 
 // waitForNumChannelPendingForceClose waits for the node to report a certain
 // number of channels in state pending force close.
-func waitForNumChannelPendingForceClose(ctx context.Context,
-	node *lntest.HarnessNode, expectedNum int,
+func waitForNumChannelPendingForceClose(node *lntest.HarnessNode,
+	expectedNum int,
 	perChanCheck func(channel *lnrpcForceCloseChannel) error) error {
 
+	ctxb := context.Background()
+	ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
+	defer cancel()
+
 	return wait.NoError(func() error {
 		resp, err := node.PendingChannels(
 			ctx, &lnrpc.PendingChannelsRequest{},
@@ -481,11 +497,9 @@ func waitForNumChannelPendingForceClose(ctx context.Context,
 // the following sweep transaction from the force closing node.
 func cleanupForceClose(t *harnessTest, net *lntest.NetworkHarness,
 	node *lntest.HarnessNode, chanPoint *lnrpc.ChannelPoint) {
-	ctxb := context.Background()
 
 	// Wait for the channel to be marked pending force close.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err := waitForChannelPendingForceClose(ctxt, node, chanPoint)
+	err := waitForChannelPendingForceClose(node, chanPoint)
 	require.NoError(t.t, err, "channel not pending force close")
 
 	// Mine enough blocks for the node to sweep its funds from the force
@@ -517,9 +531,13 @@ func numOpenChannelsPending(ctxt context.Context,
 
 // assertNumOpenChannelsPending asserts that a pair of nodes have the expected
 // number of pending channels between them.
-func assertNumOpenChannelsPending(ctxt context.Context, t *harnessTest,
+func assertNumOpenChannelsPending(t *harnessTest,
 	alice, bob *lntest.HarnessNode, expected int) {
 
+	ctxb := context.Background()
+	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
+	defer cancel()
+
 	err := wait.NoError(func() error {
 		aliceNumChans, err := numOpenChannelsPending(ctxt, alice)
 		if err != nil {

@@ -301,9 +301,8 @@ func testUnannouncedChannels(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// Open a channel between Alice and Bob, ensuring the
 	// channel has been opened properly.
-	ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
 	chanOpenUpdate := openChannelStream(
-		ctxt, t, net, net.Alice, net.Bob,
+		t, net, net.Alice, net.Bob,
 		lntest.OpenChannelParams{
 			Amt: amount,
 		},
@@ -315,7 +314,7 @@ func testUnannouncedChannels(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// One block is enough to make the channel ready for use, since the
 	// nodes have defaultNumConfs=1 set.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	fundingChanPoint, err := net.WaitForChannelOpen(ctxt, chanOpenUpdate)
 	if err != nil {
 		t.Fatalf("error while waiting for channel open: %v", err)

@@ -290,7 +290,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
 	net.ConnectNodes(t.t, carol, net.Alice)
 
 	chanOpenUpdate := openChannelStream(
-		ctxt, t, net, carol, net.Alice,
+		t, net, carol, net.Alice,
 		lntest.OpenChannelParams{
 			Amt:     chanAmt,
 			PushAmt: pushAmt,
@@ -398,13 +398,12 @@ func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) {
 		net, t, carol, dave, chanSize, thawHeight, 1, false,
 	)
 	_ = openChannelStream(
-		ctxb, t, net, carol, dave, lntest.OpenChannelParams{
+		t, net, carol, dave, lntest.OpenChannelParams{
 			Amt:         chanSize,
 			FundingShim: fundingShim1,
 		},
 	)
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	assertNumOpenChannelsPending(ctxt, t, carol, dave, 1)
+	assertNumOpenChannelsPending(t, carol, dave, 1)
 
 	// That channel is now pending forever and normally would saturate the
 	// max pending channel limit for both nodes. But because the channel is
@@ -439,7 +438,7 @@ func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) {
 		Memo:  "new chans",
 		Value: int64(payAmt),
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	resp, err := dave.AddInvoice(ctxt, invoice)
 	require.NoError(t.t, err)
 	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
@@ -469,8 +468,7 @@ func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// As a last step, we check if we still have the pending channel hanging
 	// around because we never published the funding TX.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	assertNumOpenChannelsPending(ctxt, t, carol, dave, 1)
+	assertNumOpenChannelsPending(t, carol, dave, 1)
 
 	// Let's make sure we can abandon it.
 	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
@@ -487,7 +485,7 @@ func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) {
 	require.NoError(t.t, err)
 
 	// It should now not appear in the pending channels anymore.
-	assertNumOpenChannelsPending(ctxt, t, carol, dave, 0)
+	assertNumOpenChannelsPending(t, carol, dave, 0)
 }
 
 // testFundingPersistence is intended to ensure that the Funding Manager
@@ -526,8 +524,7 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
 	// At this point, the channel's funding transaction will have been
 	// broadcast, but not confirmed. Alice and Bob's nodes should reflect
 	// this when queried via RPC.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	assertNumOpenChannelsPending(ctxt, t, net.Alice, carol, 1)
+	assertNumOpenChannelsPending(t, net.Alice, carol, 1)
 
 	// Restart both nodes to test that the appropriate state has been
 	// persisted and that both nodes recover gracefully.
@@ -586,8 +583,7 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// Both nodes should still show a single channel as pending.
 	time.Sleep(time.Second * 1)
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	assertNumOpenChannelsPending(ctxt, t, net.Alice, carol, 1)
+	assertNumOpenChannelsPending(t, net.Alice, carol, 1)
 
 	// Finally, mine the last block which should mark the channel as open.
 	if _, err := net.Miner.Client.Generate(1); err != nil {
@@ -597,8 +593,7 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
 	// At this point, the channel should be fully opened and there should
 	// be no pending channels remaining for either node.
 	time.Sleep(time.Second * 1)
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	assertNumOpenChannelsPending(ctxt, t, net.Alice, carol, 0)
+	assertNumOpenChannelsPending(t, net.Alice, carol, 0)
 
 	// The channel should be listed in the peer information returned by
 	// both peers.

@@ -72,8 +72,7 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) {
 	// At this point, the channel's funding transaction will have been
 	// broadcast, but not confirmed. Alice and Bob's nodes should reflect
 	// this when queried via RPC.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	assertNumOpenChannelsPending(ctxt, t, alice, bob, 1)
+	assertNumOpenChannelsPending(t, alice, bob, 1)
 
 	// Disconnect Alice-peer from Bob-peer and get error causes by one
 	// pending channel with detach node is existing.
@@ -102,9 +101,8 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) {
 	// At this point, the channel should be fully opened and there should be
 	// no pending channels remaining for either node.
 	time.Sleep(time.Millisecond * 300)
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 
-	assertNumOpenChannelsPending(ctxt, t, alice, bob, 0)
+	assertNumOpenChannelsPending(t, alice, bob, 0)
 
 	// Reconnect the nodes so that the channel can become active.
 	net.ConnectNodes(t.t, alice, bob)
@@ -510,9 +508,8 @@ func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) {
 	// the channel if the number of pending channels exceed max value.
 	openStreams := make([]lnrpc.Lightning_OpenChannelClient, maxPendingChannels)
 	for i := 0; i < maxPendingChannels; i++ {
-		ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
 		stream := openChannelStream(
-			ctxt, t, net, net.Alice, carol,
+			t, net, net.Alice, carol,
 			lntest.OpenChannelParams{
 				Amt: amount,
 			},
@@ -1051,8 +1048,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
 	closeChannelAndAssert(t, net, carol, chanPoint2, true)
 
 	// Wait for the channel to be marked pending force close.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForChannelPendingForceClose(ctxt, carol, chanPoint2)
+	err = waitForChannelPendingForceClose(carol, chanPoint2)
 	if err != nil {
 		t.Fatalf("channel not pending force close: %v", err)
 	}

@@ -318,9 +318,8 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
 	// At this point, Bob should have broadcast his second layer success
 	// transaction, and should have sent it to the nursery for incubation,
 	// or to the sweeper for sweeping.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 	err = waitForNumChannelPendingForceClose(
-		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+		bob, 1, func(c *lnrpcForceCloseChannel) error {
 			if c.Channel.LocalBalance != 0 {
 				return nil
 			}
@@ -415,15 +414,14 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
 	block = mineBlocks(t, net, 1, 1)[0]
 	assertTxInBlock(t, block, bobSweep)
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	err = waitForNumChannelPendingForceClose(bob, 0, nil)
 	require.NoError(t.t, err)
 
 	// THe channel with Alice is still open.
 	assertNodeNumChannels(t, bob, 1)
 
 	// Carol should have no channels left (open nor pending).
-	err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
+	err = waitForNumChannelPendingForceClose(carol, 0, nil)
 	require.NoError(t.t, err)
 	assertNodeNumChannels(t, carol, 0)

@@ -219,9 +219,8 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 
 	// At this point, Bob should have broadcast his second layer success
 	// transaction, and should have sent it to the nursery for incubation.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 	err = waitForNumChannelPendingForceClose(
-		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+		bob, 1, func(c *lnrpcForceCloseChannel) error {
 			if c.Channel.LocalBalance != 0 {
 				return nil
 			}
@@ -288,13 +287,12 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	block = mineBlocks(t, net, 1, 1)[0]
 	assertTxInBlock(t, block, bobSweep)
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	err = waitForNumChannelPendingForceClose(bob, 0, nil)
 	require.NoError(t.t, err)
 	assertNodeNumChannels(t, bob, 0)
 
 	// Also Carol should have no channels left (open nor pending).
-	err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
+	err = waitForNumChannelPendingForceClose(carol, 0, nil)
 	require.NoError(t.t, err)
 	assertNodeNumChannels(t, carol, 0)

@@ -223,8 +223,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 
 	// Once this transaction has been confirmed, Bob should detect that he
 	// no longer has any pending channels.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	err = waitForNumChannelPendingForceClose(bob, 0, nil)
 	require.NoError(t.t, err)
 
 	// Coop close channel, expect no anchors.

@@ -209,8 +209,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	// afterwards.
 	_, err = net.Miner.Client.Generate(1)
 	require.NoError(t.t, err)
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
+	err = waitForNumChannelPendingForceClose(carol, 0, nil)
 	require.NoError(t.t, err)
 
 	// The invoice should show as settled for Carol, indicating that it was

@@ -90,8 +90,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
 	)
 
 	// Wait for the channel to be marked pending force close.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint)
+	err = waitForChannelPendingForceClose(alice, aliceChanPoint)
 	require.NoError(t.t, err)
 
 	// After closeChannelAndAssertType returns, it has mined a block so now
@@ -230,8 +229,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
 	// Now that the sweeping transaction has been confirmed, Bob should now
 	// recognize that all contracts have been fully resolved, and show no
 	// pending close channels.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	err = waitForNumChannelPendingForceClose(bob, 0, nil)
 	require.NoError(t.t, err)
 
 	// If we then mine 3 additional blocks, Carol's second level tx will
@@ -249,8 +247,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
 	block = mineBlocks(t, net, 1, 1)[0]
 	assertTxInBlock(t, block, carolSweep)
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
+	err = waitForNumChannelPendingForceClose(carol, 0, nil)
 	require.NoError(t.t, err)
 
 	// The invoice should show as settled for Carol, indicating that it was

@@ -77,9 +77,8 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 
 	// At this point, Bob should have a pending force close channel as he
 	// just went to chain.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	err = waitForNumChannelPendingForceClose(
-		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+		bob, 1, func(c *lnrpcForceCloseChannel) error {
 			if c.LimboBalance == 0 {
 				return fmt.Errorf("bob should have nonzero "+
 					"limbo balance instead has: %v",
@@ -117,9 +116,8 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 
 	// Bob's pending channel report should show that he has a single HTLC
 	// that's now in stage one.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 	err = waitForNumChannelPendingForceClose(
-		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+		bob, 1, func(c *lnrpcForceCloseChannel) error {
 			if len(c.PendingHtlcs) != 1 {
 				return fmt.Errorf("bob should have pending " +
 					"htlc but doesn't")
@@ -155,9 +153,8 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 
 	// Additionally, Bob should now show that HTLC as being advanced to the
 	// second stage.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 	err = waitForNumChannelPendingForceClose(
-		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+		bob, 1, func(c *lnrpcForceCloseChannel) error {
 			if len(c.PendingHtlcs) != 1 {
 				return fmt.Errorf("bob should have pending " +
 					"htlc but doesn't")
@@ -189,8 +186,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 
 	// At this point, Bob should no longer show any channels as pending
 	// close.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	err = waitForNumChannelPendingForceClose(bob, 0, nil)
 	require.NoError(t.t, err)
 
 	// Coop close, no anchors.

@@ -89,8 +89,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 
 	// At this point, Bob should have a pending force close channel as
 	// Carol has gone directly to chain.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForNumChannelPendingForceClose(ctxt, bob, 1, nil)
+	err = waitForNumChannelPendingForceClose(bob, 1, nil)
 	require.NoError(t.t, err)
 
 	// Bob can sweep his output immediately. If there is an anchor, Bob will
@@ -115,9 +114,8 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// If we check Bob's pending channel report, it should show that he has
 	// a single HTLC that's now in the second stage, as skip the initial
 	// first stage since this is a direct HTLC.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 	err = waitForNumChannelPendingForceClose(
-		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+		bob, 1, func(c *lnrpcForceCloseChannel) error {
 			if len(c.PendingHtlcs) != 1 {
 				return fmt.Errorf("bob should have pending " +
 					"htlc but doesn't")
@@ -172,8 +170,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// Now we'll check Bob's pending channel report. Since this was Carol's
 	// commitment, he doesn't have to wait for any CSV delays. As a result,
 	// he should show no additional pending transactions.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	err = waitForNumChannelPendingForceClose(bob, 0, nil)
 	require.NoError(t.t, err)
 
 	// While we're here, we assert that our expired invoice's state is

@@ -98,8 +98,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// At this point, the channel's funding transaction will have been
 	// broadcast, but not confirmed, and the channel should be pending.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 1)
+	assertNumOpenChannelsPending(t, net.Alice, net.Bob, 1)
 
 	fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid)
 	if err != nil {
@@ -140,7 +139,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
 	}
 
 	// Ensure channel is no longer pending.
-	assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0)
+	assertNumOpenChannelsPending(t, net.Alice, net.Bob, 0)
 
 	// Wait for Alice and Bob to recognize and advertise the new channel
 	// generated above.
@@ -255,8 +254,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
 	block = mineBlocks(t, net, 1, 1)[0]
 	assertTxInBlock(t, block, fundingTxID)
 
-	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
-	closeReorgedChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
+	closeReorgedChannelAndAssert(t, net, net.Alice, chanPoint, false)
 }
 
 // testBasicChannelCreationAndUpdates tests multiple channel opening and closing,

@@ -183,9 +183,7 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
 	// At this point, the channel's funding transaction will have been
 	// broadcast, but not confirmed. Alice and Bob's nodes
 	// should reflect this when queried via RPC.
-	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 1)
+	assertNumOpenChannelsPending(t, net.Alice, net.Bob, 1)
 
 	// We are restarting Bob's node to let the link be created for the
 	// pending channel.
@@ -200,9 +198,7 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
 	_ = mineBlocks(t, net, 6, 1)[0]
 
 	// We verify that the channel is open from both nodes point of view.
-	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0)
+	assertNumOpenChannelsPending(t, net.Alice, net.Bob, 0)
 
 	// With the channel open, we'll create invoices for Bob that Alice will
 	// pay to in order to advance the state of the channel.

@@ -825,9 +825,8 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) {
 	// Now create a _private_ channel directly between Carol and
 	// Alice of 100k.
 	net.ConnectNodes(t.t, carol, net.Alice)
-	ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
 	chanOpenUpdate := openChannelStream(
-		ctxt, t, net, carol, net.Alice,
+		t, net, carol, net.Alice,
 		lntest.OpenChannelParams{
 			Amt:     chanAmt,
 			Private: true,
@@ -841,7 +840,7 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) {
 	// nodes have defaultNumConfs=1 set.
 	block := mineBlocks(t, net, 1, 1)[0]
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	chanPointPrivate, err := net.WaitForChannelOpen(ctxt, chanOpenUpdate)
 	if err != nil {
 		t.Fatalf("error while waiting for channel open: %v", err)

@@ -39,8 +39,8 @@ const (
 	defaultCSV          = lntest.DefaultCSV
 	defaultTimeout      = lntest.DefaultTimeout
 	minerMempoolTimeout = lntest.MinerMempoolTimeout
-	channelOpenTimeout  = lntest.ChannelOpenTimeout
 	channelCloseTimeout = lntest.ChannelCloseTimeout
+	channelOpenTimeout  = lntest.ChannelOpenTimeout
 	itestLndBinary      = "../../lnd-itest"
 	anchorSize          = 330
 	noFeeLimitMsat      = math.MaxInt64