mirror of https://github.com/lightningnetwork/lnd.git
itest: test local force close restore scenario
We want to make sure we can recover funds from a channel that was force closed by the local node just before the node needed to be restored. We add a special test case with specific assertions for that scenario.
parent 294fba0bc5
commit bb4c754504
3 changed files with 172 additions and 1 deletion
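For orientation, the scenario described in the commit message can be summarized at the RPC level roughly as follows. This is a hedged, illustrative sketch rather than code from this commit: it assumes a connected lnrpc.LightningClient for Dave's old and recovered node, and the function name, parameters, and backup path are made up for the example.

package scbrestore

import (
	"context"
	"io/ioutil"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// restoreAfterLocalForceClose sketches the flow the new test exercises: Dave
// force closes the channel, goes offline, is later re-created from seed, and
// then uses the channels.backup file to restore the channel state so the
// time-locked commitment output can be swept.
func restoreAfterLocalForceClose(ctx context.Context,
	oldDave, newDave lnrpc.LightningClient,
	chanPoint *lnrpc.ChannelPoint, backupPath string) error {

	// 1. Dave force closes the channel himself, publishing his commitment
	// transaction. His to_self output is now time locked by the CSV delay.
	_, err := oldDave.CloseChannel(ctx, &lnrpc.CloseChannelRequest{
		ChannelPoint: chanPoint,
		Force:        true,
	})
	if err != nil {
		return err
	}

	// 2. The channels.backup file written by the old node must still
	// contain this channel, since the force close is not resolved yet.
	multi, err := ioutil.ReadFile(backupPath)
	if err != nil {
		return err
	}

	// 3. After the node is re-created from its seed, the multi backup is
	// handed to the new instance, which recovers the channel state and
	// can later sweep the matured commitment output.
	_, err = newDave.RestoreChannelBackups(ctx, &lnrpc.RestoreChanBackupRequest{
		Backup: &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
			MultiChanBackup: multi,
		},
	})
	return err
}

The diff below adds exactly this case to the channel backup itest, plus a helper assertion (assertTimeLockSwept) that checks both nodes sweep their funds afterwards.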
@@ -1386,6 +1386,91 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest,
	assertNodeNumChannels(t, carol, 0)
}

func assertTimeLockSwept(net *lntest.NetworkHarness, t *harnessTest,
	carol *lntest.HarnessNode, carolStartingBalance int64,
	dave *lntest.HarnessNode, daveStartingBalance int64,
	anchors bool) {

	ctxb := context.Background()
	expectedTxes := 2
	if anchors {
		expectedTxes = 3
	}

	// Carol should sweep her funds immediately, as they are not timelocked.
	// We also expect Carol and Dave to sweep their anchor, if present.
	_, err := waitForNTxsInMempool(
		net.Miner.Client, expectedTxes, minerMempoolTimeout,
	)
	require.NoError(t.t, err, "unable to find Carol's sweep tx in mempool")

	// Carol should consider the channel pending force close (since she is
	// waiting for her sweep to confirm).
	assertNumPendingChannels(t, carol, 0, 1)

	// Dave is considering it "pending force close", as he must wait
	// before he can sweep his outputs.
	assertNumPendingChannels(t, dave, 0, 1)

	// Mine the sweep (and anchor) tx(s).
	_ = mineBlocks(t, net, 1, expectedTxes)[0]

	// Now Carol should consider the channel fully closed.
	assertNumPendingChannels(t, carol, 0, 0)

	// We query Carol's balance to make sure it increased after the channel
	// closed. This checks that she was able to sweep the funds she had in
	// the channel.
	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	balReq := &lnrpc.WalletBalanceRequest{}
	carolBalResp, err := carol.WalletBalance(ctxt, balReq)
	require.NoError(t.t, err, "unable to get Carol's balance")

	carolBalance := carolBalResp.ConfirmedBalance
	require.Greater(
		t.t, carolBalance, carolStartingBalance, "balance not increased",
	)

	// After Dave's output matures, he should reclaim his funds.
	//
	// The commit sweep resolver publishes the sweep tx at defaultCSV-1 and
	// we already mined one block after the commitment was published, so
	// take that into account.
	mineBlocks(t, net, defaultCSV-1-1, 0)
	daveSweep, err := waitForTxInMempool(
		net.Miner.Client, minerMempoolTimeout,
	)
	require.NoError(t.t, err, "unable to find Dave's sweep tx in mempool")
	block := mineBlocks(t, net, 1, 1)[0]
	assertTxInBlock(t, block, daveSweep)

	// Now the channel should be fully closed also from Dave's POV.
	assertNumPendingChannels(t, dave, 0, 0)

	// Make sure Dave got his balance back.
	err = wait.NoError(func() error {
		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
		daveBalResp, err := dave.WalletBalance(ctxt, balReq)
		if err != nil {
			return fmt.Errorf("unable to get Dave's balance: %v",
				err)
		}

		daveBalance := daveBalResp.ConfirmedBalance
		if daveBalance <= daveStartingBalance {
			return fmt.Errorf("expected dave to have balance "+
				"above %d, instead had %v", daveStartingBalance,
				daveBalance)
		}

		return nil
	}, defaultTimeout)
	require.NoError(t.t, err)

	assertNodeNumChannels(t, dave, 0)
	assertNodeNumChannels(t, carol, 0)
}

// verifyCloseUpdate is used to verify that a closed channel update is of the
// expected type.
func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate,
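The block arithmetic in assertTimeLockSwept above (the defaultCSV-1-1 expression) is easy to misread, so here is a worked example. It assumes defaultCSV is 4; that value is an illustrative assumption, not taken from this commit.

package main

import "fmt"

func main() {
	// Assumed CSV delay on Dave's to_self output; illustrative value only.
	const defaultCSV = 4

	// One block was already mined right after Dave published his
	// commitment transaction, giving it its first confirmation.
	confs := 1

	// The commit sweep resolver broadcasts the sweep transaction one
	// block before the CSV delay matures, i.e. at defaultCSV-1
	// confirmations, so the test mines the difference: defaultCSV-1-1.
	mineNow := defaultCSV - 1 - confs
	confs += mineNow

	fmt.Println("blocks mined before expecting the sweep:", mineNow) // 2
	fmt.Println("commitment confirmations at that point:", confs)    // 3

	// One final block both reaches full CSV maturity and confirms
	// Dave's sweep transaction.
	confs++
	fmt.Println("confirmations when the sweep confirms:", confs) // 4
}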
@@ -390,6 +390,35 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
				)
			},
		},

		// Restore a channel that was force closed by Dave just before
		// going offline.
		{
			name: "restore force closed from backup file " +
				"anchors",
			initiator:       true,
			private:         false,
			anchorCommit:    true,
			localForceClose: true,
			restoreMethod: func(oldNode *lntest.HarnessNode,
				backupFilePath string,
				mnemonic []string) (nodeRestorer, error) {

				// Read the entire Multi backup stored within
				// this node's channels.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				if err != nil {
					return nil, err
				}

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that will restore
				// using the on-disk channels.backup.
				return chanRestoreViaRPC(
					net, password, mnemonic, multi, oldNode,
				)
			},
		},
	}

	// TODO(roasbeef): online vs offline close?
@@ -806,6 +835,10 @@ type chanRestoreTestCase struct {
	// producer format should also be created before restoring.
	legacyRevocation bool

	// localForceClose signals if the channel should be force closed by the
	// node that is going to recover.
	localForceClose bool

	// restoreMethod takes an old node, then returns a function
	// closure that'll return the same node, but with its state
	// restored via a custom method. We use this to abstract away
@@ -882,6 +915,7 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,

	// We will either open a confirmed or unconfirmed channel, depending on
	// the requirements of the test case.
	var chanPoint *lnrpc.ChannelPoint
	switch {
	case testCase.unconfirmed:
		ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
@@ -916,7 +950,7 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,

	default:
		ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
-		chanPoint := openChannelAndAssert(
+		chanPoint = openChannelAndAssert(
			ctxt, t, net, from, to,
			lntest.OpenChannelParams{
				Amt: chanAmt,
@@ -959,6 +993,32 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,
		}
	}

	// If we're testing that locally force closed channels can be
	// restored, then we issue the force close now.
	if testCase.localForceClose && chanPoint != nil {
		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
		defer cancel()

		_, err = dave.CloseChannel(ctxt, &lnrpc.CloseChannelRequest{
			ChannelPoint: chanPoint,
			Force:        true,
		})
		require.NoError(t.t, err)

		// After closing the channel we mine one block to make sure
		// the commitment TX was confirmed.
		_ = mineBlocks(t, net, 1, 1)

		// Now we need to make sure that the channel is still in the
		// backup. Otherwise restoring won't work later.
		_, err = dave.ExportChannelBackup(
			ctxt, &lnrpc.ExportChannelBackupRequest{
				ChanPoint: chanPoint,
			},
		)
		require.NoError(t.t, err)
	}

	// Before we start the recovery, we'll record the balances of both
	// Carol and Dave to ensure they both sweep their coins at the end.
	balReq := &lnrpc.WalletBalanceRequest{}
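The ExportChannelBackup call in the hunk above only confirms that a single-channel backup can still be produced after the force close. If one also wanted to check that the exported blob is actually usable, lnrpc exposes VerifyChanBackup; the sketch below is an assumption-laden illustration (the helper name and the wrapping of a single backup into a ChanBackupSnapshot follow the lnrpc message definitions, not this commit).

package scbrestore

import (
	"context"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// exportAndVerifyBackup is an illustrative helper, not part of the commit: it
// exports the backup for a single channel and asks lnd to verify that the
// blob can be unpacked again.
func exportAndVerifyBackup(ctx context.Context, node lnrpc.LightningClient,
	chanPoint *lnrpc.ChannelPoint) error {

	// Export the backup for just this channel, mirroring the
	// ExportChannelBackup call made in the test above.
	backup, err := node.ExportChannelBackup(ctx,
		&lnrpc.ExportChannelBackupRequest{ChanPoint: chanPoint},
	)
	if err != nil {
		return err
	}

	// Wrap the single backup into a snapshot and let lnd verify it. If
	// the channel had already been pruned from the backup state, this
	// would fail and a later restore could not recover the channel.
	_, err = node.VerifyChanBackup(ctx, &lnrpc.ChanBackupSnapshot{
		SingleChanBackups: &lnrpc.ChannelBackups{
			ChanBackups: []*lnrpc.ChannelBackup{backup},
		},
	})
	return err
}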
@@ -1022,6 +1082,30 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,
		t.Fatalf("On-chain balance not restored: %v", err)
	}

	// For our force close scenario we don't need the channel to be closed
	// by Carol since it was already force closed before we started the
	// recovery. All we need is for Carol to send us the commit height so
	// we can sweep the time locked output with the correct commit point.
	if testCase.localForceClose {
		assertNumPendingChannels(t, dave, 0, 1)

		err = restartCarol()
		require.NoError(t.t, err)

		// Now that we have our new node up, we expect that it'll
		// re-connect to Carol automatically based on the restored
		// backup.
		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
		net.EnsureConnected(ctxt, t.t, dave, carol)

		assertTimeLockSwept(
			net, t, carol, carolStartingBalance, dave,
			daveStartingBalance, testCase.anchorCommit,
		)

		return
	}

	// We now check that the restored channel is in the proper state. It
	// should not yet be force closing as no connection with the remote
	// peer was established yet. We should also not be able to close the
@@ -29,6 +29,8 @@
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcSuccessResolver: Transaction rejected: output already spent
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcTimeoutResolver: htlcswitch shutting down
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcTimeoutResolver: TxNotifier is exiting
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unexpected local commitment confirmed while in StateDefault
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unexpected local on-chain channel close
<time> [ERR] CNCT: *contractcourt.commitSweepResolver(<chan_point>): unable to sweep input: remote party swept utxo
<time> [ERR] CNCT: Unable to advance state: channel not found
<time> [ERR] CNCT: unable to hand breached contract off to breachArbiter: server is shutting down