package wtdb_test

import (
	crand "crypto/rand"
	"io"
	"math/rand"
	"net"
	"testing"

	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/lightningnetwork/lnd/kvdb"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/watchtower/blob"
	"github.com/lightningnetwork/lnd/watchtower/wtclient"
	"github.com/lightningnetwork/lnd/watchtower/wtdb"
	"github.com/lightningnetwork/lnd/watchtower/wtmock"
	"github.com/lightningnetwork/lnd/watchtower/wtpolicy"
	"github.com/stretchr/testify/require"
)
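
// blobType is the blob type used by default across these tests.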
const blobType = blob.TypeAltruistCommit

// pseudoAddr is a fake network address to be used for testing purposes.
var pseudoAddr = &net.TCPAddr{IP: []byte{0x01, 0x00, 0x00, 0x00}, Port: 9911}

// clientDBInit is a closure used to initialize a wtclient.DB instance.
type clientDBInit func(t *testing.T) wtclient.DB
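
// clientDBHarness couples a wtclient.DB implementation with the running test
// so that each database operation can be asserted in a single call.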
type clientDBHarness struct {
	t  *testing.T
	db wtclient.DB
}
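
// newClientDBHarness initializes a database via init and returns a harness
// bound to the given test.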
func newClientDBHarness(t *testing.T, init clientDBInit) *clientDBHarness {
	db := init(t)

	h := &clientDBHarness{
		t:  t,
		db: db,
	}

	return h
}
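
// insertSession attempts to create the given client session in the database,
// asserting that the operation results in expErr (nil for success).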
func (h *clientDBHarness) insertSession(session *wtdb.ClientSession,
	expErr error) {

	h.t.Helper()

	err := h.db.CreateClientSession(session)
	require.ErrorIs(h.t, err, expErr)
}
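
// listSessions returns the client sessions known for the given tower ID, or
// all sessions if id is nil, failing the test on any error.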
func (h *clientDBHarness) listSessions(id *wtdb.TowerID,
	filterFn wtdb.ClientSessionFilterFn,
	opts ...wtdb.ClientSessionListOption) map[wtdb.SessionID]*wtdb.ClientSession {

	h.t.Helper()

	sessions, err := h.db.ListClientSessions(id, filterFn, opts...)
	require.NoError(h.t, err, "unable to list client sessions")

	return sessions
}
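
// nextKeyIndex reserves the next session key index for the given tower and
// blob type, asserting that the reservation succeeds with a non-zero index.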
|
|
|
|
|
2020-12-01 16:54:53 -08:00
|
|
|
func (h *clientDBHarness) nextKeyIndex(id wtdb.TowerID,
|
|
|
|
blobType blob.Type) uint32 {
|
2020-12-01 13:03:18 -08:00
|
|
|
|
2019-05-23 20:49:04 -07:00
|
|
|
h.t.Helper()
|
|
|
|
|
2020-12-01 13:03:18 -08:00
|
|
|
index, err := h.db.NextSessionKeyIndex(id, blobType)
|
2022-10-10 12:47:08 +02:00
|
|
|
require.NoError(h.t, err, "unable to create next session key index")
|
|
|
|
require.NotZero(h.t, index, "next key index should never be 0")
|
2019-05-23 20:49:04 -07:00
|
|
|
|
|
|
|
return index
|
|
|
|
}
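
// createTower inserts the given tower address into the database, asserting
// the expected error and that all of the tower's sessions are active.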
func (h *clientDBHarness) createTower(lnAddr *lnwire.NetAddress,
	expErr error) *wtdb.Tower {

	h.t.Helper()

	tower, err := h.db.CreateTower(lnAddr)
	require.ErrorIs(h.t, err, expErr)
	require.NotZero(h.t, tower.ID, "tower id should never be 0")

	for _, session := range h.listSessions(&tower.ID, nil) {
		require.Equal(h.t, wtdb.CSessionActive, session.Status)
	}

	return tower
}
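
// removeTower removes the given tower address, or the tower itself if addr
// is nil, asserting the expected error and the resulting tower and session
// state.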
func (h *clientDBHarness) removeTower(pubKey *btcec.PublicKey, addr net.Addr,
	hasSessions bool, expErr error) {

	h.t.Helper()

	err := h.db.RemoveTower(pubKey, addr)
	require.ErrorIs(h.t, err, expErr)

	if expErr != nil {
		return
	}

	pubKeyStr := pubKey.SerializeCompressed()

	if addr != nil {
		tower, err := h.db.LoadTower(pubKey)
		require.NoErrorf(h.t, err, "expected tower %x to still exist",
			pubKeyStr)

		// Compare the address strings, since a net.Addr would never
		// equal the removed address string directly.
		removedAddr := addr.String()
		for _, towerAddr := range tower.Addresses {
			require.NotEqualf(h.t, removedAddr, towerAddr.String(),
				"address %v not removed for tower %x",
				removedAddr, pubKeyStr)
		}
	} else {
		tower, err := h.db.LoadTower(pubKey)
		if hasSessions {
			require.NoError(h.t, err, "expected tower %x with "+
				"sessions to still exist", pubKeyStr)
		} else {
			require.Errorf(h.t, err, "expected tower %x with no "+
				"sessions to not exist", pubKeyStr)
			return
		}

		for _, session := range h.listSessions(&tower.ID, nil) {
			require.Equal(h.t, wtdb.CSessionInactive,
				session.Status, "expected status for session "+
				"%v to be %v, got %v", session.ID,
				wtdb.CSessionInactive, session.Status)
		}
	}
}
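
// loadTower loads a tower by its public key, asserting the expected error.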
func (h *clientDBHarness) loadTower(pubKey *btcec.PublicKey,
	expErr error) *wtdb.Tower {

	h.t.Helper()

	tower, err := h.db.LoadTower(pubKey)
	require.ErrorIs(h.t, err, expErr)

	return tower
}
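
// loadTowerByID loads a tower by its ID, asserting the expected error.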
func (h *clientDBHarness) loadTowerByID(id wtdb.TowerID,
	expErr error) *wtdb.Tower {

	h.t.Helper()

	tower, err := h.db.LoadTowerByID(id)
	require.ErrorIs(h.t, err, expErr)

	return tower
}
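
// fetchChanSummaries fetches the set of registered channel summaries,
// failing the test on any error.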
func (h *clientDBHarness) fetchChanSummaries() map[lnwire.ChannelID]wtdb.ClientChanSummary {
	h.t.Helper()

	summaries, err := h.db.FetchChanSummaries()
	require.NoError(h.t, err)

	return summaries
}
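
// registerChan registers the channel and its sweep pkscript, asserting the
// expected error.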
func (h *clientDBHarness) registerChan(chanID lnwire.ChannelID,
	sweepPkScript []byte, expErr error) {

	h.t.Helper()

	err := h.db.RegisterChannel(chanID, sweepPkScript)
	require.ErrorIs(h.t, err, expErr)
}
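
// commitUpdate commits the update to the given session, asserting the
// expected error and returning the tower's last applied value.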
func (h *clientDBHarness) commitUpdate(id *wtdb.SessionID,
	update *wtdb.CommittedUpdate, expErr error) uint16 {

	h.t.Helper()

	lastApplied, err := h.db.CommitUpdate(id, update)
	require.ErrorIs(h.t, err, expErr)

	return lastApplied
}
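
// ackUpdate acks the given sequence number with the given last applied
// value, asserting the expected error.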
func (h *clientDBHarness) ackUpdate(id *wtdb.SessionID, seqNum uint16,
	lastApplied uint16, expErr error) {

	h.t.Helper()

	err := h.db.AckUpdate(id, seqNum, lastApplied)
	require.ErrorIs(h.t, err, expErr)
}
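
// markChannelClosed marks the channel closed at the given block height,
// asserting the expected error and returning any sessions that became
// closable as a result.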
func (h *clientDBHarness) markChannelClosed(id lnwire.ChannelID,
	blockHeight uint32, expErr error) []wtdb.SessionID {

	h.t.Helper()

	closableSessions, err := h.db.MarkChannelClosed(id, blockHeight)
	require.ErrorIs(h.t, err, expErr)

	return closableSessions
}
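
// listClosableSessions lists the closable sessions along with their close
// heights, asserting the expected error.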
func (h *clientDBHarness) listClosableSessions(
	expErr error) map[wtdb.SessionID]uint32 {

	h.t.Helper()

	closableSessions, err := h.db.ListClosableSessions()
	require.ErrorIs(h.t, err, expErr)

	return closableSessions
}

// newTower is a helper function that creates a new tower with a randomly
// generated public key and inserts it into the client DB.
func (h *clientDBHarness) newTower() *wtdb.Tower {
	h.t.Helper()

	pk, err := randPubKey()
	require.NoError(h.t, err)

	// Insert a random tower into the database.
	return h.createTower(&lnwire.NetAddress{
		IdentityKey: pk,
		Address:     pseudoAddr,
	}, nil)
}
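
// fetchSessionCommittedUpdates fetches the committed (not yet acked) updates
// for the given session, asserting the expected error.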
func (h *clientDBHarness) fetchSessionCommittedUpdates(id *wtdb.SessionID,
	expErr error) []wtdb.CommittedUpdate {

	h.t.Helper()

	updates, err := h.db.FetchSessionCommittedUpdates(id)
	require.ErrorIs(h.t, err, expErr)

	return updates
}
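
// isAcked returns whether the given backup is acked in the session,
// asserting the expected error.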
func (h *clientDBHarness) isAcked(id *wtdb.SessionID, backupID *wtdb.BackupID,
	expErr error) bool {

	h.t.Helper()

	isAcked, err := h.db.IsAcked(id, backupID)
	require.ErrorIs(h.t, err, expErr)

	return isAcked
}
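
// numAcked returns the number of acked updates in the session, asserting the
// expected error.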
func (h *clientDBHarness) numAcked(id *wtdb.SessionID, expErr error) uint64 {
	h.t.Helper()

	numAcked, err := h.db.NumAckedUpdates(id)
	require.ErrorIs(h.t, err, expErr)

	return numAcked
}

// testCreateClientSession asserts various conditions regarding the creation of
// a new ClientSession. The test asserts:
// - client sessions can only be created if a session key index is reserved.
// - client sessions cannot be created with an incorrect session key index.
// - inserting duplicate sessions fails.
func testCreateClientSession(h *clientDBHarness) {
	const blobType = blob.TypeAltruistAnchorCommit

	tower := h.newTower()

	// Create a test client session to insert.
	session := &wtdb.ClientSession{
		ClientSessionBody: wtdb.ClientSessionBody{
			TowerID: tower.ID,
			Policy: wtpolicy.Policy{
				TxPolicy: wtpolicy.TxPolicy{
					BlobType: blobType,
				},
				MaxUpdates: 100,
			},
			RewardPkScript: []byte{0x01, 0x02, 0x03},
		},
		ID: wtdb.SessionID([33]byte{0x01}),
	}

	// First, assert that this session is not already present in the
	// database.
	_, ok := h.listSessions(nil, nil)[session.ID]
	require.Falsef(h.t, ok, "session for id %x should not exist yet",
		session.ID)

	// Attempting to insert the client session without reserving a session
	// key index should fail.
	h.insertSession(session, wtdb.ErrNoReservedKeyIndex)

	// Now, reserve a session key for this tower.
	keyIndex := h.nextKeyIndex(session.TowerID, blobType)

	// The client session hasn't been updated with the reserved key index
	// (since it's still zero). Inserting should fail due to the mismatch.
	h.insertSession(session, wtdb.ErrIncorrectKeyIndex)

	// Reserve another key for the same tower. Since no session has been
	// successfully created, it should return the same index to maintain
	// idempotency across restarts.
	keyIndex2 := h.nextKeyIndex(session.TowerID, blobType)
	require.Equalf(h.t, keyIndex, keyIndex2, "next key index should "+
		"be idempotent: want: %v, got %v", keyIndex, keyIndex2)

	// Now, set the client session's key index so that it is proper and
	// insert it. This should succeed.
	session.KeyIndex = keyIndex
	h.insertSession(session, nil)

	// Verify that the session now exists in the database.
	_, ok = h.listSessions(nil, nil)[session.ID]
	require.Truef(h.t, ok, "session for id %x should exist now", session.ID)

	// Attempt to insert the session again, which should fail due to the
	// session already existing.
	h.insertSession(session, wtdb.ErrClientSessionAlreadyExists)

	// Finally, assert that reserving another key index succeeds with a
	// different key index, now that the first one has been finalized.
	keyIndex3 := h.nextKeyIndex(session.TowerID, blobType)
	require.NotEqualf(h.t, keyIndex, keyIndex3, "key index still "+
		"reserved after creating session")
}

// testFilterClientSessions asserts that we can correctly filter client
// sessions for a specific tower.
func testFilterClientSessions(h *clientDBHarness) {
	// We'll create three client sessions, each belonging to a different
	// tower.
	const numSessions = 3
	const blobType = blob.TypeAltruistCommit
	towerSessions := make(map[wtdb.TowerID][]wtdb.SessionID)
	for i := 0; i < numSessions; i++ {
		tower := h.newTower()
		keyIndex := h.nextKeyIndex(tower.ID, blobType)
		sessionID := wtdb.SessionID([33]byte{byte(i)})
		h.insertSession(&wtdb.ClientSession{
			ClientSessionBody: wtdb.ClientSessionBody{
				TowerID: tower.ID,
				Policy: wtpolicy.Policy{
					TxPolicy: wtpolicy.TxPolicy{
						BlobType: blobType,
					},
					MaxUpdates: 100,
				},
				RewardPkScript: []byte{0x01, 0x02, 0x03},
				KeyIndex:       keyIndex,
			},
			ID: sessionID,
		}, nil)
		towerSessions[tower.ID] = append(
			towerSessions[tower.ID], sessionID,
		)
	}

	// We should see the expected sessions for each tower when filtering
	// them.
	for towerID, expectedSessions := range towerSessions {
		sessions := h.listSessions(&towerID, nil)
		require.Len(h.t, sessions, len(expectedSessions))

		for _, expectedSession := range expectedSessions {
			_, ok := sessions[expectedSession]
			require.Truef(h.t, ok, "expected session %v for "+
				"tower %v", expectedSession, towerID)
		}
	}
}

// testCreateTower asserts the behavior of creating new Tower objects within
// the database, and that the latest address is always prepended to the list
// of known addresses for the tower.
func testCreateTower(h *clientDBHarness) {
	// Test that loading a tower with an arbitrary tower id fails.
	h.loadTowerByID(20, wtdb.ErrTowerNotFound)

	tower := h.newTower()
	require.Len(h.t, tower.Addresses, 1)
	towerAddr := &lnwire.NetAddress{
		IdentityKey: tower.IdentityKey,
		Address:     tower.Addresses[0],
	}

	// Load the tower from the database and assert that it matches the
	// tower we created.
	tower2 := h.loadTowerByID(tower.ID, nil)
	require.Equal(h.t, tower, tower2)

	tower2 = h.loadTower(tower.IdentityKey, nil)
	require.Equal(h.t, tower, tower2)

	// Insert the address again into the database. Since the address is
	// the same, this should result in an unmodified tower record.
	towerDupAddr := h.createTower(towerAddr, nil)
	require.Lenf(h.t, towerDupAddr.Addresses, 1, "duplicate address "+
		"should be deduped")

	require.Equal(h.t, tower, towerDupAddr)

	// Generate a new address for this tower.
	addr2 := &net.TCPAddr{IP: []byte{0x02, 0x00, 0x00, 0x00}, Port: 9911}

	lnAddr2 := &lnwire.NetAddress{
		IdentityKey: tower.IdentityKey,
		Address:     addr2,
	}

	// Insert the updated address, which should produce a tower with a new
	// address.
	towerNewAddr := h.createTower(lnAddr2, nil)

	// Load the tower from the database, and assert that it matches the
	// tower returned from creation.
	towerNewAddr2 := h.loadTowerByID(tower.ID, nil)
	require.Equal(h.t, towerNewAddr, towerNewAddr2)

	towerNewAddr2 = h.loadTower(tower.IdentityKey, nil)
	require.Equal(h.t, towerNewAddr, towerNewAddr2)

	// Assert that there are now two addresses on the tower object.
	require.Lenf(h.t, towerNewAddr.Addresses, 2, "new address should be "+
		"added")

	// Finally, assert that the new address was prepended since it is
	// deemed fresher.
	require.Equal(h.t, tower.Addresses, towerNewAddr.Addresses[1:])
}

// testRemoveTower asserts the behavior of removing Tower objects as a whole
// and removing addresses from Tower objects within the database.
func testRemoveTower(h *clientDBHarness) {
	// Generate a random public key we'll use for our tower.
	pk, err := randPubKey()
	require.NoError(h.t, err)

	// Removing a tower that does not exist within the database should
	// result in a NOP.
	h.removeTower(pk, nil, false, nil)

	// We'll create a tower with two addresses.
	addr1 := &net.TCPAddr{IP: []byte{0x01, 0x00, 0x00, 0x00}, Port: 9911}
	addr2 := &net.TCPAddr{IP: []byte{0x02, 0x00, 0x00, 0x00}, Port: 9911}
	h.createTower(&lnwire.NetAddress{
		IdentityKey: pk,
		Address:     addr1,
	}, nil)
	h.createTower(&lnwire.NetAddress{
		IdentityKey: pk,
		Address:     addr2,
	}, nil)

	// We'll then remove the second address. We should now only see the
	// first.
	h.removeTower(pk, addr2, false, nil)

	// We'll then attempt to remove the first address. This should fail
	// since we are not allowed to remove a tower's last address.
	h.removeTower(pk, addr1, false, wtdb.ErrLastTowerAddr)

	// Removing the tower as a whole from the database should succeed
	// since there aren't any active sessions for it.
	h.removeTower(pk, nil, false, nil)

	// We'll then recreate the tower, but this time we'll create a session
	// for it.
	tower := h.createTower(&lnwire.NetAddress{
		IdentityKey: pk,
		Address:     addr1,
	}, nil)

	const blobType = blob.TypeAltruistCommit
	session := &wtdb.ClientSession{
		ClientSessionBody: wtdb.ClientSessionBody{
			TowerID: tower.ID,
			Policy: wtpolicy.Policy{
				TxPolicy: wtpolicy.TxPolicy{
					BlobType: blobType,
				},
				MaxUpdates: 100,
			},
			RewardPkScript: []byte{0x01, 0x02, 0x03},
			KeyIndex:       h.nextKeyIndex(tower.ID, blobType),
		},
		ID: wtdb.SessionID([33]byte{0x01}),
	}
	h.insertSession(session, nil)
	update := randCommittedUpdate(h.t, 1)
	h.registerChan(update.BackupID.ChanID, nil, nil)
	h.commitUpdate(&session.ID, update, nil)

	// We should not be able to fully remove it from the database since
	// there's a session and it has unacked updates.
	h.removeTower(pk, nil, true, wtdb.ErrTowerUnackedUpdates)

	// Removing the tower after all sessions no longer have unacked
	// updates should result in the sessions becoming inactive.
	h.ackUpdate(&session.ID, 1, 1, nil)
	h.removeTower(pk, nil, true, nil)

	// Creating the tower again should mark all of the sessions active
	// once again.
	h.createTower(&lnwire.NetAddress{
		IdentityKey: pk,
		Address:     addr1,
	}, nil)
}

// testChanSummaries tests the process of registering a channel and its
// associated sweep pkscript.
func testChanSummaries(h *clientDBHarness) {
	// First, assert that this channel is not already registered.
	var chanID lnwire.ChannelID
	_, ok := h.fetchChanSummaries()[chanID]
	require.Falsef(h.t, ok, "pkscript for channel %x should not exist yet",
		chanID)

	// Generate a random sweep pkscript and register it for this channel.
	expPkScript := make([]byte, 22)
	_, err := io.ReadFull(crand.Reader, expPkScript)
	require.NoError(h.t, err)

	h.registerChan(chanID, expPkScript, nil)

	// Assert that the channel exists and that its sweep pkscript matches
	// the one we registered.
	summary, ok := h.fetchChanSummaries()[chanID]
	require.Truef(h.t, ok, "pkscript for channel %x should exist now",
		chanID)
	require.Equal(h.t, expPkScript, summary.SweepPkScript)

	// Finally, assert that re-registering the same channel produces a
	// failure.
	h.registerChan(chanID, expPkScript, wtdb.ErrChannelAlreadyRegistered)
}

// testCommitUpdate tests the behavior of CommitUpdate, ensuring that updates
// can only be committed to known sessions, that identical commits are
// idempotent, and that out-of-order or conflicting commits are rejected.
func testCommitUpdate(h *clientDBHarness) {
	const blobType = blob.TypeAltruistCommit

	tower := h.newTower()
	session := &wtdb.ClientSession{
		ClientSessionBody: wtdb.ClientSessionBody{
			TowerID: tower.ID,
			Policy: wtpolicy.Policy{
				TxPolicy: wtpolicy.TxPolicy{
					BlobType: blobType,
				},
				MaxUpdates: 100,
			},
			RewardPkScript: []byte{0x01, 0x02, 0x03},
		},
		ID: wtdb.SessionID([33]byte{0x02}),
	}

	// Generate a random update and try to commit before inserting the
	// session, which should fail.
	update1 := randCommittedUpdate(h.t, 1)
	h.commitUpdate(&session.ID, update1, wtdb.ErrClientSessionNotFound)
	h.fetchSessionCommittedUpdates(
		&session.ID, wtdb.ErrClientSessionNotFound,
	)

	// Reserve a session key index and insert the session.
	session.KeyIndex = h.nextKeyIndex(session.TowerID, blobType)
	h.insertSession(session, nil)

	// Now, try to commit the update that failed initially which should
	// succeed. The lastApplied value should be 0 since we have not
	// received an ack from the tower.
	lastApplied := h.commitUpdate(&session.ID, update1, nil)
	require.Zero(h.t, lastApplied)

	// Assert that the committed update appears in the client session's
	// CommittedUpdates map when loaded from disk and that there are no
	// AckedUpdates.
	h.assertUpdates(session.ID, []wtdb.CommittedUpdate{*update1}, nil)

	// Try to commit the same update, which should succeed due to
	// idempotency (which is preserved when the breach hint is identical to
	// the on-disk update's hint). The lastApplied value should remain
	// unchanged.
	lastApplied2 := h.commitUpdate(&session.ID, update1, nil)
	require.Equal(h.t, lastApplied, lastApplied2)

	// Assert that the loaded ClientSession is the same as before.
	h.assertUpdates(session.ID, []wtdb.CommittedUpdate{*update1}, nil)

	// Generate another random update and try to commit it at the identical
	// sequence number. Since the breach hint has changed, this should fail.
	update2 := randCommittedUpdate(h.t, 1)
	h.commitUpdate(&session.ID, update2, wtdb.ErrUpdateAlreadyCommitted)

	// Next, insert the new update at the next unallocated sequence number
	// which should succeed.
	update2.SeqNum = 2
	lastApplied3 := h.commitUpdate(&session.ID, update2, nil)
	require.Equal(h.t, lastApplied, lastApplied3)

	// Check that both updates now appear as committed on the ClientSession
	// loaded from disk.
	h.assertUpdates(session.ID, []wtdb.CommittedUpdate{
		*update1,
		*update2,
	}, nil)

	// Finally, create one more random update and try to commit it at index
	// 4, which should be rejected since 3 is the next slot the database
	// expects.
	update4 := randCommittedUpdate(h.t, 4)
	h.commitUpdate(&session.ID, update4, wtdb.ErrCommitUnorderedUpdate)

	// Assert that the ClientSession loaded from disk remains unchanged.
	h.assertUpdates(session.ID, []wtdb.CommittedUpdate{
		*update1,
		*update2,
	}, nil)
}

// testMarkChannelClosed asserts the behaviour of MarkChannelClosed.
func testMarkChannelClosed(h *clientDBHarness) {
	tower := h.newTower()

	// Create channel 1.
	chanID1 := randChannelID(h.t)

	// Since we have not yet registered the channel, we expect an error
	// when attempting to mark it as closed.
	h.markChannelClosed(chanID1, 1, wtdb.ErrChannelNotRegistered)

	// Now register the channel.
	h.registerChan(chanID1, nil, nil)

	// Since there are still no sessions that would have updates for the
	// channel, marking it as closed now should succeed.
	h.markChannelClosed(chanID1, 1, nil)

	// Register channel 2.
	chanID2 := randChannelID(h.t)
	h.registerChan(chanID2, nil, nil)

	// Create session1 with MaxUpdates set to 5.
	session1 := h.randSession(h.t, tower.ID, 5)
	h.insertSession(session1, nil)

	// Add an update for channel 2 in session 1 and ack it too.
	update := randCommittedUpdateForChannel(h.t, chanID2, 1)
	lastApplied := h.commitUpdate(&session1.ID, update, nil)
	require.Zero(h.t, lastApplied)
	h.ackUpdate(&session1.ID, 1, 1, nil)

	// Marking channel 2 as closed now should not result in any closable
	// sessions since session 1 is not yet exhausted.
	sl := h.markChannelClosed(chanID2, 1, nil)
	require.Empty(h.t, sl)

	// Create channels 3 and 4.
	chanID3 := randChannelID(h.t)
	h.registerChan(chanID3, nil, nil)

	chanID4 := randChannelID(h.t)
	h.registerChan(chanID4, nil, nil)

	// Add an update for channel 4 and ack it.
	update = randCommittedUpdateForChannel(h.t, chanID4, 2)
	lastApplied = h.commitUpdate(&session1.ID, update, nil)
	require.EqualValues(h.t, 1, lastApplied)
	h.ackUpdate(&session1.ID, 2, 2, nil)

	// Add an update for channel 3 in session 1. But don't ack it yet.
	update = randCommittedUpdateForChannel(h.t, chanID3, 3)
	lastApplied = h.commitUpdate(&session1.ID, update, nil)
	require.EqualValues(h.t, 2, lastApplied)

	// Mark channel 4 as closed & assert that session 1 is not seen as
	// closable since it still has committed updates.
	sl = h.markChannelClosed(chanID4, 1, nil)
	require.Empty(h.t, sl)

	// Now ack the update we added above.
	h.ackUpdate(&session1.ID, 3, 3, nil)

	// Mark channel 3 as closed & assert that session 1 is still not seen
	// as closable since it is not yet exhausted.
	sl = h.markChannelClosed(chanID3, 1, nil)
	require.Empty(h.t, sl)

	// Create channels 5 and 6.
	chanID5 := randChannelID(h.t)
	h.registerChan(chanID5, nil, nil)

	chanID6 := randChannelID(h.t)
	h.registerChan(chanID6, nil, nil)

	// Add an update for channel 5 and ack it.
	update = randCommittedUpdateForChannel(h.t, chanID5, 4)
	lastApplied = h.commitUpdate(&session1.ID, update, nil)
	require.EqualValues(h.t, 3, lastApplied)
	h.ackUpdate(&session1.ID, 4, 4, nil)

	// Add an update for channel 6 and ack it.
	update = randCommittedUpdateForChannel(h.t, chanID6, 5)
	lastApplied = h.commitUpdate(&session1.ID, update, nil)
	require.EqualValues(h.t, 4, lastApplied)
	h.ackUpdate(&session1.ID, 5, 5, nil)

	// The session is now exhausted.
	// If we now close channel 5, session 1 should still not be closable
	// since it has an update for channel 6 which is still open.
	sl = h.markChannelClosed(chanID5, 1, nil)
	require.Empty(h.t, sl)
	require.Empty(h.t, h.listClosableSessions(nil))

	// Finally, if we close channel 6, session 1 _should_ be in the
	// closable list.
	sl = h.markChannelClosed(chanID6, 100, nil)
	require.ElementsMatch(h.t, sl, []wtdb.SessionID{session1.ID})
	slMap := h.listClosableSessions(nil)
	require.InDeltaMapValues(h.t, slMap, map[wtdb.SessionID]uint32{
		session1.ID: 100,
	}, 0)
}

// testAckUpdate asserts the behavior of AckUpdate.
func testAckUpdate(h *clientDBHarness) {
	const blobType = blob.TypeAltruistCommit

	tower := h.newTower()

	// Create a new session that the updates in this test will be tied to.
	session := &wtdb.ClientSession{
		ClientSessionBody: wtdb.ClientSessionBody{
			TowerID: tower.ID,
			Policy: wtpolicy.Policy{
				TxPolicy: wtpolicy.TxPolicy{
					BlobType: blobType,
				},
				MaxUpdates: 100,
			},
			RewardPkScript: []byte{0x01, 0x02, 0x03},
		},
		ID: wtdb.SessionID([33]byte{0x03}),
	}

	// Try to ack an update before inserting the client session, which
	// should fail.
	h.ackUpdate(&session.ID, 1, 0, wtdb.ErrClientSessionNotFound)

	// Reserve a session key and insert the client session.
	session.KeyIndex = h.nextKeyIndex(session.TowerID, blobType)
	h.insertSession(session, nil)

	// Now, try to ack update 1. This should fail since update 1 was never
	// committed.
	h.ackUpdate(&session.ID, 1, 0, wtdb.ErrCommittedUpdateNotFound)

	// Commit to a random update at seqnum 1.
	update1 := randCommittedUpdate(h.t, 1)
	h.registerChan(update1.BackupID.ChanID, nil, nil)
	lastApplied := h.commitUpdate(&session.ID, update1, nil)
	require.Zero(h.t, lastApplied)

	// Acking seqnum 1 should succeed.
	h.ackUpdate(&session.ID, 1, 1, nil)

	// Acking seqnum 1 again should fail.
	h.ackUpdate(&session.ID, 1, 1, wtdb.ErrCommittedUpdateNotFound)

	// Acking a valid seqnum with a reverted last applied value should fail.
	h.ackUpdate(&session.ID, 1, 0, wtdb.ErrLastAppliedReversion)

	// Acking with a last applied greater than any allocated seqnum should
	// fail.
	h.ackUpdate(&session.ID, 4, 3, wtdb.ErrUnallocatedLastApplied)

	// Assert that the ClientSession loaded from disk has one update in its
	// AckedUpdates map, and that the committed update has been removed.
	h.assertUpdates(session.ID, nil, map[uint16]wtdb.BackupID{
		1: update1.BackupID,
	})

	// Commit to another random update, and assert that the last applied
	// value is 1, since this was what was provided in the last successful
	// ack.
	update2 := randCommittedUpdate(h.t, 2)
	h.registerChan(update2.BackupID.ChanID, nil, nil)
	lastApplied = h.commitUpdate(&session.ID, update2, nil)
	require.EqualValues(h.t, 1, lastApplied)

	// Ack seqnum 2.
	h.ackUpdate(&session.ID, 2, 2, nil)

	// Assert that both updates exist as AckedUpdates when loaded from disk.
	h.assertUpdates(session.ID, nil, map[uint16]wtdb.BackupID{
		1: update1.BackupID,
		2: update2.BackupID,
	})

	// Acking again with a lower last applied should fail.
	h.ackUpdate(&session.ID, 2, 1, wtdb.ErrLastAppliedReversion)

	// Acking an unallocated seqnum should fail.
	h.ackUpdate(&session.ID, 4, 2, wtdb.ErrCommittedUpdateNotFound)

	// Acking with a last applied greater than any allocated seqnum should
	// fail.
	h.ackUpdate(&session.ID, 4, 3, wtdb.ErrUnallocatedLastApplied)
}
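
// assertUpdates asserts that the session's committed (pending) updates and
// acked updates on disk match the expected sets.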
func (h *clientDBHarness) assertUpdates(id wtdb.SessionID,
	expectedPending []wtdb.CommittedUpdate,
	expectedAcked map[uint16]wtdb.BackupID) {

	committedUpdates := h.fetchSessionCommittedUpdates(&id, nil)
	checkCommittedUpdates(h.t, committedUpdates, expectedPending)

	// Check acked updates.
	numAcked := h.numAcked(&id, nil)
	require.EqualValues(h.t, len(expectedAcked), numAcked)
	for _, backupID := range expectedAcked {
		isAcked := h.isAcked(&id, &backupID, nil)
		require.True(h.t, isAcked)
	}
}

// checkCommittedUpdates asserts that the given actualUpdates match the
// expUpdates provided.
func checkCommittedUpdates(t *testing.T, actualUpdates,
	expUpdates []wtdb.CommittedUpdate) {

	t.Helper()

	// We promote nil expUpdates to an initialized slice since the database
	// should never return a nil slice. This promotion is done purely out
	// of convenience for the testing framework.
	if expUpdates == nil {
		expUpdates = make([]wtdb.CommittedUpdate, 0)
	}

	require.Equal(t, expUpdates, actualUpdates)
}

// TestClientDB asserts the behavior of a fresh client db, a reopened client
// db, and the mock implementation. This ensures that all databases function
// identically, especially in the negative paths.
func TestClientDB(t *testing.T) {
	dbCfg := &kvdb.BoltConfig{DBTimeout: kvdb.DefaultDBTimeout}
	dbs := []struct {
		name string
		init clientDBInit
	}{
		{
			name: "fresh clientdb",
			init: func(t *testing.T) wtclient.DB {
				bdb, err := wtdb.NewBoltBackendCreator(
					true, t.TempDir(), "wtclient.db",
				)(dbCfg)
				require.NoError(t, err)

				db, err := wtdb.OpenClientDB(bdb)
				require.NoError(t, err)

				t.Cleanup(func() {
					db.Close()
				})

				return db
			},
		},
		{
			name: "reopened clientdb",
			init: func(t *testing.T) wtclient.DB {
				path := t.TempDir()

				bdb, err := wtdb.NewBoltBackendCreator(
					true, path, "wtclient.db",
				)(dbCfg)
				require.NoError(t, err)

				db, err := wtdb.OpenClientDB(bdb)
				require.NoError(t, err)
				db.Close()

				bdb, err = wtdb.NewBoltBackendCreator(
					true, path, "wtclient.db",
				)(dbCfg)
				require.NoError(t, err)

				db, err = wtdb.OpenClientDB(bdb)
				require.NoError(t, err)

				t.Cleanup(func() {
					db.Close()
				})

				return db
			},
		},
		{
			name: "mock",
			init: func(t *testing.T) wtclient.DB {
				return wtmock.NewClientDB()
			},
		},
	}

	tests := []struct {
		name string
		run  func(*clientDBHarness)
	}{
		{
			name: "create client session",
			run:  testCreateClientSession,
		},
		{
			name: "filter client sessions",
			run:  testFilterClientSessions,
		},
		{
			name: "create tower",
			run:  testCreateTower,
		},
		{
			name: "remove tower",
			run:  testRemoveTower,
		},
		{
			name: "chan summaries",
			run:  testChanSummaries,
		},
		{
			name: "commit update",
			run:  testCommitUpdate,
		},
		{
			name: "ack update",
			run:  testAckUpdate,
		},
		{
			name: "mark channel closed",
			run:  testMarkChannelClosed,
		},
	}

	for _, database := range dbs {
		db := database
		t.Run(db.name, func(t *testing.T) {
			t.Parallel()

			for _, test := range tests {
				t.Run(test.name, func(t *testing.T) {
					h := newClientDBHarness(t, db.init)

					test.run(h)
				})
			}
		})
	}
}

// randCommittedUpdate generates a random committed update.
func randCommittedUpdate(t *testing.T, seqNum uint16) *wtdb.CommittedUpdate {
	t.Helper()

	chanID := randChannelID(t)

	return randCommittedUpdateForChannel(t, chanID, seqNum)
}
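
// randChannelID generates a random channel ID.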
func randChannelID(t *testing.T) lnwire.ChannelID {
	t.Helper()

	var chanID lnwire.ChannelID
	_, err := io.ReadFull(crand.Reader, chanID[:])
	require.NoError(t, err)

	return chanID
}

// randCommittedUpdateForChannel generates a random committed update for the
// given channel ID.
func randCommittedUpdateForChannel(t *testing.T, chanID lnwire.ChannelID,
	seqNum uint16) *wtdb.CommittedUpdate {

	t.Helper()

	var hint blob.BreachHint
	_, err := io.ReadFull(crand.Reader, hint[:])
	require.NoError(t, err)

	encBlob := make([]byte, blob.Size(blob.FlagCommitOutputs.Type()))
	_, err = io.ReadFull(crand.Reader, encBlob)
	require.NoError(t, err)

	return &wtdb.CommittedUpdate{
		SeqNum: seqNum,
		CommittedUpdateBody: wtdb.CommittedUpdateBody{
			BackupID: wtdb.BackupID{
				ChanID:       chanID,
				CommitHeight: 666,
			},
			Hint:          hint,
			EncryptedBlob: encBlob,
		},
	}
}
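
// randSession returns a new client session for the given tower with a random
// session ID, using the test-wide blobType and reserving a fresh key index.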
func (h *clientDBHarness) randSession(t *testing.T,
	towerID wtdb.TowerID, maxUpdates uint16) *wtdb.ClientSession {

	t.Helper()

	var id wtdb.SessionID
	rand.Read(id[:])

	return &wtdb.ClientSession{
		ClientSessionBody: wtdb.ClientSessionBody{
			TowerID: towerID,
			Policy: wtpolicy.Policy{
				TxPolicy: wtpolicy.TxPolicy{
					BlobType: blobType,
				},
				MaxUpdates: maxUpdates,
			},
			RewardPkScript: []byte{0x01, 0x02, 0x03},
			KeyIndex:       h.nextKeyIndex(towerID, blobType),
		},
		ID: id,
	}
}
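
// randPubKey generates a fresh secp256k1 keypair and returns its public key.
// (Assumed helper: this excerpt calls randPubKey without defining it; this is
// a minimal sketch using only the btcec/v2 API, in case the helper is not
// defined elsewhere in the package.)
func randPubKey() (*btcec.PublicKey, error) {
	sk, err := btcec.NewPrivateKey()
	if err != nil {
		return nil, err
	}

	return sk.PubKey(), nil
}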