Merge pull request #6347 from yyforyongyu/5388-rm-revc-log

lnwallet+channeldb: store minimal info in revocation log bucket
This commit is contained in:
Olaoluwa Osuntokun 2022-05-05 15:52:59 -07:00 committed by GitHub
commit 0ec88b5346
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
33 changed files with 3635 additions and 562 deletions

View File

@ -117,21 +117,14 @@ var (
// TODO(roasbeef): rename to commit chain?
commitDiffKey = []byte("commit-diff-key")
// revocationLogBucket is dedicated for storing the necessary delta
// state between channel updates required to re-construct a past state
// in order to punish a counterparty attempting a non-cooperative
// channel closure. This key should be accessed from within the
// sub-bucket of a target channel, identified by its channel point.
revocationLogBucket = []byte("revocation-log-key")
// frozenChanKey is the key where we store the information for any
// active "frozen" channels. This key is present only in the leaf
// bucket for a given channel.
frozenChanKey = []byte("frozen-chans")
// lastWasRevokeKey is a key that stores true when the last update we sent
// was a revocation and false when it was a commitment signature. This is
// nil in the case of new channels with no updates exchanged.
// lastWasRevokeKey is a key that stores true when the last update we
// sent was a revocation and false when it was a commitment signature.
// This is nil in the case of new channels with no updates exchanged.
lastWasRevokeKey = []byte("last-was-revoke")
)
@ -176,18 +169,9 @@ var (
// channel.
ErrChanBorked = fmt.Errorf("cannot mutate borked channel")
// ErrLogEntryNotFound is returned when we cannot find a log entry at
// the height requested in the revocation log.
ErrLogEntryNotFound = fmt.Errorf("log entry not found")
// ErrMissingIndexEntry is returned when a caller attempts to close a
// channel and the outpoint is missing from the index.
ErrMissingIndexEntry = fmt.Errorf("missing outpoint from index")
// errHeightNotReached is returned when a query for channel balances at
// a height that we have not reached yet is made.
errHeightNotReached = fmt.Errorf("height requested greater than " +
"current commit height")
)
const (
@ -657,6 +641,15 @@ type OpenChannel struct {
// received within this channel.
TotalMSatReceived lnwire.MilliSatoshi
// InitialLocalBalance is the balance we have during the channel
// opening. When we are not the initiator, this value represents the
// push amount.
InitialLocalBalance lnwire.MilliSatoshi
// InitialRemoteBalance is the balance they have during the channel
// opening.
InitialRemoteBalance lnwire.MilliSatoshi
// LocalChanCfg is the channel configuration for the local node.
LocalChanCfg ChannelConfig
@ -1644,44 +1637,6 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
return nil
}
// BalancesAtHeight returns the local and remote balances on our commitment
// transactions as of a given height.
//
// NOTE: these are our balances *after* subtracting the commitment fee and
// anchor outputs.
func (c *OpenChannel) BalancesAtHeight(height uint64) (lnwire.MilliSatoshi,
	lnwire.MilliSatoshi, error) {

	switch {
	// The requested height lies beyond both of our in-memory commitment
	// heights, so we haven't reached that state yet.
	case height > c.LocalCommitment.CommitHeight &&
		height > c.RemoteCommitment.CommitHeight:

		return 0, 0, errHeightNotReached

	// Our current local commitment sits exactly at the requested height,
	// so we can return its balances directly.
	case c.LocalCommitment.CommitHeight == height:
		return c.LocalCommitment.LocalBalance,
			c.LocalCommitment.RemoteBalance, nil

	// Likewise, if our current remote commitment is at the requested
	// height, return its balances.
	case c.RemoteCommitment.CommitHeight == height:
		return c.RemoteCommitment.LocalBalance,
			c.RemoteCommitment.RemoteBalance, nil
	}

	// Neither in-memory commitment matches the requested height, so look
	// up the balances in the revocation log instead.
	commit, err := c.FindPreviousState(height)
	if err != nil {
		return 0, 0, err
	}

	return commit.LocalBalance, commit.RemoteBalance, nil
}
// ActiveHtlcs returns a slice of HTLC's which are currently active on *both*
// commitment transactions.
func (c *OpenChannel) ActiveHtlcs() []HTLC {
@ -1718,6 +1673,8 @@ func (c *OpenChannel) ActiveHtlcs() []HTLC {
//
// TODO(roasbeef): save space by using smaller ints at tail end?
type HTLC struct {
// TODO(yy): can embed an HTLCEntry here.
// Signature is the signature for the second level covenant transaction
// for this HTLC. The second level transaction is a timeout tx in the
// case that this is an outgoing HTLC, and a success tx in the case
@ -2349,7 +2306,7 @@ func (c *OpenChannel) InsertNextRevocation(revKey *btcec.PublicKey) error {
// set of local updates that the peer still needs to send us a signature for.
// We store this set of updates in case we go down.
func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg,
updates []LogUpdate) error {
updates []LogUpdate, ourOutputIndex, theirOutputIndex uint32) error {
c.Lock()
defer c.Unlock()
@ -2422,9 +2379,10 @@ func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg,
// With the commitment pointer swapped, we can now add the
// revoked (prior) state to the revocation log.
//
// TODO(roasbeef): store less
err = appendChannelLogEntry(logBucket, &c.RemoteCommitment)
err = putRevocationLog(
logBucket, &c.RemoteCommitment,
ourOutputIndex, theirOutputIndex,
)
if err != nil {
return err
}
@ -2608,22 +2566,24 @@ func (c *OpenChannel) RemoveFwdPkgs(heights ...uint64) error {
}, func() {})
}
// RevocationLogTail returns the "tail", or the end of the current revocation
// log. This entry represents the last previous state for the remote node's
// commitment chain. The ChannelDelta returned by this method will always lag
// one state behind the most current (unrevoked) state of the remote node's
// commitment chain.
func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
// revocationLogTailCommitHeight returns the commit height at the end of the
// revocation log. This entry represents the last previous state for the remote
// node's commitment chain. The ChannelDelta returned by this method will
// always lag one state behind the most current (unrevoked) state of the remote
// node's commitment chain.
// NOTE: used in unit test only.
func (c *OpenChannel) revocationLogTailCommitHeight() (uint64, error) {
c.RLock()
defer c.RUnlock()
var height uint64
// If we haven't created any state updates yet, then we'll exit early as
// there's nothing to be found on disk in the revocation bucket.
if c.RemoteCommitment.CommitHeight == 0 {
return nil, nil
return height, nil
}
var commit ChannelCommitment
if err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
@ -2632,33 +2592,25 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
return err
}
logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
if logBucket == nil {
return ErrNoPastDeltas
logBucket, err := fetchLogBucket(chanBucket)
if err != nil {
return err
}
// Once we have the bucket that stores the revocation log from
// this channel, we'll jump to the _last_ key in bucket. As we
// store the update number on disk in a big-endian format,
// this will retrieve the latest entry.
// this channel, we'll jump to the _last_ key in bucket. Since
// the key is the commit height, we'll decode the bytes and
// return it.
cursor := logBucket.ReadCursor()
_, tailLogEntry := cursor.Last()
logEntryReader := bytes.NewReader(tailLogEntry)
// Once we have the entry, we'll decode it into the channel
// delta pointer we created above.
var dbErr error
commit, dbErr = deserializeChanCommit(logEntryReader)
if dbErr != nil {
return dbErr
}
rawHeight, _ := cursor.Last()
height = byteOrder.Uint64(rawHeight)
return nil
}, func() {}); err != nil {
return nil, err
return height, err
}
return &commit, nil
return height, nil
}
// CommitmentHeight returns the current commitment height. The commitment
@ -2703,11 +2655,15 @@ func (c *OpenChannel) CommitmentHeight() (uint64, error) {
// intended to be used for obtaining the relevant data needed to claim all
// funds rightfully spendable in the case of an on-chain broadcast of the
// commitment transaction.
func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, error) {
func (c *OpenChannel) FindPreviousState(
updateNum uint64) (*RevocationLog, *ChannelCommitment, error) {
c.RLock()
defer c.RUnlock()
var commit ChannelCommitment
commit := &ChannelCommitment{}
rl := &RevocationLog{}
err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
@ -2716,24 +2672,24 @@ func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, e
return err
}
logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
if logBucket == nil {
return ErrNoPastDeltas
}
c, err := fetchChannelLogEntry(logBucket, updateNum)
// Find the revocation log from both the new and the old
// bucket.
r, c, err := fetchRevocationLogCompatible(chanBucket, updateNum)
if err != nil {
return err
}
rl = r
commit = c
return nil
}, func() {})
if err != nil {
return nil, err
return nil, nil, err
}
return &commit, nil
// Either the `rl` or the `commit` is nil here. We return them as-is
// and leave it to the caller to decide its following action.
return rl, commit, nil
}
// ClosureType is an enum like structure that details exactly _how_ a channel
@ -2930,12 +2886,8 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
// With the base channel data deleted, attempt to delete the
// information stored within the revocation log.
logBucket := chanBucket.NestedReadWriteBucket(revocationLogBucket)
if logBucket != nil {
err = chanBucket.DeleteNestedBucket(revocationLogBucket)
if err != nil {
return err
}
if err := deleteLogBucket(chanBucket); err != nil {
return err
}
err = chainBucket.DeleteNestedBucket(chanPointBuf.Bytes())
@ -3328,7 +3280,8 @@ func putChanInfo(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
channel.chanStatus, channel.FundingBroadcastHeight,
channel.NumConfsRequired, channel.ChannelFlags,
channel.IdentityPub, channel.Capacity, channel.TotalMSatSent,
channel.TotalMSatReceived,
channel.TotalMSatReceived, channel.InitialLocalBalance,
channel.InitialRemoteBalance,
); err != nil {
return err
}
@ -3515,7 +3468,8 @@ func fetchChanInfo(chanBucket kvdb.RBucket, channel *OpenChannel) error {
&channel.chanStatus, &channel.FundingBroadcastHeight,
&channel.NumConfsRequired, &channel.ChannelFlags,
&channel.IdentityPub, &channel.Capacity, &channel.TotalMSatSent,
&channel.TotalMSatReceived,
&channel.TotalMSatReceived, &channel.InitialLocalBalance,
&channel.InitialRemoteBalance,
); err != nil {
return err
}
@ -3690,31 +3644,6 @@ func makeLogKey(updateNum uint64) [8]byte {
return key
}
// appendChannelLogEntry serializes the passed commitment and writes it into
// the given revocation log bucket, keyed by the commitment's height encoded
// in big-endian form.
func appendChannelLogEntry(log kvdb.RwBucket,
	commit *ChannelCommitment) error {

	// Serialize the commitment into a scratch buffer first.
	var buf bytes.Buffer
	if err := serializeChanCommit(&buf, commit); err != nil {
		return err
	}

	// The commit height acts as the entry's key within the bucket.
	key := makeLogKey(commit.CommitHeight)

	return log.Put(key[:], buf.Bytes())
}
// fetchChannelLogEntry reads and deserializes the revocation log entry stored
// under the given update number. If no entry exists at that height,
// ErrLogEntryNotFound is returned.
func fetchChannelLogEntry(log kvdb.RBucket,
	updateNum uint64) (ChannelCommitment, error) {

	key := makeLogKey(updateNum)

	// A nil value means the bucket holds no entry for this height.
	rawCommit := log.Get(key[:])
	if rawCommit == nil {
		return ChannelCommitment{}, ErrLogEntryNotFound
	}

	return deserializeChanCommit(bytes.NewReader(rawCommit))
}
func fetchThawHeight(chanBucket kvdb.RBucket) (uint32, error) {
var height uint32

View File

@ -52,8 +52,17 @@ var (
Port: 18555,
}
// keyLocIndex is the KeyLocator Index we use for TestKeyLocatorEncoding.
// keyLocIndex is the KeyLocator Index we use for
// TestKeyLocatorEncoding.
keyLocIndex = uint32(2049)
// dummyLocalOutputIndex specifies a default value for our output index
// in this test.
dummyLocalOutputIndex = uint32(0)
// dummyRemoteOutIndex specifies a default value for their output index
// in this test.
dummyRemoteOutIndex = uint32(1)
)
// testChannelParams is a struct which details the specifics of how a channel
@ -78,25 +87,6 @@ type testChannelParams struct {
// default channel that is created for testing.
type testChannelOption func(params *testChannelParams)
// channelCommitmentOption is an option which allows overwriting of the default
// commitment height and balances. The local boolean can be used to set these
// balances on the local or remote commit.
func channelCommitmentOption(height uint64, localBalance,
	remoteBalance lnwire.MilliSatoshi, local bool) testChannelOption {

	return func(params *testChannelParams) {
		// Pick which of the two commitments we're mutating, then set
		// the fields in one place.
		commit := &params.channel.RemoteCommitment
		if local {
			commit = &params.channel.LocalCommitment
		}

		commit.CommitHeight = height
		commit.LocalBalance = localBalance
		commit.RemoteBalance = remoteBalance
	}
}
// pendingHeightOption is an option which can be used to set the height the
// channel is marked as pending at.
func pendingHeightOption(height uint32) testChannelOption {
@ -352,6 +342,8 @@ func createTestChannelState(t *testing.T, cdb *ChannelStateDB) *OpenChannel {
Packager: NewChannelPackager(chanID),
FundingTxn: channels.TestFundingTx,
ThawHeight: uint32(defaultPendingHeight),
InitialLocalBalance: lnwire.MilliSatoshi(9000),
InitialRemoteBalance: lnwire.MilliSatoshi(3000),
}
}
@ -565,6 +557,32 @@ func assertCommitmentEqual(t *testing.T, a, b *ChannelCommitment) {
}
}
// assertRevocationLogEntryEqual asserts that, for all the fields of a given
// revocation log entry, their values match those on a given ChannelCommitment.
func assertRevocationLogEntryEqual(t *testing.T, c *ChannelCommitment,
	r *RevocationLog) {

	// The log entry must carry the txid of the commitment it was derived
	// from.
	require.EqualValues(
		t, r.CommitTxHash, c.CommitTx.TxHash(), "CommitTx mismatch",
	)

	// Both sides must agree on the number of HTLCs...
	require.Equal(t, len(r.HTLCEntries), len(c.Htlcs), "HTLCs len mismatch")

	// ...and every entry must mirror its corresponding HTLC's fields.
	for i, entry := range r.HTLCEntries {
		htlc := c.Htlcs[i]

		require.Equal(t, entry.RHash, htlc.RHash, "RHash mismatch")
		require.Equal(
			t, entry.Amt, htlc.Amt.ToSatoshis(), "Amt mismatch",
		)
		require.Equal(
			t, entry.RefundTimeout, htlc.RefundTimeout,
			"RefundTimeout mismatch",
		)
		require.EqualValues(
			t, entry.OutputIndex, htlc.OutputIndex,
			"OutputIndex mismatch",
		)
		require.Equal(
			t, entry.Incoming, htlc.Incoming, "Incoming mismatch",
		)
	}
}
func TestChannelStateTransition(t *testing.T) {
t.Parallel()
@ -765,7 +783,9 @@ func TestChannelStateTransition(t *testing.T) {
fwdPkg := NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight,
diskCommitDiff.LogUpdates, nil)
err = channel.AdvanceCommitChainTail(fwdPkg, nil)
err = channel.AdvanceCommitChainTail(
fwdPkg, nil, dummyLocalOutputIndex, dummyRemoteOutIndex,
)
if err != nil {
t.Fatalf("unable to append to revocation log: %v", err)
}
@ -778,24 +798,32 @@ func TestChannelStateTransition(t *testing.T) {
// We should be able to fetch the channel delta created above by its
// update number with all the state properly reconstructed.
diskPrevCommit, err := channel.FindPreviousState(
diskPrevCommit, _, err := channel.FindPreviousState(
oldRemoteCommit.CommitHeight,
)
if err != nil {
t.Fatalf("unable to fetch past delta: %v", err)
}
// Check the output indexes are saved as expected.
require.EqualValues(
t, dummyLocalOutputIndex, diskPrevCommit.OurOutputIndex,
)
require.EqualValues(
t, dummyRemoteOutIndex, diskPrevCommit.TheirOutputIndex,
)
// The two deltas (the original vs the on-disk version) should be
// identical, and all HTLC data should properly be retained.
assertCommitmentEqual(t, &oldRemoteCommit, diskPrevCommit)
assertRevocationLogEntryEqual(t, &oldRemoteCommit, diskPrevCommit)
// The state number recovered from the tail of the revocation log
// should be identical to this current state.
logTail, err := channel.RevocationLogTail()
logTailHeight, err := channel.revocationLogTailCommitHeight()
if err != nil {
t.Fatalf("unable to retrieve log: %v", err)
}
if logTail.CommitHeight != oldRemoteCommit.CommitHeight {
if logTailHeight != oldRemoteCommit.CommitHeight {
t.Fatal("update number doesn't match")
}
@ -813,25 +841,38 @@ func TestChannelStateTransition(t *testing.T) {
fwdPkg = NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight, nil, nil)
err = channel.AdvanceCommitChainTail(fwdPkg, nil)
err = channel.AdvanceCommitChainTail(
fwdPkg, nil, dummyLocalOutputIndex, dummyRemoteOutIndex,
)
if err != nil {
t.Fatalf("unable to append to revocation log: %v", err)
}
// Once again, fetch the state and ensure it has been properly updated.
prevCommit, err := channel.FindPreviousState(oldRemoteCommit.CommitHeight)
prevCommit, _, err := channel.FindPreviousState(
oldRemoteCommit.CommitHeight,
)
if err != nil {
t.Fatalf("unable to fetch past delta: %v", err)
}
assertCommitmentEqual(t, &oldRemoteCommit, prevCommit)
// Check the output indexes are saved as expected.
require.EqualValues(
t, dummyLocalOutputIndex, diskPrevCommit.OurOutputIndex,
)
require.EqualValues(
t, dummyRemoteOutIndex, diskPrevCommit.TheirOutputIndex,
)
assertRevocationLogEntryEqual(t, &oldRemoteCommit, prevCommit)
// Once again, state number recovered from the tail of the revocation
// log should be identical to this current state.
logTail, err = channel.RevocationLogTail()
logTailHeight, err = channel.revocationLogTailCommitHeight()
if err != nil {
t.Fatalf("unable to retrieve log: %v", err)
}
if logTail.CommitHeight != oldRemoteCommit.CommitHeight {
if logTailHeight != oldRemoteCommit.CommitHeight {
t.Fatal("update number doesn't match")
}
@ -877,7 +918,9 @@ func TestChannelStateTransition(t *testing.T) {
// Attempting to find previous states on the channel should fail as the
// revocation log has been deleted.
_, err = updatedChannel[0].FindPreviousState(oldRemoteCommit.CommitHeight)
_, _, err = updatedChannel[0].FindPreviousState(
oldRemoteCommit.CommitHeight,
)
if err == nil {
t.Fatal("revocation log search should have failed")
}
@ -1413,174 +1456,6 @@ func TestCloseChannelStatus(t *testing.T) {
}
}
// TestBalanceAtHeight tests lookup of our local and remote balance at a given
// height. It covers the three happy paths (current local commit, current
// remote commit, and a commit persisted in the revocation log) plus the two
// error cases (height missing from the log, and height not yet reached).
func TestBalanceAtHeight(t *testing.T) {
	const (
		// Values that will be set on our current local commit in
		// memory.
		localHeight        = 2
		localLocalBalance  = 1000
		localRemoteBalance = 1500

		// Values that will be set on our current remote commit in
		// memory.
		remoteHeight        = 3
		remoteLocalBalance  = 2000
		remoteRemoteBalance = 2500

		// Values that will be written to disk in the revocation log.
		oldHeight        = 0
		oldLocalBalance  = 200
		oldRemoteBalance = 300

		// Heights to test error cases.
		unknownHeight   = 1
		unreachedHeight = 4
	)

	// putRevokedState is a helper function used to put commitments in
	// the revocation log bucket to test lookup of balances at heights that
	// are not our current height.
	putRevokedState := func(c *OpenChannel, height uint64, local,
		remote lnwire.MilliSatoshi) error {

		err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
			chanBucket, err := fetchChanBucketRw(
				tx, c.IdentityPub, &c.FundingOutpoint,
				c.ChainHash,
			)
			if err != nil {
				return err
			}

			logKey := revocationLogBucket
			logBucket, err := chanBucket.CreateBucketIfNotExists(
				logKey,
			)
			if err != nil {
				return err
			}

			// Make a copy of our current commitment so we do not
			// need to re-fill all the required fields and copy in
			// our new desired values.
			commit := c.LocalCommitment
			commit.CommitHeight = height
			commit.LocalBalance = local
			commit.RemoteBalance = remote

			return appendChannelLogEntry(logBucket, &commit)
		}, func() {})

		return err
	}

	tests := []struct {
		name                  string
		targetHeight          uint64
		expectedLocalBalance  lnwire.MilliSatoshi
		expectedRemoteBalance lnwire.MilliSatoshi
		expectedError         error
	}{
		{
			name:                  "target is current local height",
			targetHeight:          localHeight,
			expectedLocalBalance:  localLocalBalance,
			expectedRemoteBalance: localRemoteBalance,
			expectedError:         nil,
		},
		{
			name:                  "target is current remote height",
			targetHeight:          remoteHeight,
			expectedLocalBalance:  remoteLocalBalance,
			expectedRemoteBalance: remoteRemoteBalance,
			expectedError:         nil,
		},
		{
			name:                  "need to lookup commit",
			targetHeight:          oldHeight,
			expectedLocalBalance:  oldLocalBalance,
			expectedRemoteBalance: oldRemoteBalance,
			expectedError:         nil,
		},
		{
			name:                  "height not found",
			targetHeight:          unknownHeight,
			expectedLocalBalance:  0,
			expectedRemoteBalance: 0,
			expectedError:         ErrLogEntryNotFound,
		},
		{
			name:                  "height not reached",
			targetHeight:          unreachedHeight,
			expectedLocalBalance:  0,
			expectedRemoteBalance: 0,
			expectedError:         errHeightNotReached,
		},
	}

	for _, test := range tests {
		// Pin the loop variable so the parallel subtest below captures
		// its own copy (required before Go 1.22).
		test := test

		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			fullDB, cleanUp, err := MakeTestDB()
			if err != nil {
				t.Fatalf("unable to make test database: %v",
					err)
			}
			defer cleanUp()

			cdb := fullDB.ChannelStateDB()

			// Create options to set the heights and balances of
			// our local and remote commitments.
			localCommitOpt := channelCommitmentOption(
				localHeight, localLocalBalance,
				localRemoteBalance, true,
			)

			remoteCommitOpt := channelCommitmentOption(
				remoteHeight, remoteLocalBalance,
				remoteRemoteBalance, false,
			)

			// Create an open channel.
			channel := createTestChannel(
				t, cdb, openChannelOption(),
				localCommitOpt, remoteCommitOpt,
			)

			// Write an older commit to disk.
			err = putRevokedState(channel, oldHeight,
				oldLocalBalance, oldRemoteBalance)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

			// Query the balances at the height under test and
			// compare against the expected outcome.
			local, remote, err := channel.BalancesAtHeight(
				test.targetHeight,
			)
			if err != test.expectedError {
				t.Fatalf("expected: %v, got: %v",
					test.expectedError, err)
			}

			if local != test.expectedLocalBalance {
				t.Fatalf("expected local: %v, got: %v",
					test.expectedLocalBalance, local)
			}

			if remote != test.expectedRemoteBalance {
				t.Fatalf("expected remote: %v, got: %v",
					test.expectedRemoteBalance, remote)
			}
		})
	}
}
// TestHasChanStatus asserts the behavior of HasChanStatus by checking the
// behavior of various status flags in addition to the special case of
// ChanStatusDefault which is treated like a flag in the code base even though

View File

@ -19,6 +19,7 @@ import (
"github.com/lightningnetwork/lnd/channeldb/migration21"
"github.com/lightningnetwork/lnd/channeldb/migration23"
"github.com/lightningnetwork/lnd/channeldb/migration24"
"github.com/lightningnetwork/lnd/channeldb/migration25"
"github.com/lightningnetwork/lnd/channeldb/migration_01_to_11"
"github.com/lightningnetwork/lnd/clock"
"github.com/lightningnetwork/lnd/kvdb"
@ -205,6 +206,12 @@ var (
number: 24,
migration: migration24.MigrateFwdPkgCleanup,
},
{
// Save the initial local/remote balances in channel
// info.
number: 25,
migration: migration25.MigrateInitialBalances,
},
}
// Big endian is the preferred byte order, due to cursor scans over

View File

@ -409,7 +409,9 @@ func TestRestoreChannelShells(t *testing.T) {
if err != ErrNoRestoredChannelMutation {
t.Fatalf("able to mutate restored channel")
}
err = channel.AdvanceCommitChainTail(nil, nil)
err = channel.AdvanceCommitChainTail(
nil, nil, dummyLocalOutputIndex, dummyRemoteOutIndex,
)
if err != ErrNoRestoredChannelMutation {
t.Fatalf("able to mutate restored channel")
}

View File

@ -70,7 +70,7 @@ func MigrateFwdPkgCleanup(tx kvdb.RwTx) error {
// Iterate over all close channels and remove their forwarding packages.
for _, summery := range chanSummaries {
sourceBytes := makeLogKey(summery.ShortChanID.ToUint64())
sourceBytes := MakeLogKey(summery.ShortChanID.ToUint64())
// First, we will try to find the corresponding bucket. If there
// is not a nested bucket matching the ShortChanID, we will skip
@ -112,7 +112,7 @@ func deserializeCloseChannelSummary(
}
// makeLogKey converts a uint64 into an 8 byte array.
func makeLogKey(updateNum uint64) [8]byte {
func MakeLogKey(updateNum uint64) [8]byte {
var key [8]byte
binary.BigEndian.PutUint64(key[:], updateNum)
return key

View File

@ -187,7 +187,7 @@ func genAfterMigration(deleted, untouched []int) func(kvdb.RwTx) error {
// Reading deleted buckets should return nil
for _, id := range deleted {
chanID := lnwire.NewShortChanIDFromInt(uint64(id))
sourceKey := makeLogKey(chanID.ToUint64())
sourceKey := MakeLogKey(chanID.ToUint64())
sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
if sourceBkt != nil {
return fmt.Errorf(
@ -200,7 +200,7 @@ func genAfterMigration(deleted, untouched []int) func(kvdb.RwTx) error {
// Reading untouched buckets should return not nil
for _, id := range untouched {
chanID := lnwire.NewShortChanIDFromInt(uint64(id))
sourceKey := makeLogKey(chanID.ToUint64())
sourceKey := MakeLogKey(chanID.ToUint64())
sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
if sourceBkt == nil {
return fmt.Errorf(
@ -259,7 +259,7 @@ func createTestFwdPkgBucket(tx kvdb.RwTx, chanID lnwire.ShortChannelID) error {
return err
}
source := makeLogKey(chanID.ToUint64())
source := MakeLogKey(chanID.ToUint64())
if _, err := fwdPkgBkt.CreateBucketIfNotExists(source[:]); err != nil {
return err
}

View File

@ -0,0 +1,722 @@
package migration25
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
lnwire "github.com/lightningnetwork/lnd/channeldb/migration/lnwire21"
mig24 "github.com/lightningnetwork/lnd/channeldb/migration24"
mig "github.com/lightningnetwork/lnd/channeldb/migration_01_to_11"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/tlv"
)
const (
	// keyLocType is the tlv type definition used to serialize and
	// deserialize a KeyLocator from the database.
	keyLocType tlv.Type = 1
)

var (
	// chanCommitmentKey can be accessed within the sub-bucket for a
	// particular channel. This key stores the up to date commitment state
	// for a particular channel party. Appending a 0 to the end of this key
	// indicates it's the commitment for the local party, and appending a 1
	// to the end of this key indicates it's the commitment for the remote
	// party.
	chanCommitmentKey = []byte("chan-commitment-key")

	// revocationLogBucketLegacy is the legacy bucket where we store the
	// revocation log in old format.
	revocationLogBucketLegacy = []byte("revocation-log-key")

	// localUpfrontShutdownKey can be accessed within the bucket for a
	// channel (identified by its chanPoint). This key stores an optional
	// upfront shutdown script for the local peer.
	localUpfrontShutdownKey = []byte("local-upfront-shutdown-key")

	// remoteUpfrontShutdownKey can be accessed within the bucket for a
	// channel (identified by its chanPoint). This key stores an optional
	// upfront shutdown script for the remote peer.
	remoteUpfrontShutdownKey = []byte("remote-upfront-shutdown-key")

	// lastWasRevokeKey is a key that stores true when the last update we
	// sent was a revocation and false when it was a commitment signature.
	// This is nil in the case of new channels with no updates exchanged.
	lastWasRevokeKey = []byte("last-was-revoke")

	// ErrNoChanInfoFound is returned when a particular channel does not
	// have any channel state.
	ErrNoChanInfoFound = fmt.Errorf("no chan info found")

	// ErrNoPastDeltas is returned when the channel delta bucket hasn't been
	// created.
	ErrNoPastDeltas = fmt.Errorf("channel has no recorded deltas")

	// ErrLogEntryNotFound is returned when we cannot find a log entry at
	// the height requested in the revocation log.
	ErrLogEntryNotFound = fmt.Errorf("log entry not found")

	// ErrNoCommitmentsFound is returned when a channel has not set
	// commitment states.
	ErrNoCommitmentsFound = fmt.Errorf("no commitments found")
)
// ChannelType is an enum-like type that describes one of several possible
// channel types. Each open channel is associated with a particular type as the
// channel type may determine how higher level operations are conducted such as
// fee negotiation, channel closing, the format of HTLCs, etc. Structure-wise,
// a ChannelType is a bit field, with each bit denoting a modification from the
// base channel type of single funder.
type ChannelType uint8

const (
	// NOTE: iota is deliberately avoided here, as these values are
	// persisted to the database and therefore must remain stable
	// long-term.

	// SingleFunderBit represents a channel wherein one party solely funds
	// the entire capacity of the channel.
	SingleFunderBit ChannelType = 0

	// DualFunderBit represents a channel wherein both parties contribute
	// funds towards the total capacity of the channel. The channel may be
	// funded symmetrically or asymmetrically.
	DualFunderBit ChannelType = 1 << 0

	// SingleFunderTweaklessBit is similar to the basic SingleFunder channel
	// type, but it omits the tweak for one's key in the commitment
	// transaction of the remote party.
	SingleFunderTweaklessBit ChannelType = 1 << 1

	// NoFundingTxBit denotes if we have the funding transaction locally on
	// disk. This bit may be on if the funding transaction was crafted by a
	// wallet external to the primary daemon.
	NoFundingTxBit ChannelType = 1 << 2

	// AnchorOutputsBit indicates that the channel makes use of anchor
	// outputs to bump the commitment transaction's effective feerate. This
	// channel type also uses a delayed to_remote output script.
	AnchorOutputsBit ChannelType = 1 << 3

	// FrozenBit indicates that the channel is a frozen channel, meaning
	// that only the responder can decide to cooperatively close the
	// channel.
	FrozenBit ChannelType = 1 << 4

	// ZeroHtlcTxFeeBit indicates that the channel should use zero-fee
	// second-level HTLC transactions.
	ZeroHtlcTxFeeBit ChannelType = 1 << 5

	// LeaseExpirationBit indicates that the channel has been leased for a
	// period of time, constraining every output that pays to the channel
	// initiator with an additional CLTV of the lease maturity.
	LeaseExpirationBit ChannelType = 1 << 6
)

// IsSingleFunder returns true if the channel type is one of the known single
// funder variants, i.e. the dual-funder bit is unset.
func (c ChannelType) IsSingleFunder() bool {
	return c&DualFunderBit == 0
}

// IsDualFunder returns true if the ChannelType has the DualFunderBit set.
func (c ChannelType) IsDualFunder() bool {
	return c&DualFunderBit != 0
}

// IsTweakless returns true if the target channel uses a commitment that
// doesn't tweak the key for the remote party.
func (c ChannelType) IsTweakless() bool {
	return c&SingleFunderTweaklessBit != 0
}

// HasFundingTx returns true if this channel type is one that has a funding
// transaction stored locally, i.e. the no-funding-tx bit is unset.
func (c ChannelType) HasFundingTx() bool {
	return c&NoFundingTxBit == 0
}

// HasAnchors returns true if this channel type has anchor outputs on its
// commitment.
func (c ChannelType) HasAnchors() bool {
	return c&AnchorOutputsBit != 0
}

// ZeroHtlcTxFee returns true if this channel type uses second-level HTLC
// transactions signed with zero-fee.
func (c ChannelType) ZeroHtlcTxFee() bool {
	return c&ZeroHtlcTxFeeBit != 0
}

// IsFrozen returns true if the channel is considered to be "frozen". A frozen
// channel means that only the responder can initiate a cooperative channel
// closure.
func (c ChannelType) IsFrozen() bool {
	return c&FrozenBit != 0
}

// HasLeaseExpiration returns true if the channel originated from a lease.
func (c ChannelType) HasLeaseExpiration() bool {
	return c&LeaseExpirationBit != 0
}
// ChannelStatus is a bit vector used to indicate whether an OpenChannel is in
// the default usable state, or a state where it shouldn't be used. The zero
// value (no bits set) is the default, usable state.
type ChannelStatus uint8

var (
	// ChanStatusDefault is the normal state of an open channel. It is the
	// zero value, i.e. the absence of any other status flag.
	ChanStatusDefault ChannelStatus

	// ChanStatusBorked indicates that the channel has entered an
	// irreconcilable state, triggered by a state desynchronization or
	// channel breach. Channels in this state should never be added to the
	// htlc switch.
	ChanStatusBorked ChannelStatus = 1

	// ChanStatusCommitBroadcasted indicates that a commitment for this
	// channel has been broadcasted.
	ChanStatusCommitBroadcasted ChannelStatus = 1 << 1

	// ChanStatusLocalDataLoss indicates that we have lost channel state
	// for this channel, and broadcasting our latest commitment might be
	// considered a breach.
	//
	// TODO(halseh): actually enforce that we are not force closing such a
	// channel.
	ChanStatusLocalDataLoss ChannelStatus = 1 << 2

	// ChanStatusRestored is a status flag that signals that the channel
	// has been restored, and doesn't have all the fields a typical channel
	// will have.
	ChanStatusRestored ChannelStatus = 1 << 3

	// ChanStatusCoopBroadcasted indicates that a cooperative close for
	// this channel has been broadcasted. Older cooperatively closed
	// channels will only have this status set. Newer ones will also have
	// close initiator information stored using the local/remote initiator
	// status. This status is set in conjunction with the initiator status
	// so that we do not need to check multiple channel statues for
	// cooperative closes.
	ChanStatusCoopBroadcasted ChannelStatus = 1 << 4

	// ChanStatusLocalCloseInitiator indicates that we initiated closing
	// the channel.
	ChanStatusLocalCloseInitiator ChannelStatus = 1 << 5

	// ChanStatusRemoteCloseInitiator indicates that the remote node
	// initiated closing the channel.
	ChanStatusRemoteCloseInitiator ChannelStatus = 1 << 6
)
// chanStatusStrings maps a ChannelStatus to a human friendly string that
// describes that status.
var chanStatusStrings = map[ChannelStatus]string{
	ChanStatusDefault:              "ChanStatusDefault",
	ChanStatusBorked:               "ChanStatusBorked",
	ChanStatusCommitBroadcasted:    "ChanStatusCommitBroadcasted",
	ChanStatusLocalDataLoss:        "ChanStatusLocalDataLoss",
	ChanStatusRestored:             "ChanStatusRestored",
	ChanStatusCoopBroadcasted:      "ChanStatusCoopBroadcasted",
	ChanStatusLocalCloseInitiator:  "ChanStatusLocalCloseInitiator",
	ChanStatusRemoteCloseInitiator: "ChanStatusRemoteCloseInitiator",
}

// orderedChanStatusFlags is an in-order list of all the channel status flags.
// It is used by ChannelStatus.String to render flags deterministically.
var orderedChanStatusFlags = []ChannelStatus{
	ChanStatusBorked,
	ChanStatusCommitBroadcasted,
	ChanStatusLocalDataLoss,
	ChanStatusRestored,
	ChanStatusCoopBroadcasted,
	ChanStatusLocalCloseInitiator,
	ChanStatusRemoteCloseInitiator,
}
// String returns a human-readable representation of the ChannelStatus. Known
// flags are rendered in a fixed order joined by "|"; any unknown residual
// bits are appended as a hex value.
func (c ChannelStatus) String() string {
	// The zero value maps directly to the default status string.
	if c == ChanStatusDefault {
		return chanStatusStrings[ChanStatusDefault]
	}

	// Collect the string for each known flag that is set, clearing the
	// flag from c as we go so only unknown bits remain afterwards.
	var parts []string
	for _, flag := range orderedChanStatusFlags {
		if c&flag == flag {
			parts = append(parts, chanStatusStrings[flag])
			c -= flag
		}
	}

	// Render any remaining, unaccounted-for bits as hex.
	if c != 0 {
		parts = append(parts, "0x"+strconv.FormatUint(uint64(c), 16))
	}

	return strings.Join(parts, "|")
}
// OpenChannel embeds a mig.OpenChannel with the extra up-to-date fields used
// by this migration.
//
// NOTE: doesn't have the Packager field as it's not used in current migration.
type OpenChannel struct {
	mig.OpenChannel

	// ChanType denotes which type of channel this is.
	ChanType ChannelType

	// chanStatus is the current status of this channel. If it is not in
	// the state Default, it should not be used for forwarding payments.
	chanStatus ChannelStatus

	// InitialLocalBalance is the balance we have during the channel
	// opening. When we are not the initiator, this value represents the
	// push amount.
	InitialLocalBalance lnwire.MilliSatoshi

	// InitialRemoteBalance is the balance they have during the channel
	// opening.
	InitialRemoteBalance lnwire.MilliSatoshi

	// LocalShutdownScript is set to a pre-set script if the channel was
	// opened by the local node with option_upfront_shutdown_script set. If
	// the option was not set, the field is empty.
	LocalShutdownScript lnwire.DeliveryAddress

	// RemoteShutdownScript is set to a pre-set script if the channel was
	// opened by the remote node with option_upfront_shutdown_script set.
	// If the option was not set, the field is empty.
	RemoteShutdownScript lnwire.DeliveryAddress

	// ThawHeight is the height when a frozen channel once again becomes a
	// normal channel. If this is zero, then there're no restrictions on
	// this channel. If the value is lower than 500,000, then it's
	// interpreted as a relative height, or an absolute height otherwise.
	ThawHeight uint32

	// LastWasRevoke is a boolean that determines if the last update we
	// sent was a revocation (true) or a commitment signature (false).
	LastWasRevoke bool

	// RevocationKeyLocator stores the KeyLocator information that we will
	// need to derive the shachain root for this channel. This allows us to
	// have private key isolation from lnd.
	RevocationKeyLocator keychain.KeyLocator
}
// hasChanStatus returns true if the given status flag(s) are set on the
// channel. ChanStatusDefault is special cased since it isn't actually a flag,
// but the absence of all flags, so it only matches an exactly-zero status.
func (c *OpenChannel) hasChanStatus(status ChannelStatus) bool {
	if status == ChanStatusDefault {
		return c.chanStatus == status
	}

	return c.chanStatus&status == status
}
// FundingTxPresent returns true if we expect the funding transaction to be
// found on disk or already populated within the passed open channel struct.
func (c *OpenChannel) FundingTxPresent() bool {
	// Only single-funder channels that we initiated carry the funding
	// transaction, and restored channels never have it available.
	return c.ChanType.IsSingleFunder() &&
		c.ChanType.HasFundingTx() &&
		c.IsInitiator &&
		!c.hasChanStatus(ChanStatusRestored)
}
// fetchChanInfo deserializes the channel info based on the legacy boolean.
// When legacy is true, the InitialLocalBalance/InitialRemoteBalance fields
// are not expected on disk and are skipped. The read order here must mirror
// the write order in putChanInfo exactly.
func fetchChanInfo(chanBucket kvdb.RBucket, c *OpenChannel, legacy bool) error {
	infoBytes := chanBucket.Get(chanInfoKey)
	if infoBytes == nil {
		return ErrNoChanInfoFound
	}
	r := bytes.NewReader(infoBytes)

	// Read the fixed, always-present channel fields first. The type and
	// status are read into the migration package's types and converted to
	// this package's types below.
	var (
		chanType   mig.ChannelType
		chanStatus mig.ChannelStatus
	)
	if err := mig.ReadElements(r,
		&chanType, &c.ChainHash, &c.FundingOutpoint,
		&c.ShortChannelID, &c.IsPending, &c.IsInitiator,
		&chanStatus, &c.FundingBroadcastHeight,
		&c.NumConfsRequired, &c.ChannelFlags,
		&c.IdentityPub, &c.Capacity, &c.TotalMSatSent,
		&c.TotalMSatReceived,
	); err != nil {
		return err
	}
	c.ChanType = ChannelType(chanType)
	c.chanStatus = ChannelStatus(chanStatus)

	// If this is not the legacy format, we need to read the extra two new
	// fields.
	if !legacy {
		if err := mig.ReadElements(r,
			&c.InitialLocalBalance, &c.InitialRemoteBalance,
		); err != nil {
			return err
		}
	}

	// For single funder channels that we initiated and have the funding
	// transaction to, read the funding txn.
	if c.FundingTxPresent() {
		if err := mig.ReadElement(r, &c.FundingTxn); err != nil {
			return err
		}
	}

	// Both channel configurations follow the (optional) funding txn.
	if err := mig.ReadChanConfig(r, &c.LocalChanCfg); err != nil {
		return err
	}
	if err := mig.ReadChanConfig(r, &c.RemoteChanCfg); err != nil {
		return err
	}

	// Retrieve the boolean stored under lastWasRevokeKey.
	lastWasRevokeBytes := chanBucket.Get(lastWasRevokeKey)
	if lastWasRevokeBytes == nil {
		// If nothing has been stored under this key, we store false in
		// the OpenChannel struct.
		c.LastWasRevoke = false
	} else {
		// Otherwise, read the value into the LastWasRevoke field.
		revokeReader := bytes.NewReader(lastWasRevokeBytes)
		err := mig.ReadElements(revokeReader, &c.LastWasRevoke)
		if err != nil {
			return err
		}
	}

	// The revocation key locator is appended to the static data as a TLV
	// stream; decode it from the remainder of the reader.
	keyLocRecord := MakeKeyLocRecord(keyLocType, &c.RevocationKeyLocator)
	tlvStream, err := tlv.NewStream(keyLocRecord)
	if err != nil {
		return err
	}
	if err := tlvStream.Decode(r); err != nil {
		return err
	}

	// Finally, read the optional shutdown scripts.
	if err := GetOptionalUpfrontShutdownScript(
		chanBucket, localUpfrontShutdownKey, &c.LocalShutdownScript,
	); err != nil {
		return err
	}

	return GetOptionalUpfrontShutdownScript(
		chanBucket, remoteUpfrontShutdownKey, &c.RemoteShutdownScript,
	)
}
// putChanInfo serializes the channel info based on the legacy boolean and
// saves it to disk. When legacy is true, the two initial-balance fields are
// omitted. The write order here must mirror the read order in fetchChanInfo
// exactly.
func putChanInfo(chanBucket kvdb.RwBucket, c *OpenChannel, legacy bool) error {
	var w bytes.Buffer
	if err := mig.WriteElements(&w,
		mig.ChannelType(c.ChanType), c.ChainHash, c.FundingOutpoint,
		c.ShortChannelID, c.IsPending, c.IsInitiator,
		mig.ChannelStatus(c.chanStatus), c.FundingBroadcastHeight,
		c.NumConfsRequired, c.ChannelFlags,
		c.IdentityPub, c.Capacity, c.TotalMSatSent,
		c.TotalMSatReceived,
	); err != nil {
		return err
	}

	// If this is not legacy format, we need to write the extra two fields.
	if !legacy {
		if err := mig.WriteElements(&w,
			c.InitialLocalBalance, c.InitialRemoteBalance,
		); err != nil {
			return err
		}
	}

	// For single funder channels that we initiated, and we have the
	// funding transaction, then write the funding txn.
	if c.FundingTxPresent() {
		if err := mig.WriteElement(&w, c.FundingTxn); err != nil {
			return err
		}
	}

	// Both channel configurations follow the (optional) funding txn.
	if err := mig.WriteChanConfig(&w, &c.LocalChanCfg); err != nil {
		return err
	}
	if err := mig.WriteChanConfig(&w, &c.RemoteChanCfg); err != nil {
		return err
	}

	// Write the RevocationKeyLocator as the first entry in a tlv stream.
	keyLocRecord := MakeKeyLocRecord(
		keyLocType, &c.RevocationKeyLocator,
	)
	tlvStream, err := tlv.NewStream(keyLocRecord)
	if err != nil {
		return err
	}
	if err := tlvStream.Encode(&w); err != nil {
		return err
	}

	if err := chanBucket.Put(chanInfoKey, w.Bytes()); err != nil {
		return err
	}

	// Finally, add optional shutdown scripts for the local and remote peer
	// if they are present.
	if err := PutOptionalUpfrontShutdownScript(
		chanBucket, localUpfrontShutdownKey, c.LocalShutdownScript,
	); err != nil {
		return err
	}

	return PutOptionalUpfrontShutdownScript(
		chanBucket, remoteUpfrontShutdownKey, c.RemoteShutdownScript,
	)
}
// EKeyLocator is an encoder for keychain.KeyLocator. It writes the key family
// followed by the index, each as a uint32.
func EKeyLocator(w io.Writer, val interface{}, buf *[8]byte) error {
	v, ok := val.(*keychain.KeyLocator)
	if !ok {
		return tlv.NewTypeForEncodingErr(val, "keychain.KeyLocator")
	}

	if err := tlv.EUint32T(w, uint32(v.Family), buf); err != nil {
		return err
	}

	return tlv.EUint32T(w, v.Index, buf)
}
// DKeyLocator is a decoder for keychain.KeyLocator. It reads the key family
// followed by the index, each as a 4-byte uint32.
func DKeyLocator(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
	v, ok := val.(*keychain.KeyLocator)
	if !ok {
		return tlv.NewTypeForDecodingErr(val, "keychain.KeyLocator", l, 8)
	}

	var family uint32
	if err := tlv.DUint32(r, &family, buf, 4); err != nil {
		return err
	}
	v.Family = keychain.KeyFamily(family)

	return tlv.DUint32(r, &v.Index, buf, 4)
}
// MakeKeyLocRecord creates a Record out of a KeyLocator using the passed
// Type and the EKeyLocator and DKeyLocator functions. The size will always be
// 8 as KeyFamily is uint32 and the Index is uint32.
func MakeKeyLocRecord(typ tlv.Type, keyLoc *keychain.KeyLocator) tlv.Record {
	// Two uint32 fields give the record its fixed 8-byte size.
	const keyLocSize = 8
	return tlv.MakeStaticRecord(
		typ, keyLoc, keyLocSize, EKeyLocator, DKeyLocator,
	)
}
// PutOptionalUpfrontShutdownScript adds a shutdown script under the key
// provided if it has a non-zero length.
func PutOptionalUpfrontShutdownScript(chanBucket kvdb.RwBucket, key []byte,
	script []byte) error {

	// An empty script means no upfront shutdown script was set, so there
	// is nothing to persist.
	if len(script) == 0 {
		return nil
	}

	var b bytes.Buffer
	if err := mig.WriteElement(&b, script); err != nil {
		return err
	}

	return chanBucket.Put(key, b.Bytes())
}
// GetOptionalUpfrontShutdownScript reads the shutdown script stored under the
// key provided if it is present. Upfront shutdown scripts are optional, so the
// function returns with no error if the key is not present.
func GetOptionalUpfrontShutdownScript(chanBucket kvdb.RBucket, key []byte,
	script *lnwire.DeliveryAddress) error {

	// A missing key simply means no shutdown script was ever set.
	rawScript := chanBucket.Get(key)
	if rawScript == nil {
		return nil
	}

	var decoded []byte
	err := mig.ReadElement(bytes.NewReader(rawScript), &decoded)
	if err != nil {
		return err
	}
	*script = decoded

	return nil
}
// FetchChanCommitments fetches both the local and remote commitments. This
// function is exported so it can be used by later migrations.
func FetchChanCommitments(chanBucket kvdb.RBucket, channel *OpenChannel) error {
	// Restored channels don't have any commitments stored, so there is
	// nothing to read for them.
	if channel.hasChanStatus(ChanStatusRestored) {
		return nil
	}

	local, err := FetchChanCommitment(chanBucket, true)
	if err != nil {
		return err
	}
	channel.LocalCommitment = local

	remote, err := FetchChanCommitment(chanBucket, false)
	if err != nil {
		return err
	}
	channel.RemoteCommitment = remote

	return nil
}
// FetchChanCommitment fetches a channel commitment. This function is exported
// so it can be used by later migrations.
func FetchChanCommitment(chanBucket kvdb.RBucket,
	local bool) (mig.ChannelCommitment, error) {

	// The commitment key is the shared prefix plus a trailing byte that
	// distinguishes local (0x00) from remote (0x01).
	suffix := byte(0x01)
	if local {
		suffix = byte(0x00)
	}
	commitKey := append(chanCommitmentKey, suffix)

	commitBytes := chanBucket.Get(commitKey)
	if commitBytes == nil {
		return mig.ChannelCommitment{}, ErrNoCommitmentsFound
	}

	return mig.DeserializeChanCommit(bytes.NewReader(commitBytes))
}
// PutChanCommitment serializes the given channel commitment and stores it in
// the channel bucket, keyed on whether it is the local or remote commitment.
func PutChanCommitment(chanBucket kvdb.RwBucket, c *mig.ChannelCommitment,
	local bool) error {

	// Local commitments use the 0x00 suffix, remote ones 0x01.
	suffix := byte(0x01)
	if local {
		suffix = byte(0x00)
	}
	commitKey := append(chanCommitmentKey, suffix)

	var b bytes.Buffer
	if err := mig.SerializeChanCommit(&b, c); err != nil {
		return err
	}

	return chanBucket.Put(commitKey, b.Bytes())
}
// PutChanCommitments stores both the local and remote commitments for the
// given channel. Restored channels carry no commitments, so nothing is
// written for them.
func PutChanCommitments(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
	if channel.hasChanStatus(ChanStatusRestored) {
		return nil
	}

	if err := PutChanCommitment(
		chanBucket, &channel.LocalCommitment, true,
	); err != nil {
		return err
	}

	return PutChanCommitment(
		chanBucket, &channel.RemoteCommitment, false,
	)
}
// balancesAtHeight returns the local and remote balances on our commitment
// transactions as of a given height. This function is not exported as it's
// deprecated.
//
// NOTE: these are our balances *after* subtracting the commitment fee and
// anchor outputs.
func (c *OpenChannel) balancesAtHeight(chanBucket kvdb.RBucket,
	height uint64) (lnwire.MilliSatoshi, lnwire.MilliSatoshi, error) {

	// Check the current commitments first; the local one takes precedence
	// when both sit at the requested height.
	switch height {
	case c.LocalCommitment.CommitHeight:
		return c.LocalCommitment.LocalBalance,
			c.LocalCommitment.RemoteBalance, nil

	case c.RemoteCommitment.CommitHeight:
		return c.RemoteCommitment.LocalBalance,
			c.RemoteCommitment.RemoteBalance, nil
	}

	// Neither current commitment matches, so consult the revocation log
	// for the state at the requested height.
	commit, err := c.FindPreviousStateLegacy(chanBucket, height)
	if err != nil {
		return 0, 0, err
	}

	return commit.LocalBalance, commit.RemoteBalance, nil
}
// FindPreviousStateLegacy scans through the append-only log in an attempt to
// recover the previous channel state indicated by the update number. This
// method is intended to be used for obtaining the relevant data needed to
// claim all funds rightfully spendable in the case of an on-chain broadcast of
// the commitment transaction.
func (c *OpenChannel) FindPreviousStateLegacy(chanBucket kvdb.RBucket,
	updateNum uint64) (*mig.ChannelCommitment, error) {

	c.RLock()
	defer c.RUnlock()

	// Without the legacy revocation log bucket there are no past states
	// to recover.
	legacyBucket := chanBucket.NestedReadBucket(revocationLogBucketLegacy)
	if legacyBucket == nil {
		return nil, ErrNoPastDeltas
	}

	commit, err := fetchChannelLogEntry(legacyBucket, updateNum)
	if err != nil {
		return nil, err
	}

	return &commit, nil
}
// fetchChannelLogEntry reads the legacy revocation log entry stored under the
// given update number, returning ErrLogEntryNotFound if no entry exists.
func fetchChannelLogEntry(log kvdb.RBucket,
	updateNum uint64) (mig.ChannelCommitment, error) {

	key := mig24.MakeLogKey(updateNum)
	rawCommit := log.Get(key[:])
	if rawCommit == nil {
		return mig.ChannelCommitment{}, ErrLogEntryNotFound
	}

	return mig.DeserializeChanCommit(bytes.NewReader(rawCommit))
}

View File

@ -0,0 +1,14 @@
package migration25
import (
"github.com/btcsuite/btclog"
)
// log is a logger that is initialized as disabled. This means the package will
// not perform any logging by default until a logger is set.
var log = btclog.Disabled

// UseLogger uses a specified Logger to output package logging info. This
// should be used in preference to setting the package-level log variable
// directly.
func UseLogger(logger btclog.Logger) {
	log = logger
}

View File

@ -0,0 +1,211 @@
package migration25
import (
"bytes"
"fmt"
mig "github.com/lightningnetwork/lnd/channeldb/migration_01_to_11"
"github.com/lightningnetwork/lnd/kvdb"
)
var (
	// openChannelBucket stores all the currently open channels. This
	// bucket has a second, nested bucket which is keyed by a node's ID.
	// Within that node ID bucket, all attributes required to track,
	// update, and close a channel are stored.
	openChannelBucket = []byte("open-chan-bucket")

	// chanInfoKey can be accessed within the bucket for a channel
	// (identified by its chanPoint). This key stores all the static
	// information for a channel which is decided at the end of the
	// funding flow.
	chanInfoKey = []byte("chan-info-key")

	// ErrNoChanDBExists is returned when a channel bucket hasn't been
	// created.
	ErrNoChanDBExists = fmt.Errorf("channel db has not yet been created")

	// ErrNoActiveChannels is returned when there is no active (open)
	// channels within the database.
	ErrNoActiveChannels = fmt.Errorf("no active channels exist")

	// ErrChannelNotFound is returned when we attempt to locate a channel
	// for a specific chain, but it is not found.
	ErrChannelNotFound = fmt.Errorf("channel not found")
)
// MigrateInitialBalances patches the two new fields, InitialLocalBalance and
// InitialRemoteBalance, for all the open channels. It does so by reading the
// revocation log at height 0 to learn the initial balances and then updates
// the channel's info.
//
// The channel info is saved in the nested bucket which is accessible via
// nodePub:chainHash:chanPoint. If any of the sub-buckets turns out to be nil,
// we will log the error and continue to process the rest.
func MigrateInitialBalances(tx kvdb.RwTx) error {
	log.Infof("Migrating initial local and remote balances...")

	// Without the top-level bucket there are no channels to migrate, so
	// we can exit early.
	openChanBucket := tx.ReadWriteBucket(openChannelBucket)
	if openChanBucket == nil {
		return nil
	}

	// Gather all the open channels first.
	channels, err := findOpenChannels(openChanBucket)
	if err != nil {
		return err
	}

	// Patch the initial balances on each channel found.
	for _, channel := range channels {
		if err := migrateBalances(tx, channel); err != nil {
			return err
		}
	}

	return nil
}
// findOpenChannels finds all open channels by walking the three nested bucket
// levels (nodePub -> chainHash -> chanPoint) under the given root bucket.
// Missing sub-buckets are logged and skipped rather than treated as fatal.
func findOpenChannels(openChanBucket kvdb.RBucket) ([]*OpenChannel, error) {
	channels := []*OpenChannel{}

	// readChannel is a helper closure that reads the channel info from the
	// channel bucket.
	readChannel := func(chainBucket kvdb.RBucket, cp []byte) error {
		c := &OpenChannel{}

		// Read the sub-bucket level 3.
		chanBucket := chainBucket.NestedReadBucket(
			cp,
		)
		if chanBucket == nil {
			log.Errorf("unable to read bucket for chanPoint=%x", cp)
			return nil
		}

		// Get the old channel info. The `true` flag selects the legacy
		// on-disk format, which lacks the initial balance fields.
		if err := fetchChanInfo(chanBucket, c, true); err != nil {
			return fmt.Errorf("unable to fetch chan info: %v", err)
		}

		// Fetch the channel commitments, which are useful for freshly
		// open channels as they don't have any revocation logs and
		// their current commitments reflect the initial balances.
		if err := FetchChanCommitments(chanBucket, c); err != nil {
			return fmt.Errorf("unable to fetch chan commits: %v",
				err)
		}

		channels = append(channels, c)

		return nil
	}

	// Iterate the root bucket.
	err := openChanBucket.ForEach(func(nodePub, v []byte) error {
		// Ensure that this is a key the same size as a pubkey, and
		// also that it leads directly to a bucket.
		if len(nodePub) != 33 || v != nil {
			return nil
		}

		// Read the sub-bucket level 1.
		nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
		if nodeChanBucket == nil {
			log.Errorf("no bucket for node %x", nodePub)
			return nil
		}

		// Iterate the bucket.
		return nodeChanBucket.ForEach(func(chainHash, _ []byte) error {
			// Read the sub-bucket level 2.
			chainBucket := nodeChanBucket.NestedReadBucket(
				chainHash,
			)
			if chainBucket == nil {
				log.Errorf("unable to read bucket for chain=%x",
					chainHash)
				return nil
			}

			// Iterate the bucket.
			return chainBucket.ForEach(func(cp, _ []byte) error {
				return readChannel(chainBucket, cp)
			})
		})
	})
	if err != nil {
		return nil, err
	}

	return channels, nil
}
// migrateBalances queries the revocation log at height 0 to find the initial
// balances and save them to the channel info.
func migrateBalances(tx kvdb.RwTx, c *OpenChannel) error {
	chanBucket, err := fetchChanBucket(tx, c)
	if err != nil {
		return err
	}

	// At height 0 the balances are, by definition, the initial balances.
	localAmt, remoteAmt, err := c.balancesAtHeight(chanBucket, 0)
	if err != nil {
		return fmt.Errorf("unable to get initial balances: %v", err)
	}

	c.InitialLocalBalance = localAmt
	c.InitialRemoteBalance = remoteAmt

	// Persist the channel info in the new, non-legacy format.
	if err := putChanInfo(chanBucket, c, false); err != nil {
		return fmt.Errorf("unable to put chan info: %v", err)
	}

	return nil
}
// fetchChanBucket is a helper function that returns the bucket where a
// channel's data resides in given: the public key for the node, the outpoint,
// and the chainhash that the channel resides on.
func fetchChanBucket(tx kvdb.RwTx, c *OpenChannel) (kvdb.RwBucket, error) {
	// Level 0: the top level bucket holding all open channel data.
	rootBucket := tx.ReadWriteBucket(openChannelBucket)
	if rootBucket == nil {
		return nil, ErrNoChanDBExists
	}

	// Level 1: the bucket keyed by the remote node's public key.
	nodePub := c.IdentityPub.SerializeCompressed()
	nodeBucket := rootBucket.NestedReadWriteBucket(nodePub)
	if nodeBucket == nil {
		return nil, ErrNoActiveChannels
	}

	// Level 2: the bucket keyed by the chain hash.
	chainBucket := nodeBucket.NestedReadWriteBucket(c.ChainHash[:])
	if chainBucket == nil {
		return nil, ErrNoActiveChannels
	}

	// Level 3: the bucket keyed by the serialized funding outpoint.
	var chanPointBuf bytes.Buffer
	if err := mig.WriteOutpoint(&chanPointBuf, &c.FundingOutpoint); err != nil {
		return nil, err
	}
	chanBucket := chainBucket.NestedReadWriteBucket(chanPointBuf.Bytes())
	if chanBucket == nil {
		return nil, ErrChannelNotFound
	}

	return chanBucket, nil
}

View File

@ -0,0 +1,391 @@
package migration25
import (
"bytes"
"fmt"
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
lnwire "github.com/lightningnetwork/lnd/channeldb/migration/lnwire21"
mig24 "github.com/lightningnetwork/lnd/channeldb/migration24"
mig "github.com/lightningnetwork/lnd/channeldb/migration_01_to_11"
"github.com/lightningnetwork/lnd/channeldb/migtest"
"github.com/lightningnetwork/lnd/kvdb"
)
var (
	// Create dummy values to be stored in db.
	dummyPrivKey, _ = btcec.NewPrivateKey()
	dummyPubKey     = dummyPrivKey.PubKey()
	dummySig        = []byte{1, 2, 3}
	// dummyTx is a minimal coinbase-like transaction used as the commit
	// transaction in the fixtures below.
	dummyTx = &wire.MsgTx{
		Version: 1,
		TxIn: []*wire.TxIn{
			{
				PreviousOutPoint: wire.OutPoint{
					Hash:  chainhash.Hash{},
					Index: 0xffffffff,
				},
				Sequence: 0xffffffff,
			},
		},
		TxOut: []*wire.TxOut{
			{
				Value: 5000000000,
			},
		},
		LockTime: 5,
	}
	dummyOp = wire.OutPoint{
		Hash:  chainhash.Hash{},
		Index: 9,
	}
	// dummyHTLC is an in-flight HTLC used by the height-1 commitments.
	dummyHTLC = mig.HTLC{
		Signature:     dummySig,
		RHash:         [32]byte{},
		Amt:           100_000,
		RefundTimeout: 731583,
		OutputIndex:   1,
		Incoming:      true,
		HtlcIndex:     1,
		LogIndex:      1,
	}

	// ourAmt and theirAmt are the initial balances found in the local
	// channel commitment at height 0.
	ourAmt   = lnwire.MilliSatoshi(500_000)
	theirAmt = lnwire.MilliSatoshi(1000_000)

	// ourAmtRevoke and theirAmtRevoke are the initial balances found in
	// the revocation log at height 0.
	//
	// NOTE: they are made differently such that we can easily check the
	// source when patching the balances.
	ourAmtRevoke   = lnwire.MilliSatoshi(501_000)
	theirAmtRevoke = lnwire.MilliSatoshi(1001_000)

	// remoteCommit0 is the channel commitment at commit height 0. This is
	// also the revocation log we will use to patch the initial balances.
	remoteCommit0 = mig.ChannelCommitment{
		LocalBalance:  ourAmtRevoke,
		RemoteBalance: theirAmtRevoke,
		CommitFee:     btcutil.Amount(1),
		FeePerKw:      btcutil.Amount(1),
		CommitTx:      dummyTx,
		CommitSig:     dummySig,
		Htlcs:         []mig.HTLC{},
	}

	// localCommit0 is the channel commitment at commit height 0. This is
	// the channel commitment we will use to patch the initial balances
	// when there are no revocation logs.
	localCommit0 = mig.ChannelCommitment{
		LocalBalance:  ourAmt,
		RemoteBalance: theirAmt,
		CommitFee:     btcutil.Amount(1),
		FeePerKw:      btcutil.Amount(1),
		CommitTx:      dummyTx,
		CommitSig:     dummySig,
		Htlcs:         []mig.HTLC{},
	}

	// remoteCommit1 and localCommit1 are the channel commitment at commit
	// height 1.
	remoteCommit1 = mig.ChannelCommitment{
		CommitHeight:    1,
		LocalLogIndex:   1,
		LocalHtlcIndex:  1,
		RemoteLogIndex:  1,
		RemoteHtlcIndex: 1,
		LocalBalance:    ourAmt - dummyHTLC.Amt,
		RemoteBalance:   theirAmt + dummyHTLC.Amt,
		CommitFee:       btcutil.Amount(1),
		FeePerKw:        btcutil.Amount(1),
		CommitTx:        dummyTx,
		CommitSig:       dummySig,
		Htlcs:           []mig.HTLC{dummyHTLC},
	}
	localCommit1 = mig.ChannelCommitment{
		CommitHeight:    1,
		LocalLogIndex:   1,
		LocalHtlcIndex:  1,
		RemoteLogIndex:  1,
		RemoteHtlcIndex: 1,
		LocalBalance:    ourAmt - dummyHTLC.Amt,
		RemoteBalance:   theirAmt + dummyHTLC.Amt,
		CommitFee:       btcutil.Amount(1),
		FeePerKw:        btcutil.Amount(1),
		CommitTx:        dummyTx,
		CommitSig:       dummySig,
		Htlcs:           []mig.HTLC{dummyHTLC},
	}

	// openChannel0 is the OpenChannel at commit height 0. When this
	// variable is used, we expect to patch the initial balances from its
	// commitments.
	openChannel0 = &OpenChannel{
		OpenChannel: mig.OpenChannel{
			IdentityPub:      dummyPubKey,
			FundingOutpoint:  dummyOp,
			LocalCommitment:  localCommit0,
			RemoteCommitment: remoteCommit0,
		},
	}

	// openChannel1 is the OpenChannel at commit height 1. When this
	// variable is used, we expect to patch the initial balances from the
	// remote commitment at height 0.
	openChannel1 = &OpenChannel{
		OpenChannel: mig.OpenChannel{
			IdentityPub:      dummyPubKey,
			FundingOutpoint:  dummyOp,
			LocalCommitment:  localCommit1,
			RemoteCommitment: remoteCommit1,
		},
	}
)
// TestMigrateInitialBalances checks that the proper initial balances are
// patched to the channel info. Each case sets up the database state via
// beforeMigrationFunc, runs MigrateInitialBalances, and then verifies the
// result (or expected failure) via afterMigrationFunc.
func TestMigrateInitialBalances(t *testing.T) {
	testCases := []struct {
		name                string
		beforeMigrationFunc func(kvdb.RwTx) error
		afterMigrationFunc  func(kvdb.RwTx) error
		shouldFail          bool
	}{
		{
			// Test that we patch the initial balances using the
			// revocation log.
			name: "patch balance from revoke log",
			beforeMigrationFunc: genBeforeMigration(
				openChannel1, &remoteCommit0,
			),
			afterMigrationFunc: genAfterMigration(
				ourAmtRevoke, theirAmtRevoke, openChannel1,
			),
		},
		{
			// Test that we patch the initial balances using the
			// channel's local commitment since at height 0,
			// balances found in LocalCommitment reflect the
			// initial balances.
			name: "patch balance from local commit",
			beforeMigrationFunc: genBeforeMigration(
				openChannel0, nil,
			),
			afterMigrationFunc: genAfterMigration(
				ourAmt, theirAmt, openChannel0,
			),
		},
		{
			// Test that we patch the initial balances using the
			// channel's local commitment even when there is a
			// revocation log available.
			name: "patch balance from local commit only",
			beforeMigrationFunc: genBeforeMigration(
				openChannel0, &remoteCommit0,
			),
			afterMigrationFunc: genAfterMigration(
				ourAmt, theirAmt, openChannel0,
			),
		},
		{
			// Test that when there is no revocation log the
			// migration would fail.
			name: "patch balance error on no revoke log",
			beforeMigrationFunc: genBeforeMigration(
				// Use nil to specify no revocation log will be
				// created.
				openChannel1, nil,
			),
			afterMigrationFunc: genAfterMigration(
				// Use nil to specify skipping the
				// afterMigrationFunc.
				0, 0, nil,
			),
			shouldFail: true,
		},
		{
			// Test that when the saved revocation log is not what
			// we want the migration would fail.
			name: "patch balance error on wrong revoke log",
			beforeMigrationFunc: genBeforeMigration(
				// Use the revocation log with the wrong
				// height.
				openChannel1, &remoteCommit1,
			),
			afterMigrationFunc: genAfterMigration(
				// Use nil to specify skipping the
				// afterMigrationFunc.
				0, 0, nil,
			),
			shouldFail: true,
		},
	}

	for _, tc := range testCases {
		tc := tc

		t.Run(tc.name, func(t *testing.T) {
			migtest.ApplyMigration(
				t,
				tc.beforeMigrationFunc,
				tc.afterMigrationFunc,
				MigrateInitialBalances,
				tc.shouldFail,
			)
		})
	}
}
// genBeforeMigration creates a closure that seeds the database for a test
// case: it stores the channel info in the legacy format, writes the channel
// commitments, and, if commit is non-nil, stores it as a legacy revocation
// log entry.
func genBeforeMigration(c *OpenChannel,
	commit *mig.ChannelCommitment) func(kvdb.RwTx) error {

	return func(tx kvdb.RwTx) error {
		// The initial balances must start unset, otherwise the
		// migration has nothing meaningful to patch.
		if c.InitialLocalBalance != 0 {
			return fmt.Errorf("non zero initial local amount")
		}
		// Fix: this error previously repeated "local", masking which
		// of the two checks actually failed.
		if c.InitialRemoteBalance != 0 {
			return fmt.Errorf("non zero initial remote amount")
		}

		// Create the channel bucket.
		chanBucket, err := createChanBucket(tx, c)
		if err != nil {
			return err
		}

		// Save the channel info using legacy format.
		if err := putChanInfo(chanBucket, c, true); err != nil {
			return err
		}

		// Save the channel commitments.
		if err := PutChanCommitments(chanBucket, c); err != nil {
			return err
		}

		// If we have a remote commitment, save it as our revocation
		// log.
		if commit != nil {
			err := putChannelLogEntryLegacy(chanBucket, commit)
			if err != nil {
				return err
			}
		}

		return nil
	}
}
// genAfterMigration creates a closure that verifies the migration outcome:
// the channel info must now carry the expected initial balances while the
// other identifying fields stay untouched. A nil channel skips verification,
// which is used for cases where the migration itself is expected to fail.
func genAfterMigration(ourAmt, theirAmt lnwire.MilliSatoshi,
	c *OpenChannel) func(kvdb.RwTx) error {

	return func(tx kvdb.RwTx) error {
		// A nil channel signals an expected migration failure, so
		// there is nothing to verify.
		if c == nil {
			return nil
		}

		chanBucket, err := fetchChanBucket(tx, c)
		if err != nil {
			return err
		}

		// Re-read the channel info using the new format.
		migrated := &OpenChannel{}
		if err := fetchChanInfo(chanBucket, migrated, false); err != nil {
			return err
		}

		// Check our initial amount is correct.
		if migrated.InitialLocalBalance != ourAmt {
			return fmt.Errorf("wrong local balance, got %d, want %d",
				migrated.InitialLocalBalance, ourAmt)
		}

		// Check their initial amount is correct.
		if migrated.InitialRemoteBalance != theirAmt {
			return fmt.Errorf("wrong remote balance, got %d, want %d",
				migrated.InitialRemoteBalance, theirAmt)
		}

		// We also check the relevant channel info fields stay the
		// same.
		if !migrated.IdentityPub.IsEqual(c.IdentityPub) {
			return fmt.Errorf("wrong IdentityPub")
		}
		if migrated.FundingOutpoint != c.FundingOutpoint {
			return fmt.Errorf("wrong FundingOutpoint")
		}

		return nil
	}
}
// createChanBucket creates (or reuses) the full nested bucket hierarchy for
// the given channel — nodePub, then chain hash, then funding outpoint — and
// returns the innermost channel bucket.
func createChanBucket(tx kvdb.RwTx, c *OpenChannel) (kvdb.RwBucket, error) {
	// Level 0: the top level bucket holding all open channel data.
	rootBucket, err := tx.CreateTopLevelBucket(openChannelBucket)
	if err != nil {
		return nil, err
	}

	// Level 1: the bucket keyed by the remote node's public key.
	nodePub := c.IdentityPub.SerializeCompressed()
	nodeBucket, err := rootBucket.CreateBucketIfNotExists(nodePub)
	if err != nil {
		return nil, err
	}

	// Level 2: the bucket keyed by the chain hash.
	chainBucket, err := nodeBucket.CreateBucketIfNotExists(
		c.ChainHash[:],
	)
	if err != nil {
		return nil, err
	}

	// Level 3: the bucket keyed by the serialized funding outpoint.
	var chanPointBuf bytes.Buffer
	if err := mig.WriteOutpoint(&chanPointBuf, &c.FundingOutpoint); err != nil {
		return nil, err
	}

	return chainBucket.CreateBucketIfNotExists(chanPointBuf.Bytes())
}
// putChannelLogEntryLegacy saves an old format revocation log to the bucket,
// keyed by the commitment's height.
func putChannelLogEntryLegacy(chanBucket kvdb.RwBucket,
	commit *mig.ChannelCommitment) error {

	logBucket, err := chanBucket.CreateBucketIfNotExists(
		revocationLogBucketLegacy,
	)
	if err != nil {
		return err
	}

	var buf bytes.Buffer
	if err := mig.SerializeChanCommit(&buf, commit); err != nil {
		return err
	}

	key := mig24.MakeLogKey(commit.CommitHeight)
	return logBucket.Put(key[:], buf.Bytes())
}

View File

@ -616,7 +616,7 @@ func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
return err
}
if err := writeChanConfig(w, &cs.LocalChanConfig); err != nil {
if err := WriteChanConfig(w, &cs.LocalChanConfig); err != nil {
return err
}
@ -680,7 +680,7 @@ func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, error) {
return nil, err
}
if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
if err := ReadChanConfig(r, &c.LocalChanConfig); err != nil {
return nil, err
}
@ -731,7 +731,7 @@ func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, error) {
return c, nil
}
func writeChanConfig(b io.Writer, c *ChannelConfig) error {
func WriteChanConfig(b io.Writer, c *ChannelConfig) error {
return WriteElements(b,
c.DustLimit, c.MaxPendingAmount, c.ChanReserve, c.MinHTLC,
c.MaxAcceptedHtlcs, c.CsvDelay, c.MultiSigKey,
@ -740,7 +740,7 @@ func writeChanConfig(b io.Writer, c *ChannelConfig) error {
)
}
func readChanConfig(b io.Reader, c *ChannelConfig) error {
func ReadChanConfig(b io.Reader, c *ChannelConfig) error {
return ReadElements(b,
&c.DustLimit, &c.MaxPendingAmount, &c.ChanReserve,
&c.MinHTLC, &c.MaxAcceptedHtlcs, &c.CsvDelay,
@ -749,3 +749,92 @@ func readChanConfig(b io.Reader, c *ChannelConfig) error {
&c.HtlcBasePoint,
)
}
// DeserializeChanCommit decodes a ChannelCommitment from the passed reader.
// The fixed fields are read first, followed by the variable-length HTLC set.
// The bytes MUST have been written using SerializeChanCommit.
func DeserializeChanCommit(r io.Reader) (ChannelCommitment, error) {
	var c ChannelCommitment

	// NOTE: the field order here defines the on-disk format and must
	// match SerializeChanCommit exactly.
	err := ReadElements(r,
		&c.CommitHeight, &c.LocalLogIndex, &c.LocalHtlcIndex, &c.RemoteLogIndex,
		&c.RemoteHtlcIndex, &c.LocalBalance, &c.RemoteBalance,
		&c.CommitFee, &c.FeePerKw, &c.CommitTx, &c.CommitSig,
	)
	if err != nil {
		return c, err
	}

	// The HTLCs trail the fixed fields as a counted list.
	c.Htlcs, err = DeserializeHtlcs(r)
	if err != nil {
		return c, err
	}

	return c, nil
}
// DeserializeHtlcs attempts to read out a slice of HTLC's from the passed
// io.Reader. The bytes within the passed reader MUST have been previously
// written to using the SerializeHtlcs function.
//
// NOTE: This API is NOT stable, the on-disk format will likely change in the
// future.
func DeserializeHtlcs(r io.Reader) ([]HTLC, error) {
	// The list is prefixed by a uint16 count of entries.
	var numHtlcs uint16
	if err := ReadElement(r, &numHtlcs); err != nil {
		return nil, err
	}

	// A zero count yields a nil slice rather than an empty allocation.
	var htlcs []HTLC
	if numHtlcs == 0 {
		return htlcs, nil
	}

	// Decode each HTLC in place; the field order defines the on-disk
	// format and must match SerializeHtlcs.
	htlcs = make([]HTLC, numHtlcs)
	for i := uint16(0); i < numHtlcs; i++ {
		if err := ReadElements(r,
			&htlcs[i].Signature, &htlcs[i].RHash, &htlcs[i].Amt,
			&htlcs[i].RefundTimeout, &htlcs[i].OutputIndex,
			&htlcs[i].Incoming, &htlcs[i].OnionBlob,
			&htlcs[i].HtlcIndex, &htlcs[i].LogIndex,
		); err != nil {
			return htlcs, err
		}
	}

	return htlcs, nil
}
// SerializeChanCommit writes a ChannelCommitment to the given writer: the
// fixed fields first, then the HTLC set via SerializeHtlcs. The field order
// defines the on-disk format and must match DeserializeChanCommit.
func SerializeChanCommit(w io.Writer, c *ChannelCommitment) error {
	if err := WriteElements(w,
		c.CommitHeight, c.LocalLogIndex, c.LocalHtlcIndex,
		c.RemoteLogIndex, c.RemoteHtlcIndex, c.LocalBalance,
		c.RemoteBalance, c.CommitFee, c.FeePerKw, c.CommitTx,
		c.CommitSig,
	); err != nil {
		return err
	}

	return SerializeHtlcs(w, c.Htlcs...)
}
// SerializeHtlcs writes out the passed set of HTLC's into the passed writer
// using the current default on-disk serialization format: a uint16 count
// followed by each HTLC's fields in a fixed order.
//
// NOTE: This API is NOT stable, the on-disk format will likely change in the
// future.
func SerializeHtlcs(b io.Writer, htlcs ...HTLC) error {
	// Prefix the list with the number of entries so the reader knows how
	// many to decode.
	numHtlcs := uint16(len(htlcs))
	if err := WriteElement(b, numHtlcs); err != nil {
		return err
	}

	// The field order here must match DeserializeHtlcs.
	for _, htlc := range htlcs {
		if err := WriteElements(b,
			htlc.Signature, htlc.RHash, htlc.Amt, htlc.RefundTimeout,
			htlc.OutputIndex, htlc.Incoming, htlc.OnionBlob[:],
			htlc.HtlcIndex, htlc.LogIndex,
		); err != nil {
			return err
		}
	}

	return nil
}

View File

@ -15,9 +15,9 @@ import (
"github.com/lightningnetwork/lnd/shachain"
)
// writeOutpoint writes an outpoint to the passed writer using the minimal
// WriteOutpoint writes an outpoint to the passed writer using the minimal
// amount of bytes possible.
func writeOutpoint(w io.Writer, o *wire.OutPoint) error {
func WriteOutpoint(w io.Writer, o *wire.OutPoint) error {
if _, err := w.Write(o.Hash[:]); err != nil {
return err
}
@ -28,9 +28,9 @@ func writeOutpoint(w io.Writer, o *wire.OutPoint) error {
return nil
}
// readOutpoint reads an outpoint from the passed reader that was previously
// ReadOutpoint reads an outpoint from the passed reader that was previously
// written using the writeOutpoint struct.
func readOutpoint(r io.Reader, o *wire.OutPoint) error {
func ReadOutpoint(r io.Reader, o *wire.OutPoint) error {
if _, err := io.ReadFull(r, o.Hash[:]); err != nil {
return err
}
@ -88,7 +88,7 @@ func WriteElement(w io.Writer, element interface{}) error {
}
case wire.OutPoint:
return writeOutpoint(w, &e)
return WriteOutpoint(w, &e)
case lnwire.ShortChannelID:
if err := binary.Write(w, byteOrder, e.ToUint64()); err != nil {
@ -258,7 +258,7 @@ func ReadElement(r io.Reader, element interface{}) error {
}
case *wire.OutPoint:
return readOutpoint(r, e)
return ReadOutpoint(r, e)
case *lnwire.ShortChannelID:
var a uint64

View File

@ -836,7 +836,7 @@ func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) {
}
edgeInfo.ChannelPoint = wire.OutPoint{}
if err := readOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
if err := ReadOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
return ChannelEdgeInfo{}, err
}
if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {

View File

@ -34,7 +34,7 @@ func deserializeCloseChannelSummaryV6(r io.Reader) (*ChannelCloseSummary, error)
return nil, err
}
if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
if err := ReadChanConfig(r, &c.LocalChanConfig); err != nil {
return nil, err
}

View File

@ -203,7 +203,7 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
}
var chanPointBuf bytes.Buffer
err = writeOutpoint(&chanPointBuf, &chanState.FundingOutpoint)
err = WriteOutpoint(&chanPointBuf, &chanState.FundingOutpoint)
if err != nil {
t.Fatalf("unable to write outpoint: %v", err)
}
@ -303,7 +303,7 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
t.Fatal(err)
}
err = writeChanConfig(&buf, &cs.LocalChanConfig)
err = WriteChanConfig(&buf, &cs.LocalChanConfig)
if err != nil {
t.Fatal(err)
}
@ -354,7 +354,7 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
t.Fatal(err)
}
err = writeChanConfig(&buf, &cs.LocalChanConfig)
err = WriteChanConfig(&buf, &cs.LocalChanConfig)
if err != nil {
t.Fatal(err)
}

547
channeldb/revocation_log.go Normal file
View File

@ -0,0 +1,547 @@
package channeldb
import (
"bytes"
"errors"
"io"
"math"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/tlv"
)
// OutputIndexEmpty is used when the output index doesn't exist.
const OutputIndexEmpty = math.MaxUint16

var (
	// revocationLogBucketDeprecated is dedicated for storing the necessary
	// delta state between channel updates required to re-construct a past
	// state in order to punish a counterparty attempting a non-cooperative
	// channel closure. This key should be accessed from within the
	// sub-bucket of a target channel, identified by its channel point.
	//
	// Deprecated: This bucket is kept for read-only access in case the
	// user chooses not to migrate the old data.
	revocationLogBucketDeprecated = []byte("revocation-log-key")

	// revocationLogBucket is a sub-bucket under openChannelBucket. This
	// sub-bucket is dedicated for storing the minimal info required to
	// re-construct a past state in order to punish a counterparty
	// attempting a non-cooperative channel closure.
	revocationLogBucket = []byte("revocation-log")

	// ErrLogEntryNotFound is returned when we cannot find a log entry at
	// the height requested in the revocation log.
	ErrLogEntryNotFound = errors.New("log entry not found")

	// ErrOutputIndexTooBig is returned when the output index is greater
	// than uint16.
	ErrOutputIndexTooBig = errors.New("output index is over uint16")
)
// HTLCEntry specifies the minimal info needed to be stored on disk for ALL
// the historical HTLCs, which is useful for constructing RevocationLog when
// a breach is detected.
// The actual size of each HTLCEntry varies based on its RHash and Amt(sat),
// summarized as follows,
//
//	| RHash empty | Amt<=252 | Amt<=65,535 | Amt<=4,294,967,295 | otherwise |
//	|:-----------:|:--------:|:-----------:|:------------------:|:---------:|
//	|    true     |    19    |     21      |         23         |    26     |
//	|    false    |    51    |     53      |         55         |    58     |
//
// So the size varies from 19 bytes to 58 bytes, where most likely to be 23
// or 55 bytes.
//
// NOTE: all the fields saved to disk use the primitive go types so they can
// be made into tlv records without further conversion.
type HTLCEntry struct {
	// RHash is the payment hash of the HTLC.
	RHash [32]byte

	// RefundTimeout is the absolute timeout on the HTLC that the sender
	// must wait before reclaiming the funds in limbo.
	RefundTimeout uint32

	// OutputIndex is the output index for this particular HTLC output
	// within the commitment transaction.
	//
	// NOTE: we use uint16 instead of int32 here to save us 2 bytes, which
	// gives us a max number of HTLCs of 65K.
	OutputIndex uint16

	// Incoming denotes whether we're the receiver or the sender of this
	// HTLC.
	//
	// NOTE: this field is the memory representation of the field
	// incomingUint.
	Incoming bool

	// Amt is the amount of satoshis this HTLC escrows.
	//
	// NOTE: this field is the memory representation of the field amtUint.
	Amt btcutil.Amount

	// amtTlv is the uint64 format of Amt. This field is created so we can
	// easily make it into a tlv record and save it to disk. It is
	// populated from Amt by serializeHTLCEntries just before writing.
	//
	// NOTE: we keep this field for accounting purpose only. If the disk
	// space becomes an issue, we could delete this field to save us extra
	// 8 bytes.
	amtTlv uint64

	// incomingTlv is the uint8 format of Incoming. This field is created
	// so we can easily make it into a tlv record and save it to disk. It
	// is populated from Incoming by serializeHTLCEntries just before
	// writing.
	incomingTlv uint8
}
// RHashLen is used by MakeDynamicRecord to report the encoded size of the
// RHash field: an all-zero hash is skipped entirely on disk (length 0),
// while any other hash occupies the full 32 bytes.
func (h *HTLCEntry) RHashLen() uint64 {
	if h.RHash != lntypes.ZeroHash {
		return 32
	}

	return 0
}
// RHashEncoder is the customized encoder which skips encoding the empty
// hash. Paired with RHashLen, this means an all-zero RHash contributes zero
// value bytes to the record.
func RHashEncoder(w io.Writer, val interface{}, buf *[8]byte) error {
	v, ok := val.(*[32]byte)
	if !ok {
		return tlv.NewTypeForEncodingErr(val, "RHash")
	}

	// If the value is an empty hash, we will skip encoding it.
	if *v == lntypes.ZeroHash {
		return nil
	}

	return tlv.EBytes32(w, v, buf)
}
// RHashDecoder is the customized decoder which skips decoding the empty
// hash. A record written with length 0 (see RHashEncoder/RHashLen) leaves
// the destination untouched, whose zero value is already the empty hash.
func RHashDecoder(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
	v, ok := val.(*[32]byte)
	if !ok {
		// Use the decoding variant of the type error since this is
		// the decode path (the original used NewTypeForEncodingErr).
		return tlv.NewTypeForDecodingErr(val, "RHash", l, 32)
	}

	// If the length is zero, the hash was skipped at encoding time, so
	// there's nothing to read.
	if l == 0 {
		return nil
	}

	return tlv.DBytes32(r, v, buf, 32)
}
// toTlvStream converts an HTLCEntry record into a tlv representation. The
// returned stream's records point directly into h's fields, so it can be
// used for both encoding and decoding.
func (h *HTLCEntry) toTlvStream() (*tlv.Stream, error) {
	const (
		// A set of tlv type definitions used to serialize htlc entries
		// to the database. We define it here instead of the head of
		// the file to avoid naming conflicts.
		//
		// NOTE: A migration should be added whenever this list
		// changes.
		rHashType         tlv.Type = 0
		refundTimeoutType tlv.Type = 1
		outputIndexType   tlv.Type = 2
		incomingType      tlv.Type = 3
		amtType           tlv.Type = 4
	)

	return tlv.NewStream(
		// RHash uses a dynamic record so an empty hash costs zero
		// value bytes on disk.
		tlv.MakeDynamicRecord(
			rHashType, &h.RHash, h.RHashLen,
			RHashEncoder, RHashDecoder,
		),
		tlv.MakePrimitiveRecord(
			refundTimeoutType, &h.RefundTimeout,
		),
		tlv.MakePrimitiveRecord(
			outputIndexType, &h.OutputIndex,
		),
		tlv.MakePrimitiveRecord(incomingType, &h.incomingTlv),
		// We will save 3 bytes if the amount is less or equal to
		// 4,294,967,295 msat, or roughly 0.043 bitcoin.
		tlv.MakeBigSizeRecord(amtType, &h.amtTlv),
	)
}
// RevocationLog stores the info needed to construct a breach retribution.
// Its fields can be viewed as a subset of a ChannelCommitment's. In the
// database, all historical versions of the RevocationLog are saved using
// the CommitHeight as the key.
//
// NOTE: all the fields use the primitive go types so they can be made into
// tlv records without further conversion.
type RevocationLog struct {
	// OurOutputIndex specifies our output index in this commitment. In a
	// remote commitment transaction, this is the to remote output index.
	OurOutputIndex uint16

	// TheirOutputIndex specifies their output index in this commitment.
	// In a remote commitment transaction, this is the to local output
	// index.
	TheirOutputIndex uint16

	// CommitTxHash is the hash of the latest version of the commitment
	// state, broadcastable by us.
	CommitTxHash [32]byte

	// HTLCEntries is the set of HTLCEntry's that are pending at this
	// particular commitment height. Only non-dust HTLCs are included.
	HTLCEntries []*HTLCEntry
}
// toTlvStream converts a RevocationLog record into a tlv representation.
// The HTLCEntries field is deliberately excluded; it is serialized
// separately after the body (see serializeRevocationLog).
func (rl *RevocationLog) toTlvStream() (*tlv.Stream, error) {
	const (
		// A set of tlv type definitions used to serialize the body of
		// revocation logs to the database. We define it here instead
		// of the head of the file to avoid naming conflicts.
		//
		// NOTE: A migration should be added whenever this list
		// changes.
		ourOutputIndexType   tlv.Type = 0
		theirOutputIndexType tlv.Type = 1
		commitTxHashType     tlv.Type = 2
	)

	return tlv.NewStream(
		tlv.MakePrimitiveRecord(ourOutputIndexType, &rl.OurOutputIndex),
		tlv.MakePrimitiveRecord(
			theirOutputIndexType, &rl.TheirOutputIndex,
		),
		tlv.MakePrimitiveRecord(commitTxHashType, &rl.CommitTxHash),
	)
}
// putRevocationLog uses the fields `CommitTx` and `Htlcs` from a
// ChannelCommitment to construct a revocation log entry and saves them to
// disk. It also saves our output index and their output index, which are
// useful when creating breach retribution. The entry is keyed by the
// commitment height. Returns ErrOutputIndexTooBig if any output index
// exceeds uint16.
func putRevocationLog(bucket kvdb.RwBucket, commit *ChannelCommitment,
	ourOutputIndex, theirOutputIndex uint32) error {

	// Sanity check that the output indexes can be safely converted.
	if ourOutputIndex > math.MaxUint16 {
		return ErrOutputIndexTooBig
	}
	if theirOutputIndex > math.MaxUint16 {
		return ErrOutputIndexTooBig
	}

	// Only the hash of the commitment tx is kept, not the full
	// transaction.
	rl := &RevocationLog{
		OurOutputIndex:   uint16(ourOutputIndex),
		TheirOutputIndex: uint16(theirOutputIndex),
		CommitTxHash:     commit.CommitTx.TxHash(),
		HTLCEntries:      make([]*HTLCEntry, 0, len(commit.Htlcs)),
	}

	for _, htlc := range commit.Htlcs {
		// Skip dust HTLCs: a negative output index means the HTLC has
		// no output on the commitment transaction.
		if htlc.OutputIndex < 0 {
			continue
		}

		// Sanity check that the output indexes can be safely
		// converted.
		if htlc.OutputIndex > math.MaxUint16 {
			return ErrOutputIndexTooBig
		}

		// Keep only the minimal set of fields, with the amount
		// converted to satoshis.
		entry := &HTLCEntry{
			RHash:         htlc.RHash,
			RefundTimeout: htlc.RefundTimeout,
			Incoming:      htlc.Incoming,
			OutputIndex:   uint16(htlc.OutputIndex),
			Amt:           htlc.Amt.ToSatoshis(),
		}
		rl.HTLCEntries = append(rl.HTLCEntries, entry)
	}

	var b bytes.Buffer
	err := serializeRevocationLog(&b, rl)
	if err != nil {
		return err
	}

	// Key the record by the commitment height, same key format as the
	// legacy bucket.
	logEntrykey := makeLogKey(commit.CommitHeight)
	return bucket.Put(logEntrykey[:], b.Bytes())
}
// fetchRevocationLog looks up the revocation log entry stored under the
// given update number (commitment height). ErrLogEntryNotFound is returned
// when no entry exists at that height.
func fetchRevocationLog(log kvdb.RBucket,
	updateNum uint64) (RevocationLog, error) {

	key := makeLogKey(updateNum)

	rawLog := log.Get(key[:])
	if rawLog == nil {
		return RevocationLog{}, ErrLogEntryNotFound
	}

	return deserializeRevocationLog(bytes.NewReader(rawLog))
}
// serializeRevocationLog serializes a RevocationLog record based on tlv
// format: first the length-prefixed body stream, then each HTLC entry as
// its own length-prefixed stream.
func serializeRevocationLog(w io.Writer, rl *RevocationLog) error {
	// Create the tlv stream.
	tlvStream, err := rl.toTlvStream()
	if err != nil {
		return err
	}

	// Write the tlv stream.
	if err := writeTlvStream(w, tlvStream); err != nil {
		return err
	}

	// Write the HTLCs.
	return serializeHTLCEntries(w, rl.HTLCEntries)
}
// serializeHTLCEntries serializes a list of HTLCEntry records based on tlv
// format, one length-prefixed stream per entry.
//
// NOTE: this mutates the passed entries, populating their unexported
// incomingTlv and amtTlv fields from Incoming and Amt before writing.
func serializeHTLCEntries(w io.Writer, htlcs []*HTLCEntry) error {
	for _, htlc := range htlcs {
		// Patch the incomingTlv field.
		if htlc.Incoming {
			htlc.incomingTlv = 1
		}

		// Patch the amtTlv field.
		htlc.amtTlv = uint64(htlc.Amt)

		// Create the tlv stream.
		tlvStream, err := htlc.toTlvStream()
		if err != nil {
			return err
		}

		// Write the tlv stream.
		if err := writeTlvStream(w, tlvStream); err != nil {
			return err
		}
	}

	return nil
}
// deserializeRevocationLog deserializes a RevocationLog based on tlv format:
// the length-prefixed body stream first, followed by the HTLC entries.
func deserializeRevocationLog(r io.Reader) (RevocationLog, error) {
	var rl RevocationLog

	// Create the tlv stream whose records point into rl's fields.
	tlvStream, err := rl.toTlvStream()
	if err != nil {
		return rl, err
	}

	// Read the tlv stream, populating the fixed fields.
	if err := readTlvStream(r, tlvStream); err != nil {
		return rl, err
	}

	// Read the HTLC entries that trail the body.
	rl.HTLCEntries, err = deserializeHTLCEntries(r)
	return rl, err
}
// deserializeHTLCEntries deserializes a list of HTLC entries based on tlv
// format. Entries are read until the underlying reader is exhausted; an
// unexpected EOF on the length prefix marks the end of the list rather
// than an error.
func deserializeHTLCEntries(r io.Reader) ([]*HTLCEntry, error) {
	var htlcs []*HTLCEntry

	for {
		var htlc HTLCEntry

		// Create the tlv stream whose records point into the entry.
		tlvStream, err := htlc.toTlvStream()
		if err != nil {
			return nil, err
		}

		// Read the HTLC entry.
		if err := readTlvStream(r, tlvStream); err != nil {
			// We've reached the end when hitting an EOF. Use
			// errors.Is so wrapped errors are matched as well.
			if errors.Is(err, io.ErrUnexpectedEOF) {
				break
			}
			return nil, err
		}

		// Patch the Incoming field from its on-disk representation.
		if htlc.incomingTlv == 1 {
			htlc.Incoming = true
		}

		// Patch the Amt field from its on-disk representation.
		htlc.Amt = btcutil.Amount(htlc.amtTlv)

		// Append the entry.
		htlcs = append(htlcs, &htlc)
	}

	return htlcs, nil
}
// writeTlvStream encodes the given tlv stream into an in-memory buffer,
// then writes a varint length prefix followed by the encoded bytes to w.
// The prefix lets the reader know exactly how many bytes belong to this
// record.
func writeTlvStream(w io.Writer, s *tlv.Stream) error {
	var body bytes.Buffer
	if err := s.Encode(&body); err != nil {
		return err
	}

	// Length-prefix the encoded stream.
	var scratch [8]byte
	if err := tlv.WriteVarInt(w, uint64(body.Len()), &scratch); err != nil {
		return err
	}

	_, err := w.Write(body.Bytes())
	return err
}
// readTlvStream is a helper function that decodes the tlv stream from the
// reader. It expects a varint length prefix followed by exactly that many
// bytes of tlv body. A bare EOF on the length prefix is converted to
// io.ErrUnexpectedEOF since it indicates a truncated record.
func readTlvStream(r io.Reader, s *tlv.Stream) error {
	var bodyLen uint64

	// Read the stream's length.
	bodyLen, err := tlv.ReadVarInt(r, &[8]byte{})
	switch {
	// We'll convert any EOFs to ErrUnexpectedEOF, since this results in an
	// invalid record.
	case err == io.EOF:
		return io.ErrUnexpectedEOF

	// Other unexpected errors.
	case err != nil:
		return err
	}

	// Guard against a corrupt length prefix that would overflow int64:
	// io.LimitReader with a negative limit reads nothing, which would
	// otherwise surface as a confusing decode error.
	if bodyLen > math.MaxInt64 {
		return errors.New("tlv stream body length overflows int64")
	}

	lr := io.LimitReader(r, int64(bodyLen))
	return s.Decode(lr)
}
// fetchOldRevocationLog finds the revocation log from the deprecated
// sub-bucket, decoding the full legacy ChannelCommitment stored there.
// Returns ErrLogEntryNotFound if no entry exists at the given height.
func fetchOldRevocationLog(log kvdb.RBucket,
	updateNum uint64) (ChannelCommitment, error) {

	// Entries are keyed by commitment height.
	logEntrykey := makeLogKey(updateNum)
	commitBytes := log.Get(logEntrykey[:])
	if commitBytes == nil {
		return ChannelCommitment{}, ErrLogEntryNotFound
	}

	commitReader := bytes.NewReader(commitBytes)
	return deserializeChanCommit(commitReader)
}
// fetchRevocationLogCompatible finds the revocation log from both the
// revocationLogBucket and revocationLogBucketDeprecated for compatibility
// concern. It returns three values,
//   - RevocationLog, if this is non-nil, it means we've found the log in
//     the new bucket.
//   - ChannelCommitment, if this is non-nil, it means we've found the log
//     in the old bucket.
//   - error, this can happen if the log cannot be found in neither buckets.
func fetchRevocationLogCompatible(chanBucket kvdb.RBucket,
	updateNum uint64) (*RevocationLog, *ChannelCommitment, error) {

	// Look into the new bucket first.
	logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
	if logBucket != nil {
		rl, err := fetchRevocationLog(logBucket, updateNum)
		// We've found the record, no need to visit the old bucket.
		if err == nil {
			return &rl, nil, nil
		}

		// Return the error if it doesn't say the log cannot be
		// found. Use errors.Is so wrapped sentinels are matched too.
		if !errors.Is(err, ErrLogEntryNotFound) {
			return nil, nil, err
		}
	}

	// Otherwise, look into the old bucket and try to find the log there.
	oldBucket := chanBucket.NestedReadBucket(revocationLogBucketDeprecated)
	if oldBucket != nil {
		c, err := fetchOldRevocationLog(oldBucket, updateNum)
		if err != nil {
			return nil, nil, err
		}

		// Found an old record and return it.
		return nil, &c, nil
	}

	// If both the buckets are nil, then the sub-buckets haven't been
	// created yet.
	if logBucket == nil && oldBucket == nil {
		return nil, nil, ErrNoPastDeltas
	}

	// Otherwise, we've tried to query the new bucket but the log cannot
	// be found.
	return nil, nil, ErrLogEntryNotFound
}
// fetchLogBucket returns a read bucket for the revocation log, preferring
// the new bucket and falling back to the deprecated one. ErrNoPastDeltas is
// returned when neither bucket exists.
func fetchLogBucket(chanBucket kvdb.RBucket) (kvdb.RBucket, error) {
	// Prefer the new sub-bucket.
	if bucket := chanBucket.NestedReadBucket(revocationLogBucket); bucket != nil {
		return bucket, nil
	}

	// Fall back to the deprecated sub-bucket for un-migrated data.
	if bucket := chanBucket.NestedReadBucket(
		revocationLogBucketDeprecated,
	); bucket != nil {
		return bucket, nil
	}

	return nil, ErrNoPastDeltas
}
// deleteLogBucket deletes both the new and the deprecated revocation log
// sub-buckets from the given channel bucket. Buckets that don't exist are
// skipped, making the call idempotent.
func deleteLogBucket(chanBucket kvdb.RwBucket) error {
	keys := [][]byte{
		revocationLogBucket,
		revocationLogBucketDeprecated,
	}

	for _, key := range keys {
		// Only attempt deletion when the sub-bucket actually exists.
		if chanBucket.NestedReadWriteBucket(key) == nil {
			continue
		}

		if err := chanBucket.DeleteNestedBucket(key); err != nil {
			return err
		}
	}

	return nil
}

View File

@ -0,0 +1,622 @@
package channeldb
import (
"bytes"
"io"
"math"
"math/rand"
"testing"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lntest/channels"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/tlv"
"github.com/stretchr/testify/require"
)
const (
	// testType is used for creating a testing tlv record type. We use 10
	// here so it's easier to be recognized by its hex value, 0xa, in the
	// expected byte fixtures below.
	testType tlv.Type = 10
)
var (
	// testValue is used for creating tlv record.
	testValue = uint8(255) // 0xff

	// testValueBytes is the tlv encoded testValue.
	testValueBytes = []byte{
		0x3,  // total length = 3
		0xa,  // type = 10
		0x1,  // length = 1
		0xff, // value = 255
	}

	// testHTLCEntry is a fixture entry whose unexported tlv fields
	// (amtTlv, incomingTlv) are pre-populated to mirror Amt and Incoming,
	// matching what serialization would produce.
	testHTLCEntry = HTLCEntry{
		RefundTimeout: 740_000,
		OutputIndex:   10,
		Incoming:      true,
		Amt:           1000_000,
		amtTlv:        1000_000,
		incomingTlv:   1,
	}

	// testHTLCEntryBytes is the expected serialization of testHTLCEntry.
	testHTLCEntryBytes = []byte{
		// Body length 23.
		0x16,

		// Rhash tlv.
		0x0, 0x0,

		// RefundTimeout tlv.
		0x1, 0x4, 0x0, 0xb, 0x4a, 0xa0,

		// OutputIndex tlv.
		0x2, 0x2, 0x0, 0xa,

		// Incoming tlv.
		0x3, 0x1, 0x1,

		// Amt tlv.
		0x4, 0x5, 0xfe, 0x0, 0xf, 0x42, 0x40,
	}

	// testChannelCommit is a fixture commitment containing a single
	// non-dust HTLC mirroring testHTLCEntry.
	testChannelCommit = ChannelCommitment{
		CommitHeight:  999,
		LocalBalance:  lnwire.MilliSatoshi(9000),
		RemoteBalance: lnwire.MilliSatoshi(3000),
		CommitFee:     btcutil.Amount(rand.Int63()),
		FeePerKw:      btcutil.Amount(5000),
		CommitTx:      channels.TestFundingTx,
		CommitSig:     bytes.Repeat([]byte{1}, 71),
		Htlcs: []HTLC{{
			RefundTimeout: testHTLCEntry.RefundTimeout,
			OutputIndex:   int32(testHTLCEntry.OutputIndex),
			Incoming:      testHTLCEntry.Incoming,
			Amt: lnwire.NewMSatFromSatoshis(
				testHTLCEntry.Amt,
			),
		}},
	}

	// testRevocationLog is the expected log derived from
	// testChannelCommit with our/their output indexes 0 and 1.
	testRevocationLog = RevocationLog{
		OurOutputIndex:   0,
		TheirOutputIndex: 1,
		CommitTxHash:     testChannelCommit.CommitTx.TxHash(),
		HTLCEntries:      []*HTLCEntry{&testHTLCEntry},
	}

	// testRevocationLogBytes is the expected serialization of the body
	// of testRevocationLog, excluding the trailing HTLC entries.
	testRevocationLogBytes = []byte{
		// Body length 42.
		0x2a,

		// OurOutputIndex tlv.
		0x0, 0x2, 0x0, 0x0,

		// TheirOutputIndex tlv.
		0x1, 0x2, 0x0, 0x1,

		// CommitTxHash tlv.
		0x2, 0x20,
		0x28, 0x76, 0x2, 0x59, 0x1d, 0x9d, 0x64, 0x86,
		0x6e, 0x60, 0x29, 0x23, 0x1d, 0x5e, 0xc5, 0xe6,
		0xbd, 0xf7, 0xd3, 0x9b, 0x16, 0x7d, 0x0, 0xff,
		0xc8, 0x22, 0x51, 0xb1, 0x5b, 0xa0, 0xbf, 0xd,
	}
)
// TestWriteTLVStream asserts that writeTlvStream length-prefixes the
// encoded stream and produces the exact expected bytes.
func TestWriteTLVStream(t *testing.T) {
	t.Parallel()

	// Create a dummy tlv stream for testing.
	ts, err := tlv.NewStream(
		tlv.MakePrimitiveRecord(testType, &testValue),
	)
	require.NoError(t, err)

	// Write the tlv stream.
	buf := bytes.NewBuffer([]byte{})
	err = writeTlvStream(buf, ts)
	require.NoError(t, err)

	// Check the bytes are written as expected.
	require.Equal(t, testValueBytes, buf.Bytes())
}
// TestReadTLVStream asserts that readTlvStream decodes a length-prefixed
// stream written in the format produced by writeTlvStream.
func TestReadTLVStream(t *testing.T) {
	t.Parallel()

	var valueRead uint8

	// Create a dummy tlv stream for testing.
	ts, err := tlv.NewStream(
		tlv.MakePrimitiveRecord(testType, &valueRead),
	)
	require.NoError(t, err)

	// Read the tlv stream.
	buf := bytes.NewBuffer(testValueBytes)
	err = readTlvStream(buf, ts)
	require.NoError(t, err)

	// Check the bytes are read as expected.
	require.Equal(t, testValue, valueRead)
}
// TestReadTLVStreamErr asserts that readTlvStream converts a bare EOF on
// the length prefix into io.ErrUnexpectedEOF and leaves the target value
// untouched.
func TestReadTLVStreamErr(t *testing.T) {
	t.Parallel()

	var valueRead uint8

	// Create a dummy tlv stream for testing.
	ts, err := tlv.NewStream(
		tlv.MakePrimitiveRecord(testType, &valueRead),
	)
	require.NoError(t, err)

	// Use empty bytes to cause an EOF.
	b := []byte{}

	// Read the tlv stream.
	buf := bytes.NewBuffer(b)
	err = readTlvStream(buf, ts)
	require.ErrorIs(t, err, io.ErrUnexpectedEOF)

	// Check the bytes are not read.
	require.Zero(t, valueRead)
}
// TestSerializeHTLCEntriesEmptyRHash checks that an entry with a zero RHash
// serializes to the compact fixture bytes, and that serialization itself
// populates the unexported tlv fields.
func TestSerializeHTLCEntriesEmptyRHash(t *testing.T) {
	t.Parallel()

	// Copy the testHTLCEntry.
	entry := testHTLCEntry

	// Set the internal fields to empty values so we can test the bytes
	// are padded.
	entry.incomingTlv = 0
	entry.amtTlv = 0

	// Write the tlv stream.
	buf := bytes.NewBuffer([]byte{})
	err := serializeHTLCEntries(buf, []*HTLCEntry{&entry})
	require.NoError(t, err)

	// Check the bytes are read as expected.
	require.Equal(t, testHTLCEntryBytes, buf.Bytes())
}
// TestSerializeHTLCEntries checks that a non-zero RHash is serialized as a
// full 32-byte tlv record, enlarging the body accordingly.
func TestSerializeHTLCEntries(t *testing.T) {
	t.Parallel()

	// Copy the testHTLCEntry.
	entry := testHTLCEntry

	// Create a fake rHash.
	rHashBytes := bytes.Repeat([]byte{10}, 32)
	copy(entry.RHash[:], rHashBytes)

	// Construct the serialized bytes.
	//
	// Exclude the first 3 bytes, which are total length, RHash type and
	// RHash length(0).
	partialBytes := testHTLCEntryBytes[3:]

	// Write the total length and RHash tlv.
	expectedBytes := []byte{0x36, 0x0, 0x20}
	expectedBytes = append(expectedBytes, rHashBytes...)

	// Append the rest.
	expectedBytes = append(expectedBytes, partialBytes...)

	buf := bytes.NewBuffer([]byte{})
	err := serializeHTLCEntries(buf, []*HTLCEntry{&entry})
	require.NoError(t, err)

	// Check the bytes are read as expected.
	require.Equal(t, expectedBytes, buf.Bytes())
}
// TestSerializeRevocationLog checks that the body of a revocation log is
// serialized to the expected fixture bytes, with the HTLC entries trailing
// it.
func TestSerializeRevocationLog(t *testing.T) {
	t.Parallel()

	// Copy the testRevocationLog.
	rl := testRevocationLog

	// Write the tlv stream.
	buf := bytes.NewBuffer([]byte{})
	err := serializeRevocationLog(buf, &rl)
	require.NoError(t, err)

	// Check the expected bytes on the body of the revocation log.
	bodyIndex := buf.Len() - len(testHTLCEntryBytes)
	require.Equal(t, testRevocationLogBytes, buf.Bytes()[:bodyIndex])
}
// TestDerializeHTLCEntriesEmptyRHash round-trips the fixture bytes back
// into a single HTLC entry with a zero RHash.
//
// NOTE(review): the function name has a typo ("Derialize"); kept as-is
// since renaming is cosmetic churn for a test.
func TestDerializeHTLCEntriesEmptyRHash(t *testing.T) {
	t.Parallel()

	// Read the tlv stream.
	buf := bytes.NewBuffer(testHTLCEntryBytes)
	htlcs, err := deserializeHTLCEntries(buf)
	require.NoError(t, err)

	// Check the bytes are read as expected.
	require.Len(t, htlcs, 1)
	require.Equal(t, &testHTLCEntry, htlcs[0])
}
// TestDerializeHTLCEntries round-trips an entry carrying a non-zero RHash,
// constructing the expected on-disk bytes by hand first.
//
// NOTE(review): the function name has a typo ("Derialize"); kept as-is
// since renaming is cosmetic churn for a test.
func TestDerializeHTLCEntries(t *testing.T) {
	t.Parallel()

	// Copy the testHTLCEntry.
	entry := testHTLCEntry

	// Create a fake rHash.
	rHashBytes := bytes.Repeat([]byte{10}, 32)
	copy(entry.RHash[:], rHashBytes)

	// Construct the serialized bytes.
	//
	// Exclude the first 3 bytes, which are total length, RHash type and
	// RHash length(0).
	partialBytes := testHTLCEntryBytes[3:]

	// Write the total length and RHash tlv.
	testBytes := append([]byte{0x36, 0x0, 0x20}, rHashBytes...)

	// Append the rest.
	testBytes = append(testBytes, partialBytes...)

	// Read the tlv stream.
	buf := bytes.NewBuffer(testBytes)
	htlcs, err := deserializeHTLCEntries(buf)
	require.NoError(t, err)

	// Check the bytes are read as expected.
	require.Len(t, htlcs, 1)
	require.Equal(t, &entry, htlcs[0])
}
// TestDerializeRevocationLog round-trips the full fixture (body plus one
// HTLC entry) back into the expected RevocationLog.
//
// NOTE(review): the function name has a typo ("Derialize"); kept as-is
// since renaming is cosmetic churn for a test.
func TestDerializeRevocationLog(t *testing.T) {
	t.Parallel()

	// Construct the full bytes.
	b := testRevocationLogBytes
	b = append(b, testHTLCEntryBytes...)

	// Read the tlv stream.
	buf := bytes.NewBuffer(b)
	rl, err := deserializeRevocationLog(buf)
	require.NoError(t, err)

	// Check the bytes are read as expected.
	require.Len(t, rl.HTLCEntries, 1)
	require.Equal(t, testRevocationLog, rl)
}
// TestFetchLogBucket checks that fetchLogBucket returns ErrNoPastDeltas
// when neither log sub-bucket exists, and succeeds once the new bucket is
// created.
func TestFetchLogBucket(t *testing.T) {
	t.Parallel()

	fullDB, cleanUp, err := MakeTestDB()
	require.NoError(t, err)
	defer cleanUp()

	backend := fullDB.ChannelStateDB().backend

	// Test that when neither of the buckets exists, an error is returned.
	err = kvdb.Update(backend, func(tx kvdb.RwTx) error {
		chanBucket, err := tx.CreateTopLevelBucket(openChannelBucket)
		require.NoError(t, err)

		// Check an error is returned when there's no sub bucket.
		_, err = fetchLogBucket(chanBucket)
		return err
	}, func() {})
	require.ErrorIs(t, err, ErrNoPastDeltas)

	// Test a successful fetch.
	err = kvdb.Update(backend, func(tx kvdb.RwTx) error {
		chanBucket, err := tx.CreateTopLevelBucket(openChannelBucket)
		require.NoError(t, err)

		_, err = chanBucket.CreateBucket(revocationLogBucket)
		require.NoError(t, err)

		// Check the bucket can now be fetched without error.
		_, err = fetchLogBucket(chanBucket)
		return err
	}, func() {})
	require.NoError(t, err)
}
// TestDeleteLogBucket checks that deleteLogBucket removes both the new and
// the deprecated sub-buckets, verified by recreating them afterwards.
func TestDeleteLogBucket(t *testing.T) {
	t.Parallel()

	fullDB, cleanUp, err := MakeTestDB()
	require.NoError(t, err)
	defer cleanUp()

	backend := fullDB.ChannelStateDB().backend

	err = kvdb.Update(backend, func(tx kvdb.RwTx) error {
		// Create the buckets.
		chanBucket, _, err := createTestRevocatoinLogBuckets(tx)
		require.NoError(t, err)

		// Create the buckets again should give us an error.
		_, _, err = createTestRevocatoinLogBuckets(tx)
		require.ErrorIs(t, err, kvdb.ErrBucketExists)

		// Delete both buckets.
		err = deleteLogBucket(chanBucket)
		require.NoError(t, err)

		// Create the buckets again should give us NO error.
		_, _, err = createTestRevocatoinLogBuckets(tx)
		return err
	}, func() {})
	require.NoError(t, err)
}
// TestPutRevocationLog exercises putRevocationLog across the main cases: a
// normal save, output indexes overflowing uint16 (ours, theirs, and an
// HTLC's), and a dust HTLC that must be omitted from the stored log.
func TestPutRevocationLog(t *testing.T) {
	t.Parallel()

	// Create a test commit that has a large htlc output index.
	testHtlc := HTLC{OutputIndex: math.MaxUint16 + 1}
	testCommit := testChannelCommit
	testCommit.Htlcs = []HTLC{testHtlc}

	// Create a test commit that has a dust HTLC.
	testHtlcDust := HTLC{OutputIndex: -1}
	testCommitDust := testChannelCommit
	testCommitDust.Htlcs = append(testCommitDust.Htlcs, testHtlcDust)

	testCases := []struct {
		name        string
		commit      ChannelCommitment
		ourIndex    uint32
		theirIndex  uint32
		expectedErr error
		expectedLog RevocationLog
	}{
		{
			// Test a normal put operation.
			name:        "successful put",
			commit:      testChannelCommit,
			ourIndex:    0,
			theirIndex:  1,
			expectedErr: nil,
			expectedLog: testRevocationLog,
		},
		{
			// Test our index too big.
			name:        "our index too big",
			commit:      testChannelCommit,
			ourIndex:    math.MaxUint16 + 1,
			theirIndex:  1,
			expectedErr: ErrOutputIndexTooBig,
			expectedLog: RevocationLog{},
		},
		{
			// Test their index too big.
			name:        "their index too big",
			commit:      testChannelCommit,
			ourIndex:    0,
			theirIndex:  math.MaxUint16 + 1,
			expectedErr: ErrOutputIndexTooBig,
			expectedLog: RevocationLog{},
		},
		{
			// Test htlc output index too big.
			name:        "htlc index too big",
			commit:      testCommit,
			ourIndex:    0,
			theirIndex:  1,
			expectedErr: ErrOutputIndexTooBig,
			expectedLog: RevocationLog{},
		},
		{
			// Test dust htlc is not saved.
			name:        "dust htlc not saved",
			commit:      testCommitDust,
			ourIndex:    0,
			theirIndex:  1,
			expectedErr: nil,
			expectedLog: testRevocationLog,
		},
	}

	for _, tc := range testCases {
		tc := tc

		fullDB, cleanUp, err := MakeTestDB()
		require.NoError(t, err)
		// NOTE(review): deferred inside the loop, so every case's
		// cleanup only runs when the whole test function returns.
		defer cleanUp()

		backend := fullDB.ChannelStateDB().backend

		// Construct the testing db transaction.
		dbTx := func(tx kvdb.RwTx) (RevocationLog, error) {
			// Create the buckets.
			_, bucket, err := createTestRevocatoinLogBuckets(tx)
			require.NoError(t, err)

			// Save the log.
			err = putRevocationLog(
				bucket, &tc.commit, tc.ourIndex, tc.theirIndex,
			)
			if err != nil {
				return RevocationLog{}, err
			}

			// Read the saved log.
			return fetchRevocationLog(
				bucket, tc.commit.CommitHeight,
			)
		}

		t.Run(tc.name, func(t *testing.T) {
			var rl RevocationLog

			err := kvdb.Update(backend, func(tx kvdb.RwTx) error {
				record, err := dbTx(tx)
				rl = record
				return err
			}, func() {})

			require.Equal(t, tc.expectedErr, err)
			require.Equal(t, tc.expectedLog, rl)
		})
	}
}
// TestFetchRevocationLogCompatible asserts that
// fetchRevocationLogCompatible can read a revocation log from the new
// bucket, fall back to the deprecated bucket when only the legacy data
// exists, prefer the new bucket when both are present, and surface the
// proper errors when the buckets or the requested height are missing.
func TestFetchRevocationLogCompatible(t *testing.T) {
	t.Parallel()

	knownHeight := testChannelCommit.CommitHeight
	unknownHeight := knownHeight + 1
	logKey := makeLogKey(knownHeight)

	testCases := []struct {
		name         string
		updateNum    uint64
		expectedErr  error
		createRl     bool
		createCommit bool
		expectRl     bool
		expectCommit bool
	}{
		{
			// Test we can fetch the new log.
			name:        "fetch new log",
			updateNum:   knownHeight,
			expectedErr: nil,
			createRl:    true,
			expectRl:    true,
		},
		{
			// Test we can fetch the legacy log.
			name:         "fetch legacy log",
			updateNum:    knownHeight,
			expectedErr:  nil,
			createCommit: true,
			expectCommit: true,
		},
		{
			// Test we only fetch the new log when both logs exist.
			name:         "fetch new log only",
			updateNum:    knownHeight,
			expectedErr:  nil,
			createRl:     true,
			createCommit: true,
			expectRl:     true,
		},
		{
			// Test no past deltas when the buckets do not exist.
			name:        "no buckets created",
			updateNum:   unknownHeight,
			expectedErr: ErrNoPastDeltas,
		},
		{
			// Test no logs found when the height is unknown.
			name:         "no log found",
			updateNum:    unknownHeight,
			expectedErr:  ErrLogEntryNotFound,
			createRl:     true,
			createCommit: true,
		},
	}

	for _, tc := range testCases {
		tc := tc

		fullDB, cleanUp, err := MakeTestDB()
		require.NoError(t, err)

		// Register the cleanup with the test harness instead of
		// deferring inside the loop body, which would pile up all the
		// cleanups until the whole test function returns.
		t.Cleanup(cleanUp)

		backend := fullDB.ChannelStateDB().backend

		var (
			rl     *RevocationLog
			commit *ChannelCommitment
		)

		// Setup the buckets and fill the test data if specified.
		err = kvdb.Update(backend, func(tx kvdb.RwTx) error {
			// Create the root bucket.
			cb, err := tx.CreateTopLevelBucket(openChannelBucket)
			require.NoError(t, err)

			// Create the revocation log if specified.
			if tc.createRl {
				lb, err := cb.CreateBucket(revocationLogBucket)
				require.NoError(t, err)

				err = putRevocationLog(
					lb, &testChannelCommit, 0, 1,
				)
				require.NoError(t, err)
			}

			// Create the channel commit if specified.
			if tc.createCommit {
				legacyBucket, err := cb.CreateBucket(
					revocationLogBucketDeprecated,
				)
				require.NoError(t, err)

				buf := bytes.NewBuffer([]byte{})
				err = serializeChanCommit(
					buf, &testChannelCommit,
				)
				require.NoError(t, err)

				err = legacyBucket.Put(logKey[:], buf.Bytes())
				require.NoError(t, err)
			}

			return nil
		}, func() {})

		// Check the setup transaction succeeded. The original code
		// silently dropped this error, which would let a broken
		// fixture masquerade as a fetch failure below.
		require.NoError(t, err)

		// Construct the testing db transaction.
		dbTx := func(tx kvdb.RTx) error {
			cb := tx.ReadBucket(openChannelBucket)
			rl, commit, err = fetchRevocationLogCompatible(
				cb, tc.updateNum,
			)
			return err
		}

		t.Run(tc.name, func(t *testing.T) {
			err := kvdb.View(backend, dbTx, func() {})
			require.Equal(t, tc.expectedErr, err)

			// Check the expected revocation log is returned.
			if tc.expectRl {
				require.NotNil(t, rl)
			} else {
				require.Nil(t, rl)
			}

			// Check the expected channel commit is returned.
			if tc.expectCommit {
				require.NotNil(t, commit)
			} else {
				require.Nil(t, commit)
			}
		})
	}
}
// createTestRevocatoinLogBuckets sets up the open channel bucket together
// with both the current and the deprecated revocation log sub-buckets,
// returning the channel bucket and the new log bucket.
//
// NOTE: the misspelling in the name ("Revocatoin") is kept on purpose so the
// existing call sites remain untouched.
func createTestRevocatoinLogBuckets(tx kvdb.RwTx) (kvdb.RwBucket,
	kvdb.RwBucket, error) {

	chanBkt, err := tx.CreateTopLevelBucket(openChannelBucket)
	if err != nil {
		return nil, nil, err
	}

	logBkt, err := chanBkt.CreateBucket(revocationLogBucket)
	if err != nil {
		return nil, nil, err
	}

	// The deprecated bucket is only needed for its existence, so its
	// handle is discarded.
	if _, err := chanBkt.CreateBucket(
		revocationLogBucketDeprecated,
	); err != nil {
		return nil, nil, err
	}

	return chanBkt, logBkt, nil
}

View File

@ -47,11 +47,6 @@ var (
// procedure, we can recover and continue from the persisted state.
retributionBucket = []byte("retribution")
// justiceTxnBucket holds the finalized justice transactions for all
// breached contracts. Entries are added to the justice txn bucket just
// before broadcasting the sweep txn.
justiceTxnBucket = []byte("justice-txn")
// errBrarShuttingDown is an error returned if the breacharbiter has
// been signalled to exit.
errBrarShuttingDown = errors.New("breacharbiter shutting down")
@ -1248,7 +1243,7 @@ func newRetributionInfo(chanPoint *wire.OutPoint,
}
return &retributionInfo{
commitHash: breachInfo.BreachTransaction.TxHash(),
commitHash: breachInfo.BreachTxHash,
chainHash: breachInfo.ChainHash,
chanPoint: *chanPoint,
breachedOutputs: breachedOutputs,
@ -1578,18 +1573,7 @@ func (rs *RetributionStore) Remove(chanPoint *wire.OutPoint) error {
// Remove the persisted retribution info and finalized justice
// transaction.
if err := retBucket.Delete(chanBytes); err != nil {
return err
}
// If we have not finalized this channel breach, we can exit
// early.
justiceBkt := tx.ReadWriteBucket(justiceTxnBucket)
if justiceBkt == nil {
return nil
}
return justiceBkt.Delete(chanBytes)
return retBucket.Delete(chanBytes)
}, func() {})
}

View File

@ -1051,7 +1051,7 @@ func TestBreachHandoffSuccess(t *testing.T) {
processACK <- brarErr
},
BreachRetribution: &lnwallet.BreachRetribution{
BreachTransaction: bobClose.CloseTx,
BreachTxHash: bobClose.CloseTx.TxHash(),
LocalOutputSignDesc: &input.SignDescriptor{
Output: &wire.TxOut{
PkScript: breachKeys[0],
@ -1085,7 +1085,7 @@ func TestBreachHandoffSuccess(t *testing.T) {
processACK <- brarErr
},
BreachRetribution: &lnwallet.BreachRetribution{
BreachTransaction: bobClose.CloseTx,
BreachTxHash: bobClose.CloseTx.TxHash(),
LocalOutputSignDesc: &input.SignDescriptor{
Output: &wire.TxOut{
PkScript: breachKeys[0],
@ -1137,7 +1137,7 @@ func TestBreachHandoffFail(t *testing.T) {
processACK <- brarErr
},
BreachRetribution: &lnwallet.BreachRetribution{
BreachTransaction: bobClose.CloseTx,
BreachTxHash: bobClose.CloseTx.TxHash(),
LocalOutputSignDesc: &input.SignDescriptor{
Output: &wire.TxOut{
PkScript: breachKeys[0],
@ -1179,7 +1179,7 @@ func TestBreachHandoffFail(t *testing.T) {
processACK <- brarErr
},
BreachRetribution: &lnwallet.BreachRetribution{
BreachTransaction: bobClose.CloseTx,
BreachTxHash: bobClose.CloseTx.TxHash(),
LocalOutputSignDesc: &input.SignDescriptor{
Output: &wire.TxOut{
PkScript: breachKeys[0],
@ -1621,7 +1621,7 @@ func testBreachSpends(t *testing.T, test breachTest) {
// Notify the breach arbiter about the breach.
retribution, err := lnwallet.NewBreachRetribution(
alice.State(), height, 1,
alice.State(), height, 1, forceCloseTx,
)
if err != nil {
t.Fatalf("unable to create breach retribution: %v", err)
@ -1837,7 +1837,7 @@ func TestBreachDelayedJusticeConfirmation(t *testing.T) {
// Notify the breach arbiter about the breach.
retribution, err := lnwallet.NewBreachRetribution(
alice.State(), height, uint32(blockHeight),
alice.State(), height, uint32(blockHeight), forceCloseTx,
)
if err != nil {
t.Fatalf("unable to create breach retribution: %v", err)

View File

@ -729,7 +729,6 @@ func (c *chainWatcher) handleKnownRemoteState(
commitTxBroadcast := commitSpend.SpendingTx
commitHash := commitTxBroadcast.TxHash()
spendHeight := uint32(commitSpend.SpendingHeight)
switch {
// If the spending transaction matches the current latest state, then
@ -780,10 +779,22 @@ func (c *chainWatcher) handleKnownRemoteState(
return true, nil
}
// This is neither a remote force close or a "future" commitment, we
// now check whether it's a remote breach and properly handle it.
return c.handlePossibleBreach(commitSpend, broadcastStateNum, chainSet)
}
// handlePossibleBreach checks whether the remote has breached and dispatches a
// breach resolution to claim funds.
func (c *chainWatcher) handlePossibleBreach(commitSpend *chainntnfs.SpendDetail,
broadcastStateNum uint64, chainSet *chainSet) (bool, error) {
// We check if we have a revoked state at this state num that matches
// the spend transaction.
spendHeight := uint32(commitSpend.SpendingHeight)
retribution, err := lnwallet.NewBreachRetribution(
c.cfg.chanState, broadcastStateNum, spendHeight,
commitSpend.SpendingTx,
)
switch {
@ -801,7 +812,8 @@ func (c *chainWatcher) handleKnownRemoteState(
// We found a revoked state at this height, but it could still be our
// own broadcasted state we are looking at. Therefore check that the
// commit matches before assuming it was a breach.
if retribution.BreachTransaction.TxHash() != commitHash {
commitHash := commitSpend.SpendingTx.TxHash()
if retribution.BreachTxHash != commitHash {
return false, nil
}

View File

@ -278,6 +278,11 @@ from occurring that would result in an erroneous force close.](https://github.co
* Add [htlc expiry protection](https://github.com/lightningnetwork/lnd/pull/6212)
to the htlc interceptor API.
* In order to safely advance commitment state, `lnd` saves the past states and
constructs a justice transaction in case of a remote breach. These states can
grow very large on disk for a busy channel, [a footprint which is now reduced
by (at least) 96 percent.](https://github.com/lightningnetwork/lnd/pull/6347)
## Documentation
* Improved instructions on [how to build lnd for mobile](https://github.com/lightningnetwork/lnd/pull/6085).

4
go.mod
View File

@ -40,12 +40,12 @@ require (
github.com/lightningnetwork/lnd/kvdb v1.3.1
github.com/lightningnetwork/lnd/queue v1.1.0
github.com/lightningnetwork/lnd/ticker v1.1.0
github.com/lightningnetwork/lnd/tlv v1.0.2
github.com/lightningnetwork/lnd/tlv v1.0.3
github.com/lightningnetwork/lnd/tor v1.0.0
github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796
github.com/miekg/dns v1.1.43
github.com/prometheus/client_golang v1.11.0
github.com/stretchr/testify v1.7.0
github.com/stretchr/testify v1.7.1
github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02
github.com/urfave/cli v1.22.4
go.etcd.io/etcd/client/pkg/v3 v3.5.0

6
go.sum
View File

@ -489,8 +489,9 @@ github.com/lightningnetwork/lnd/queue v1.1.0/go.mod h1:YTkTVZCxz8tAYreH27EO3s857
github.com/lightningnetwork/lnd/ticker v1.0.0/go.mod h1:iaLXJiVgI1sPANIF2qYYUJXjoksPNvGNYowB8aRbpX0=
github.com/lightningnetwork/lnd/ticker v1.1.0 h1:ShoBiRP3pIxZHaETndfQ5kEe+S4NdAY1hiX7YbZ4QE4=
github.com/lightningnetwork/lnd/ticker v1.1.0/go.mod h1:ubqbSVCn6RlE0LazXuBr7/Zi6QT0uQo++OgIRBxQUrk=
github.com/lightningnetwork/lnd/tlv v1.0.2 h1:LG7H3Uw/mHYGnEeHRPg+STavAH+UsFvuBflD0PzcYFQ=
github.com/lightningnetwork/lnd/tlv v1.0.2/go.mod h1:fICAfsqk1IOsC1J7G9IdsWX1EqWRMqEDCNxZJSKr9C4=
github.com/lightningnetwork/lnd/tlv v1.0.3 h1:0xBZcPuXagP6f7TY/RnLNR4igE21ov6qUdTr5NyvhhI=
github.com/lightningnetwork/lnd/tlv v1.0.3/go.mod h1:dzR/aZetBri+ZY/fHbwV06fNn/3UID6htQzbHfREFdo=
github.com/lightningnetwork/lnd/tor v1.0.0 h1:wvEc7I+Y7IOtPglVP3cVBbYhiVhc7uTd7cMF9gQRzwA=
github.com/lightningnetwork/lnd/tor v1.0.0/go.mod h1:RDtaAdwfAm+ONuPYwUhNIH1RAvKPv+75lHPOegUcz64=
github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796 h1:sjOGyegMIhvgfq5oaue6Td+hxZuf3tDC8lAPrFldqFw=
@ -639,8 +640,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=

View File

@ -1882,7 +1882,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
// If both commitment chains are fully synced from our PoV,
// then we don't need to reply with a signature as both sides
// already have a commitment with the latest accepted.
if !l.channel.OweCommitment(true) {
if !l.channel.OweCommitment() {
return
}
@ -1897,9 +1897,16 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
// We've received a revocation from the remote chain, if valid,
// this moves the remote chain forward, and expands our
// revocation window.
fwdPkg, adds, settleFails, remoteHTLCs, err := l.channel.ReceiveRevocation(
msg,
)
//
// Before advancing our remote chain, we will record the
// current commit tx, which is used by the TowerClient to
// create backups.
oldCommitTx := l.channel.State().RemoteCommitment.CommitTx
// We now process the message and advance our remote commit
// chain.
fwdPkg, adds, settleFails, remoteHTLCs, err := l.channel.
ReceiveRevocation(msg)
if err != nil {
// TODO(halseth): force close?
l.fail(LinkFailureError{code: ErrInvalidRevocation},
@ -1928,10 +1935,13 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
}
// If we have a tower client for this channel type, we'll
// create a backup for the current state.
if l.cfg.TowerClient != nil {
state := l.channel.State()
breachInfo, err := lnwallet.NewBreachRetribution(
state, state.RemoteCommitment.CommitHeight-1, 0,
// OldCommitTx is the breaching tx at height-1.
oldCommitTx,
)
if err != nil {
l.fail(LinkFailureError{code: ErrInternalError},
@ -1967,7 +1977,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
// processRemoteAdds. Also in case there are no local updates,
// but there are still remote updates that are not in the remote
// commit tx yet, send out an update.
if l.channel.OweCommitment(true) {
if l.channel.OweCommitment() {
if !l.updateCommitTxOrFail() {
return
}

View File

@ -340,8 +340,8 @@ func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) {
// testListChannels checks that the response from ListChannels is correct. It
// tests the values in all ChannelConstraints are returned as expected. Once
// ListChannels becomes mature, a test against all fields in ListChannels should
// be performed.
// ListChannels becomes mature, a test against all fields in ListChannels
// should be performed.
func testListChannels(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
@ -369,8 +369,8 @@ func testListChannels(net *lntest.NetworkHarness, t *harnessTest) {
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice)
// Open a channel with 100k satoshis between Alice and Bob with Alice
// being the sole funder of the channel. The minimal HTLC amount is set to
// 4200 msats.
// being the sole funder of the channel. The minimal HTLC amount is set
// to 4200 msats.
const customizedMinHtlc = 4200
chanAmt := btcutil.Amount(100000)
@ -414,11 +414,19 @@ func testListChannels(net *lntest.NetworkHarness, t *harnessTest) {
// Check the returned response is correct.
aliceChannel := resp.Channels[0]
// Since Alice is the initiator, she pays the commit fee.
aliceBalance := int64(chanAmt) - aliceChannel.CommitFee
// Check the balance related fields are correct.
require.Equal(t.t, aliceBalance, aliceChannel.LocalBalance)
require.Zero(t.t, aliceChannel.RemoteBalance)
require.Zero(t.t, aliceChannel.PushAmountSat)
// Calculate the dust limit we'll use for the test.
dustLimit := lnwallet.DustLimitForSize(input.UnknownWitnessSize)
// defaultConstraints is a ChannelConstraints with default values. It is
// used to test against Alice's local channel constraints.
// defaultConstraints is a ChannelConstraints with default values. It
// is used to test against Alice's local channel constraints.
defaultConstraints := &lnrpc.ChannelConstraints{
CsvDelay: 4,
ChanReserveSat: 1000,
@ -464,9 +472,14 @@ func testListChannels(net *lntest.NetworkHarness, t *harnessTest) {
)
}
// Check channel constraints match. Alice's local channel constraint should
// be equal to Bob's remote channel constraint, and her remote one should
// be equal to Bob's local one.
// Check the balance related fields are correct.
require.Equal(t.t, aliceBalance, bobChannel.RemoteBalance)
require.Zero(t.t, bobChannel.LocalBalance)
require.Zero(t.t, bobChannel.PushAmountSat)
// Check channel constraints match. Alice's local channel constraint
// should be equal to Bob's remote channel constraint, and her remote
// one should be equal to Bob's local one.
assertChannelConstraintsEqual(
t, aliceChannel.LocalConstraints, bobChannel.RemoteConstraints,
)

View File

@ -96,6 +96,17 @@ var (
// both parties can retrieve their funds.
ErrCommitSyncRemoteDataLoss = fmt.Errorf("possible remote commitment " +
"state data loss")
// ErrNoRevocationLogFound is returned when both the returned logs are
// nil from querying the revocation log bucket. In theory this should
// never happen as the query will return `ErrLogEntryNotFound`, yet
// we'd still perform a sanity check to make sure at least one of the
// logs is non-nil.
ErrNoRevocationLogFound = errors.New("no revocation log found")
// ErrOutputIndexOutOfRange is returned when an output index is greater
// than or equal to the length of a given transaction's outputs.
ErrOutputIndexOutOfRange = errors.New("output index is out of range")
)
// ErrCommitSyncLocalDataLoss is returned in the case that we receive a valid
@ -1036,26 +1047,29 @@ type updateLog struct {
logIndex uint64
// htlcCounter is a monotonically increasing integer that tracks the
// total number of offered HTLC's by the owner of this update log. We
// use a distinct index for this purpose, as update's that remove
// entries from the log will be indexed using this counter.
// total number of offered HTLC's by the owner of this update log,
// hence the `Add` update type. We use a distinct index for this
// purpose, as updates that remove entries from the log will be
// indexed using this counter.
htlcCounter uint64
// List is the updatelog itself, we embed this value so updateLog has
// access to all the method of a list.List.
*list.List
// updateIndex is an index that maps a particular entries index to the
// list element within the list.List above.
// updateIndex maps a `logIndex` to a particular update entry. It
// deals with the four update types:
// `Fail|MalformedFail|Settle|FeeUpdate`
updateIndex map[uint64]*list.Element
// offerIndex is an index that maps the counter for offered HTLC's to
// their list element within the main list.List.
// htlcIndex maps a `htlcCounter` to an offered HTLC entry, hence the
// `Add` update.
htlcIndex map[uint64]*list.Element
// modifiedHtlcs is a set that keeps track of all the current modified
// htlcs. A modified HTLC is one that's present in the log, and has as
// a pending fail or settle that's attempting to consume it.
// htlcs, hence update types `Fail|MalformedFail|Settle`. A modified
// HTLC is one that's present in the log, and has a pending fail or
// settle that's attempting to consume it.
modifiedHtlcs map[uint64]struct{}
}
@ -2210,10 +2224,10 @@ type HtlcRetribution struct {
// transaction. The BreachRetribution is then sent over the ContractBreach
// channel in order to allow the subscriber of the channel to dispatch justice.
type BreachRetribution struct {
// BreachTransaction is the transaction which breached the channel
// BreachTxHash is the transaction hash which breached the channel
// contract by spending from the funding multi-sig with a revoked
// commitment transaction.
BreachTransaction *wire.MsgTx
BreachTxHash chainhash.Hash
// BreachHeight records the block height confirming the breach
// transaction, used as a height hint when registering for
@ -2228,13 +2242,9 @@ type BreachRetribution struct {
// RevokedStateNum is the revoked state number which was broadcast.
RevokedStateNum uint64
// PendingHTLCs is a slice of the HTLCs which were pending at this
// point within the channel's history transcript.
PendingHTLCs []channeldb.HTLC
// LocalOutputSignDesc is a SignDescriptor which is capable of
// generating the signature necessary to sweep the output within the
// BreachTransaction that pays directly us.
// breach transaction that pays directly us.
//
// NOTE: A nil value indicates that the local output is considered dust
// according to the remote party's dust limit.
@ -2279,16 +2289,22 @@ type BreachRetribution struct {
// passed channel, at a particular revoked state number, and one which targets
// the passed commitment transaction.
func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64,
breachHeight uint32) (*BreachRetribution, error) {
breachHeight uint32, spendTx *wire.MsgTx) (*BreachRetribution, error) {
// Query the on-disk revocation log for the snapshot which was recorded
// at this particular state num.
revokedSnapshot, err := chanState.FindPreviousState(stateNum)
// at this particular state num. Based on whether a legacy revocation
// log is returned or not, we will process them differently.
revokedLog, revokedLogLegacy, err := chanState.FindPreviousState(
stateNum,
)
if err != nil {
return nil, err
}
commitHash := revokedSnapshot.CommitTx.TxHash()
// Sanity check that at least one of the logs is returned.
if revokedLog == nil && revokedLogLegacy == nil {
return nil, ErrNoRevocationLogFound
}
// With the state number broadcast known, we can now derive/restore the
// proper revocation preimage necessary to sweep the remote party's
@ -2311,22 +2327,14 @@ func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64,
// Next, reconstruct the scripts as they were present at this state
// number so we can have the proper witness script to sign and include
// within the final witness.
theirDelay := uint32(chanState.RemoteChanCfg.CsvDelay)
isRemoteInitiator := !chanState.IsInitiator
var leaseExpiry uint32
if chanState.ChanType.HasLeaseExpiration() {
leaseExpiry = chanState.ThawHeight
}
theirScript, err := CommitScriptToSelf(
chanState.ChanType, isRemoteInitiator, keyRing.ToLocalKey,
keyRing.RevocationKey, theirDelay, leaseExpiry,
)
if err != nil {
return nil, err
}
// Since it is the remote breach we are reconstructing, the output going
// to us will be a to-remote script with our local params.
// Since it is the remote breach we are reconstructing, the output
// going to us will be a to-remote script with our local params.
isRemoteInitiator := !chanState.IsInitiator
ourScript, ourDelay, err := CommitScriptToRemote(
chanState.ChanType, isRemoteInitiator, keyRing.ToRemoteKey,
leaseExpiry,
@ -2335,15 +2343,248 @@ func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64,
return nil, err
}
// In order to fully populate the breach retribution struct, we'll need
// to find the exact index of the commitment outputs.
theirDelay := uint32(chanState.RemoteChanCfg.CsvDelay)
theirScript, err := CommitScriptToSelf(
chanState.ChanType, isRemoteInitiator, keyRing.ToLocalKey,
keyRing.RevocationKey, theirDelay, leaseExpiry,
)
if err != nil {
return nil, err
}
// Define an empty breach retribution that will be overwritten based on
// different version of the revocation log found.
var br *BreachRetribution
// Define our and their amounts, that will be overwritten below.
var ourAmt, theirAmt int64
// If the returned *RevocationLog is non-nil, use it to derive the info
// we need.
if revokedLog != nil {
br, ourAmt, theirAmt, err = createBreachRetribution(
revokedLog, spendTx, chanState, keyRing,
commitmentSecret, leaseExpiry,
)
if err != nil {
return nil, err
}
} else {
// The returned revocation log is in legacy format, which is a
// *ChannelCommitment.
//
// NOTE: this branch is kept for compatibility such that for
// old nodes which refuse to migrate the legacy revocation log
// data can still function. This branch can be deleted once we
// are confident that no legacy format is in use.
br, ourAmt, theirAmt, err = createBreachRetributionLegacy(
revokedLogLegacy, chanState, keyRing, commitmentSecret,
ourScript, theirScript, leaseExpiry,
)
if err != nil {
return nil, err
}
}
// Conditionally instantiate a sign descriptor for each of the
// commitment outputs. If either is considered dust using the remote
// party's dust limit, the respective sign descriptor will be nil.
//
// If our balance exceeds the remote party's dust limit, instantiate
// the sign descriptor for our output.
if ourAmt >= int64(chanState.RemoteChanCfg.DustLimit) {
br.LocalOutputSignDesc = &input.SignDescriptor{
SingleTweak: keyRing.LocalCommitKeyTweak,
KeyDesc: chanState.LocalChanCfg.PaymentBasePoint,
WitnessScript: ourScript.WitnessScript,
Output: &wire.TxOut{
PkScript: ourScript.PkScript,
Value: ourAmt,
},
HashType: txscript.SigHashAll,
}
}
// Similarly, if their balance exceeds the remote party's dust limit,
// assemble the sign descriptor for their output, which we can sweep.
if theirAmt >= int64(chanState.RemoteChanCfg.DustLimit) {
br.RemoteOutputSignDesc = &input.SignDescriptor{
KeyDesc: chanState.LocalChanCfg.
RevocationBasePoint,
DoubleTweak: commitmentSecret,
WitnessScript: theirScript.WitnessScript,
Output: &wire.TxOut{
PkScript: theirScript.PkScript,
Value: theirAmt,
},
HashType: txscript.SigHashAll,
}
}
// Finally, with all the necessary data constructed, we can pad the
// BreachRetribution struct which houses all the data necessary to
// swiftly bring justice to the cheating remote party.
br.BreachHeight = breachHeight
br.RevokedStateNum = stateNum
br.LocalDelay = ourDelay
br.RemoteDelay = theirDelay
return br, nil
}
// createHtlcRetribution is a helper function to construct an HtlcRetribution
// based on the passed params.
func createHtlcRetribution(chanState *channeldb.OpenChannel,
keyRing *CommitmentKeyRing, commitHash chainhash.Hash,
commitmentSecret *btcec.PrivateKey, leaseExpiry uint32,
htlc *channeldb.HTLCEntry) (HtlcRetribution, error) {
var emptyRetribution HtlcRetribution
theirDelay := uint32(chanState.RemoteChanCfg.CsvDelay)
isRemoteInitiator := !chanState.IsInitiator
// We'll generate the original second level witness script now, as
// we'll need it if we're revoking an HTLC output on the remote
// commitment transaction, and *they* go to the second level.
secondLevelScript, err := SecondLevelHtlcScript(
chanState.ChanType, isRemoteInitiator,
keyRing.RevocationKey, keyRing.ToLocalKey, theirDelay,
leaseExpiry,
)
if err != nil {
return emptyRetribution, err
}
// If this is an incoming HTLC, then this means that they were the
// sender of the HTLC (relative to us). So we'll re-generate the sender
// HTLC script. Otherwise, if this was an outgoing HTLC that we sent,
// then from the PoV of the remote commitment state, they're the
// receiver of this HTLC.
htlcPkScript, htlcWitnessScript, err := genHtlcScript(
chanState.ChanType, htlc.Incoming, false,
htlc.RefundTimeout, htlc.RHash, keyRing,
)
if err != nil {
return emptyRetribution, err
}
return HtlcRetribution{
SignDesc: input.SignDescriptor{
KeyDesc: chanState.LocalChanCfg.
RevocationBasePoint,
DoubleTweak: commitmentSecret,
WitnessScript: htlcWitnessScript,
Output: &wire.TxOut{
PkScript: htlcPkScript,
Value: int64(htlc.Amt),
},
HashType: txscript.SigHashAll,
},
OutPoint: wire.OutPoint{
Hash: commitHash,
Index: uint32(htlc.OutputIndex),
},
SecondLevelWitnessScript: secondLevelScript.WitnessScript,
IsIncoming: htlc.Incoming,
}, nil
}
// createBreachRetribution creates a partially initiated BreachRetribution
// using a RevocationLog. Returns the constructed retribution, our amount,
// their amount, and a possible non-nil error.
func createBreachRetribution(revokedLog *channeldb.RevocationLog,
spendTx *wire.MsgTx, chanState *channeldb.OpenChannel,
keyRing *CommitmentKeyRing, commitmentSecret *btcec.PrivateKey,
leaseExpiry uint32) (*BreachRetribution, int64, int64, error) {
commitHash := revokedLog.CommitTxHash
// Create the htlc retributions.
htlcRetributions := make([]HtlcRetribution, len(revokedLog.HTLCEntries))
for i, htlc := range revokedLog.HTLCEntries {
hr, err := createHtlcRetribution(
chanState, keyRing, commitHash,
commitmentSecret, leaseExpiry, htlc,
)
if err != nil {
return nil, 0, 0, err
}
htlcRetributions[i] = hr
}
var ourAmt, theirAmt int64
// Construct our outpoint.
ourOutpoint := wire.OutPoint{
Hash: commitHash,
}
if revokedLog.OurOutputIndex != channeldb.OutputIndexEmpty {
ourOutpoint.Index = uint32(revokedLog.OurOutputIndex)
// Sanity check that OurOutputIndex is within range.
if int(ourOutpoint.Index) >= len(spendTx.TxOut) {
return nil, 0, 0, fmt.Errorf("%w: ours=%v, "+
"len(TxOut)=%v", ErrOutputIndexOutOfRange,
ourOutpoint.Index, len(spendTx.TxOut),
)
}
// Read the amounts from the breach transaction.
//
// NOTE: ourAmt here includes commit fee and anchor amount(if
// enabled).
ourAmt = spendTx.TxOut[ourOutpoint.Index].Value
}
// Construct their outpoint.
theirOutpoint := wire.OutPoint{
Hash: commitHash,
}
if revokedLog.TheirOutputIndex != channeldb.OutputIndexEmpty {
theirOutpoint.Index = uint32(revokedLog.TheirOutputIndex)
// Sanity check that TheirOutputIndex is within range.
if int(revokedLog.TheirOutputIndex) >= len(spendTx.TxOut) {
return nil, 0, 0, fmt.Errorf("%w: theirs=%v, "+
"len(TxOut)=%v", ErrOutputIndexOutOfRange,
revokedLog.TheirOutputIndex, len(spendTx.TxOut),
)
}
// Read the amounts from the breach transaction.
theirAmt = spendTx.TxOut[theirOutpoint.Index].Value
}
return &BreachRetribution{
BreachTxHash: commitHash,
ChainHash: chanState.ChainHash,
LocalOutpoint: ourOutpoint,
RemoteOutpoint: theirOutpoint,
HtlcRetributions: htlcRetributions,
KeyRing: keyRing,
}, ourAmt, theirAmt, nil
}
// createBreachRetributionLegacy creates a partially initiated
// BreachRetribution using a ChannelCommitment. Returns the constructed
// retribution, our amount, their amount, and a possible non-nil error.
func createBreachRetributionLegacy(revokedLog *channeldb.ChannelCommitment,
chanState *channeldb.OpenChannel, keyRing *CommitmentKeyRing,
commitmentSecret *btcec.PrivateKey,
ourScript, theirScript *ScriptInfo,
leaseExpiry uint32) (*BreachRetribution, int64, int64, error) {
commitHash := revokedLog.CommitTx.TxHash()
ourOutpoint := wire.OutPoint{
Hash: commitHash,
}
theirOutpoint := wire.OutPoint{
Hash: commitHash,
}
for i, txOut := range revokedSnapshot.CommitTx.TxOut {
// In order to fully populate the breach retribution struct, we'll need
// to find the exact index of the commitment outputs.
for i, txOut := range revokedLog.CommitTx.TxOut {
switch {
case bytes.Equal(txOut.PkScript, ourScript.PkScript):
ourOutpoint.Index = uint32(i)
@ -2352,127 +2593,52 @@ func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64,
}
}
// Conditionally instantiate a sign descriptor for each of the
// commitment outputs. If either is considered dust using the remote
// party's dust limit, the respective sign descriptor will be nil.
var (
ourSignDesc *input.SignDescriptor
theirSignDesc *input.SignDescriptor
)
// Compute the balances in satoshis.
ourAmt := revokedSnapshot.LocalBalance.ToSatoshis()
theirAmt := revokedSnapshot.RemoteBalance.ToSatoshis()
// If our balance exceeds the remote party's dust limit, instantiate
// the sign descriptor for our output.
if ourAmt >= chanState.RemoteChanCfg.DustLimit {
ourSignDesc = &input.SignDescriptor{
SingleTweak: keyRing.LocalCommitKeyTweak,
KeyDesc: chanState.LocalChanCfg.PaymentBasePoint,
WitnessScript: ourScript.WitnessScript,
Output: &wire.TxOut{
PkScript: ourScript.PkScript,
Value: int64(ourAmt),
},
HashType: txscript.SigHashAll,
}
}
// Similarly, if their balance exceeds the remote party's dust limit,
// assemble the sign descriptor for their output, which we can sweep.
if theirAmt >= chanState.RemoteChanCfg.DustLimit {
theirSignDesc = &input.SignDescriptor{
KeyDesc: chanState.LocalChanCfg.RevocationBasePoint,
DoubleTweak: commitmentSecret,
WitnessScript: theirScript.WitnessScript,
Output: &wire.TxOut{
PkScript: theirScript.PkScript,
Value: int64(theirAmt),
},
HashType: txscript.SigHashAll,
}
}
// With the commitment outputs located, we'll now generate all the
// retribution structs for each of the HTLC transactions active on the
// remote commitment transaction.
htlcRetributions := make([]HtlcRetribution, 0, len(revokedSnapshot.Htlcs))
for _, htlc := range revokedSnapshot.Htlcs {
htlcRetributions := make([]HtlcRetribution, len(revokedLog.Htlcs))
for i, htlc := range revokedLog.Htlcs {
// If the HTLC is dust, then we'll skip it as it doesn't have
// an output on the commitment transaction.
if HtlcIsDust(
chanState.ChanType, htlc.Incoming, false,
chainfee.SatPerKWeight(revokedSnapshot.FeePerKw),
htlc.Amt.ToSatoshis(), chanState.RemoteChanCfg.DustLimit,
chainfee.SatPerKWeight(revokedLog.FeePerKw),
htlc.Amt.ToSatoshis(),
chanState.RemoteChanCfg.DustLimit,
) {
continue
}
// We'll generate the original second level witness script now,
// as we'll need it if we're revoking an HTLC output on the
// remote commitment transaction, and *they* go to the second
// level.
secondLevelScript, err := SecondLevelHtlcScript(
chanState.ChanType, isRemoteInitiator,
keyRing.RevocationKey, keyRing.ToLocalKey, theirDelay,
leaseExpiry,
entry := &channeldb.HTLCEntry{
RHash: htlc.RHash,
RefundTimeout: htlc.RefundTimeout,
OutputIndex: uint16(htlc.OutputIndex),
Incoming: htlc.Incoming,
Amt: htlc.Amt.ToSatoshis(),
}
hr, err := createHtlcRetribution(
chanState, keyRing, commitHash,
commitmentSecret, leaseExpiry, entry,
)
if err != nil {
return nil, err
return nil, 0, 0, err
}
// If this is an incoming HTLC, then this means that they were
// the sender of the HTLC (relative to us). So we'll
// re-generate the sender HTLC script. Otherwise, is this was
// an outgoing HTLC that we sent, then from the PoV of the
// remote commitment state, they're the receiver of this HTLC.
htlcPkScript, htlcWitnessScript, err := genHtlcScript(
chanState.ChanType, htlc.Incoming, false,
htlc.RefundTimeout, htlc.RHash, keyRing,
)
if err != nil {
return nil, err
}
htlcRetributions = append(htlcRetributions, HtlcRetribution{
SignDesc: input.SignDescriptor{
KeyDesc: chanState.LocalChanCfg.RevocationBasePoint,
DoubleTweak: commitmentSecret,
WitnessScript: htlcWitnessScript,
Output: &wire.TxOut{
PkScript: htlcPkScript,
Value: int64(htlc.Amt.ToSatoshis()),
},
HashType: txscript.SigHashAll,
},
OutPoint: wire.OutPoint{
Hash: commitHash,
Index: uint32(htlc.OutputIndex),
},
SecondLevelWitnessScript: secondLevelScript.WitnessScript,
IsIncoming: htlc.Incoming,
})
htlcRetributions[i] = hr
}
// Finally, with all the necessary data constructed, we can create the
// BreachRetribution struct which houses all the data necessary to
// swiftly bring justice to the cheating remote party.
// Compute the balances in satoshis.
ourAmt := int64(revokedLog.LocalBalance.ToSatoshis())
theirAmt := int64(revokedLog.RemoteBalance.ToSatoshis())
return &BreachRetribution{
ChainHash: chanState.ChainHash,
BreachTransaction: revokedSnapshot.CommitTx,
BreachHeight: breachHeight,
RevokedStateNum: stateNum,
PendingHTLCs: revokedSnapshot.Htlcs,
LocalOutpoint: ourOutpoint,
LocalOutputSignDesc: ourSignDesc,
LocalDelay: ourDelay,
RemoteOutpoint: theirOutpoint,
RemoteOutputSignDesc: theirSignDesc,
RemoteDelay: theirDelay,
HtlcRetributions: htlcRetributions,
KeyRing: keyRing,
}, nil
BreachTxHash: commitHash,
ChainHash: chanState.ChainHash,
LocalOutpoint: ourOutpoint,
RemoteOutpoint: theirOutpoint,
HtlcRetributions: htlcRetributions,
KeyRing: keyRing,
}, ourAmt, theirAmt, nil
}
// HtlcIsDust determines if an HTLC output is dust or not depending on two
@ -3822,7 +3988,7 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// but died before the signature was sent. We re-transmit our
// revocation, but also initiate a state transition to re-sync
// them.
if lc.OweCommitment(true) {
if lc.OweCommitment() {
commitSig, htlcSigs, _, err := lc.SignNextCommitment()
switch {
@ -4536,11 +4702,11 @@ func (lc *LightningChannel) IsChannelClean() bool {
// out a commitment signature because there are outstanding local updates and/or
// updates in the local commit tx that aren't reflected in the remote commit tx
// yet.
func (lc *LightningChannel) OweCommitment(local bool) bool {
func (lc *LightningChannel) OweCommitment() bool {
lc.RLock()
defer lc.RUnlock()
return lc.oweCommitment(local)
return lc.oweCommitment(true)
}
// oweCommitment is the internal version of OweCommitment. This function expects
@ -4874,12 +5040,28 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) (
source, remoteChainTail, addUpdates, settleFailUpdates,
)
// We will soon be saving the current remote commitment to revocation
// log bucket, which is `lc.channelState.RemoteCommitment`. After that,
// the `RemoteCommitment` will be replaced with a newer version found
// in `CommitDiff`. Thus we need to compute the output indexes here
// before the change since the indexes are meant for the current,
// revoked remote commitment.
ourOutputIndex, theirOutputIndex, err := findOutputIndexesFromRemote(
revocation, lc.channelState,
)
if err != nil {
return nil, nil, nil, nil, err
}
// At this point, the revocation has been accepted, and we've rotated
// the current revocation key+hash for the remote party. Therefore we
// sync now to ensure the revocation producer state is consistent with
// the current commitment height and also to advance the on-disk
// commitment chain.
err = lc.channelState.AdvanceCommitChainTail(fwdPkg, localPeerUpdates)
err = lc.channelState.AdvanceCommitChainTail(
fwdPkg, localPeerUpdates,
ourOutputIndex, theirOutputIndex,
)
if err != nil {
return nil, nil, nil, nil, err
}

View File

@ -14,6 +14,7 @@ import (
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
@ -7106,8 +7107,9 @@ func TestNewBreachRetributionSkipsDustHtlcs(t *testing.T) {
// At this point, we'll now simulate a contract breach by Bob using the
// NewBreachRetribution method.
breachTx := aliceChannel.channelState.RemoteCommitment.CommitTx
breachRet, err := NewBreachRetribution(
aliceChannel.channelState, revokedStateNum, 100,
aliceChannel.channelState, revokedStateNum, 100, breachTx,
)
if err != nil {
t.Fatalf("unable to create breach retribution: %v", err)
@ -10249,3 +10251,388 @@ func testGetDustSum(t *testing.T, chantype channeldb.ChannelType) {
checkDust(bobChannel, htlc2Amt+htlc3Amt, htlc2Amt+htlc3Amt)
}
}
// deriveDummyRetributionParams is a test helper that derives the lease
// expiry, commitment key ring and commitment tx hash needed when building
// retributions in tests.
func deriveDummyRetributionParams(chanState *channeldb.OpenChannel) (uint32,
	*CommitmentKeyRing, chainhash.Hash) {

	// The thaw height doubles as the lease expiry for channels that have
	// one configured.
	expiry := chanState.ThawHeight

	// Hash of the remote party's current commitment transaction.
	txHash := chanState.RemoteCommitment.CommitTx.TxHash()

	// Derive the key ring from the remote revocation base point. The keys
	// don't need to be "cryptography-correct" for test purposes.
	remoteCfg := chanState.RemoteChanCfg
	ring := DeriveCommitmentKeys(
		remoteCfg.RevocationBasePoint.PubKey, false,
		chanState.ChanType, &chanState.LocalChanCfg,
		&chanState.RemoteChanCfg,
	)

	return expiry, ring, txHash
}
// TestCreateHtlcRetribution checks that `createHtlcRetribution` behaves as
// expected.
func TestCreateHtlcRetribution(t *testing.T) {
	t.Parallel()

	// Create a dummy private key and an HTLC amount for testing.
	dummyPrivate, _ := btcec.PrivKeyFromBytes([]byte{1})
	testAmt := btcutil.Amount(100)

	// Create a test channel.
	aliceChannel, _, cleanUp, err := CreateTestChannels(
		channeldb.ZeroHtlcTxFeeBit,
	)
	require.NoError(t, err)
	defer cleanUp()

	// Prepare the params needed to call the function. Note that the values
	// here are not necessarily "cryptography-correct", we just use them to
	// construct the htlc retribution.
	leaseExpiry, keyRing, commitHash := deriveDummyRetributionParams(
		aliceChannel.channelState,
	)
	htlc := &channeldb.HTLCEntry{
		Amt:         testAmt,
		Incoming:    true,
		OutputIndex: 1,
	}

	// Create the htlc retribution.
	hr, err := createHtlcRetribution(
		aliceChannel.channelState, keyRing, commitHash,
		dummyPrivate, leaseExpiry, htlc,
	)
	// Expect no error.
	require.NoError(t, err)

	// Check the fields have expected values.
	require.EqualValues(t, testAmt, hr.SignDesc.Output.Value)
	require.Equal(t, commitHash, hr.OutPoint.Hash)
	require.EqualValues(t, htlc.OutputIndex, hr.OutPoint.Index)
	require.Equal(t, htlc.Incoming, hr.IsIncoming)
}
// TestCreateBreachRetribution checks that `createBreachRetribution` behaves
// as expected.
func TestCreateBreachRetribution(t *testing.T) {
	t.Parallel()

	// Create dummy values for the test.
	dummyPrivate, _ := btcec.PrivKeyFromBytes([]byte{1})
	testAmt := int64(100)
	ourAmt := int64(1000)
	theirAmt := int64(2000)
	localIndex := uint32(0)
	remoteIndex := uint32(1)
	htlcIndex := uint32(2)

	// Create a dummy breach tx, which has our output located at index 0
	// and theirs at 1.
	spendTx := &wire.MsgTx{
		TxOut: []*wire.TxOut{
			{Value: ourAmt},
			{Value: theirAmt},
			{Value: testAmt},
		},
	}

	// Create a test channel.
	aliceChannel, _, cleanUp, err := CreateTestChannels(
		channeldb.ZeroHtlcTxFeeBit,
	)
	require.NoError(t, err)
	defer cleanUp()

	// Prepare the params needed to call the function. Note that the values
	// here are not necessarily "cryptography-correct", we just use them to
	// construct the retribution.
	leaseExpiry, keyRing, commitHash := deriveDummyRetributionParams(
		aliceChannel.channelState,
	)
	htlc := &channeldb.HTLCEntry{
		Amt:         btcutil.Amount(testAmt),
		Incoming:    true,
		OutputIndex: uint16(htlcIndex),
	}

	// Create a dummy revocation log.
	revokedLog := channeldb.RevocationLog{
		CommitTxHash:     commitHash,
		OurOutputIndex:   uint16(localIndex),
		TheirOutputIndex: uint16(remoteIndex),
		HTLCEntries:      []*channeldb.HTLCEntry{htlc},
	}

	// Create a log with an empty local output index.
	revokedLogNoLocal := revokedLog
	revokedLogNoLocal.OurOutputIndex = channeldb.OutputIndexEmpty

	// Create a log with an empty remote output index.
	revokedLogNoRemote := revokedLog
	revokedLogNoRemote.TheirOutputIndex = channeldb.OutputIndexEmpty

	testCases := []struct {
		name             string
		revocationLog    *channeldb.RevocationLog
		expectedErr      error
		expectedOurAmt   int64
		expectedTheirAmt int64
	}{
		{
			name:             "create retribution successfully",
			revocationLog:    &revokedLog,
			expectedErr:      nil,
			expectedOurAmt:   ourAmt,
			expectedTheirAmt: theirAmt,
		},
		{
			name: "fail due to our index too big",
			revocationLog: &channeldb.RevocationLog{
				OurOutputIndex: uint16(htlcIndex + 1),
			},
			expectedErr: ErrOutputIndexOutOfRange,
		},
		{
			name: "fail due to their index too big",
			revocationLog: &channeldb.RevocationLog{
				TheirOutputIndex: uint16(htlcIndex + 1),
			},
			expectedErr: ErrOutputIndexOutOfRange,
		},
		{
			name:             "empty local output index",
			revocationLog:    &revokedLogNoLocal,
			expectedErr:      nil,
			expectedOurAmt:   0,
			expectedTheirAmt: theirAmt,
		},
		{
			name:             "empty remote output index",
			revocationLog:    &revokedLogNoRemote,
			expectedErr:      nil,
			expectedOurAmt:   ourAmt,
			expectedTheirAmt: 0,
		},
	}

	// assertRetribution is a helper closure that checks a given breach
	// retribution has the expected values on certain fields.
	assertRetribution := func(br *BreachRetribution, our, their int64) {
		chainHash := aliceChannel.channelState.ChainHash
		require.Equal(t, commitHash, br.BreachTxHash)
		require.Equal(t, chainHash, br.ChainHash)

		// Construct local outpoint, we only have the index when the
		// amount is not zero.
		local := wire.OutPoint{
			Hash: commitHash,
		}
		if our != 0 {
			local.Index = localIndex
		}

		// Construct remote outpoint, we only have the index when the
		// amount is not zero.
		remote := wire.OutPoint{
			Hash: commitHash,
		}
		if their != 0 {
			remote.Index = remoteIndex
		}

		require.Equal(t, local, br.LocalOutpoint)
		require.Equal(t, remote, br.RemoteOutpoint)

		for _, hr := range br.HtlcRetributions {
			require.EqualValues(t, testAmt,
				hr.SignDesc.Output.Value)
			require.Equal(t, commitHash, hr.OutPoint.Hash)
			require.EqualValues(t, htlcIndex, hr.OutPoint.Index)
			require.Equal(t, htlc.Incoming, hr.IsIncoming)
		}
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			br, our, their, err := createBreachRetribution(
				tc.revocationLog, spendTx,
				aliceChannel.channelState, keyRing,
				dummyPrivate, leaseExpiry,
			)

			// Check the error if expected.
			if tc.expectedErr != nil {
				require.ErrorIs(t, err, tc.expectedErr)
			} else {
				// Otherwise we expect no error.
				require.NoError(t, err)

				// Check the amounts and the constructed
				// partial retribution are returned as
				// expected.
				require.Equal(t, tc.expectedOurAmt, our)
				require.Equal(t, tc.expectedTheirAmt, their)
				assertRetribution(br, our, their)
			}
		})
	}
}
// TestCreateBreachRetributionLegacy checks that
// `createBreachRetributionLegacy` behaves as expected.
func TestCreateBreachRetributionLegacy(t *testing.T) {
	t.Parallel()

	// Create dummy values for the test.
	dummyPrivate, _ := btcec.PrivKeyFromBytes([]byte{1})

	// Create a test channel.
	aliceChannel, _, cleanUp, err := CreateTestChannels(
		channeldb.ZeroHtlcTxFeeBit,
	)
	require.NoError(t, err)
	defer cleanUp()

	// Prepare the params needed to call the function. Note that the values
	// here are not necessarily "cryptography-correct", we just use them to
	// construct the retribution.
	leaseExpiry, keyRing, _ := deriveDummyRetributionParams(
		aliceChannel.channelState,
	)

	// Use the remote commitment as our revocation log.
	revokedLog := aliceChannel.channelState.RemoteCommitment

	ourOp := revokedLog.CommitTx.TxOut[0]
	theirOp := revokedLog.CommitTx.TxOut[1]

	// Create the dummy scripts.
	ourScript := &ScriptInfo{
		PkScript: ourOp.PkScript,
	}
	theirScript := &ScriptInfo{
		PkScript: theirOp.PkScript,
	}

	// Create the breach retribution using the legacy format.
	br, ourAmt, theirAmt, err := createBreachRetributionLegacy(
		&revokedLog, aliceChannel.channelState, keyRing,
		dummyPrivate, ourScript, theirScript, leaseExpiry,
	)
	require.NoError(t, err)

	// Check the commitHash and chainHash.
	commitHash := revokedLog.CommitTx.TxHash()
	chainHash := aliceChannel.channelState.ChainHash
	require.Equal(t, commitHash, br.BreachTxHash)
	require.Equal(t, chainHash, br.ChainHash)

	// Check the outpoints.
	local := wire.OutPoint{
		Hash:  commitHash,
		Index: 0,
	}
	remote := wire.OutPoint{
		Hash:  commitHash,
		Index: 1,
	}
	require.Equal(t, local, br.LocalOutpoint)
	require.Equal(t, remote, br.RemoteOutpoint)

	// Validate the amounts. Note that in the legacy format, our amount is
	// not directly the amount found in the to local output. Rather, it's
	// the local output value minus the commit fee and anchor value (if
	// present).
	require.EqualValues(t, revokedLog.LocalBalance.ToSatoshis(), ourAmt)
	require.Equal(t, theirOp.Value, theirAmt)
}
// TestNewBreachRetribution tests that the function `NewBreachRetribution`
// behaves as expected.
func TestNewBreachRetribution(t *testing.T) {
t.Run("non-anchor", func(t *testing.T) {
testNewBreachRetribution(t, channeldb.ZeroHtlcTxFeeBit)
})
t.Run("anchor", func(t *testing.T) {
chanType := channeldb.SingleFunderTweaklessBit |
channeldb.AnchorOutputsBit
testNewBreachRetribution(t, chanType)
})
}
// testNewBreachRetribution takes a channel type and tests the function
// `NewBreachRetribution`.
func testNewBreachRetribution(t *testing.T, chanType channeldb.ChannelType) {
	t.Parallel()

	aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(chanType)
	require.NoError(t, err)
	defer cleanUp()

	breachHeight := uint32(101)
	stateNum := uint64(0)
	chainHash := aliceChannel.channelState.ChainHash
	theirDelay := uint32(aliceChannel.channelState.RemoteChanCfg.CsvDelay)
	breachTx := aliceChannel.channelState.RemoteCommitment.CommitTx

	// Create a breach retribution at height 0, which should give us an
	// error as there are no past delta state saved as revocation logs yet.
	_, err = NewBreachRetribution(
		aliceChannel.channelState, stateNum, breachHeight, breachTx,
	)
	require.ErrorIs(t, err, channeldb.ErrNoPastDeltas)

	// We now force a state transition which will give us a revocation log
	// at height 0.
	txid := aliceChannel.channelState.RemoteCommitment.CommitTx.TxHash()
	err = ForceStateTransition(aliceChannel, bobChannel)
	require.NoError(t, err)

	// assertRetribution is a helper closure that checks a given breach
	// retribution has the expected values on certain fields.
	assertRetribution := func(br *BreachRetribution,
		localIndex, remoteIndex uint32) {

		require.Equal(t, txid, br.BreachTxHash)
		require.Equal(t, chainHash, br.ChainHash)
		require.Equal(t, breachHeight, br.BreachHeight)
		require.Equal(t, stateNum, br.RevokedStateNum)
		require.Equal(t, theirDelay, br.RemoteDelay)

		local := wire.OutPoint{
			Hash:  txid,
			Index: localIndex,
		}
		remote := wire.OutPoint{
			Hash:  txid,
			Index: remoteIndex,
		}

		if chanType.HasAnchors() {
			// For anchor channels, we expect the local delay to be
			// 1, otherwise 0.
			require.EqualValues(t, 1, br.LocalDelay)
		} else {
			require.Zero(t, br.LocalDelay)
		}

		require.Equal(t, local, br.LocalOutpoint)
		require.Equal(t, remote, br.RemoteOutpoint)
	}

	// Create the retribution again and we should expect it to be created
	// successfully.
	br, err := NewBreachRetribution(
		aliceChannel.channelState, stateNum, breachHeight, breachTx,
	)
	require.NoError(t, err)

	// Check the retribution is as expected.
	t.Log(spew.Sdump(breachTx))
	assertRetribution(br, 1, 0)

	// Create the retribution using a stateNum+1 and we should expect an
	// error.
	_, err = NewBreachRetribution(
		aliceChannel.channelState, stateNum+1, breachHeight, breachTx,
	)
	require.ErrorIs(t, err, channeldb.ErrLogEntryNotFound)
}

View File

@ -1,11 +1,13 @@
package lnwallet
import (
"bytes"
"fmt"
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
@ -960,3 +962,76 @@ func addHTLC(commitTx *wire.MsgTx, ourCommit bool,
return nil
}
// findOutputIndexesFromRemote finds the index of our and their outputs from
// the remote commitment transaction. It derives the key ring to compute the
// output scripts and compares them against the outputs inside the commitment
// to find the match.
func findOutputIndexesFromRemote(revocationPreimage *chainhash.Hash,
	chanState *channeldb.OpenChannel) (uint32, uint32, error) {

	// Init the output indexes as empty.
	ourIndex := uint32(channeldb.OutputIndexEmpty)
	theirIndex := uint32(channeldb.OutputIndexEmpty)

	chanCommit := chanState.RemoteCommitment
	_, commitmentPoint := btcec.PrivKeyFromBytes(revocationPreimage[:])

	// With the commitment point generated, we can now derive the key ring
	// which will be used to generate the output scripts.
	keyRing := DeriveCommitmentKeys(
		commitmentPoint, false, chanState.ChanType,
		&chanState.LocalChanCfg, &chanState.RemoteChanCfg,
	)

	// Since this is the remote commitment chain, we use the mirrored
	// values.
	//
	// We use the remote's channel config for the csv delay.
	theirDelay := uint32(chanState.RemoteChanCfg.CsvDelay)

	// If we are the initiator of this channel, then it'll be false from
	// the remote's PoV.
	isRemoteInitiator := !chanState.IsInitiator

	var leaseExpiry uint32
	if chanState.ChanType.HasLeaseExpiration() {
		leaseExpiry = chanState.ThawHeight
	}

	// Map the scripts from our PoV. When facing a local commitment, the to
	// local output belongs to us and the to remote output belongs to them.
	// When facing a remote commitment, the to local output belongs to them
	// and the to remote output belongs to us.

	// Compute the to local script. From our PoV, when facing a remote
	// commitment, the to local output belongs to them.
	theirScript, err := CommitScriptToSelf(
		chanState.ChanType, isRemoteInitiator, keyRing.ToLocalKey,
		keyRing.RevocationKey, theirDelay, leaseExpiry,
	)
	if err != nil {
		return ourIndex, theirIndex, err
	}

	// Compute the to remote script. From our PoV, when facing a remote
	// commitment, the to remote output belongs to us.
	ourScript, _, err := CommitScriptToRemote(
		chanState.ChanType, isRemoteInitiator, keyRing.ToRemoteKey,
		leaseExpiry,
	)
	if err != nil {
		return ourIndex, theirIndex, err
	}

	// Now compare the scripts to find our/their output index.
	for i, txOut := range chanCommit.CommitTx.TxOut {
		switch {
		case bytes.Equal(txOut.PkScript, ourScript.PkScript):
			ourIndex = uint32(i)
		case bytes.Equal(txOut.PkScript, theirScript.PkScript):
			theirIndex = uint32(i)
		}
	}

	return ourIndex, theirIndex, nil
}

View File

@ -403,8 +403,10 @@ func NewChannelReservation(capacity, localFundingAmt btcutil.Amount,
FeePerKw: btcutil.Amount(commitFeePerKw),
CommitFee: commitFee,
},
ThawHeight: thawHeight,
Db: wallet.Cfg.Database,
ThawHeight: thawHeight,
Db: wallet.Cfg.Database,
InitialLocalBalance: ourBalance,
InitialRemoteBalance: theirBalance,
},
pushMSat: pushMSat,
pendingChanID: pendingChanID,

View File

@ -4081,21 +4081,13 @@ func createRPCOpenChannel(r *rpcServer, dbChannel *channeldb.OpenChannel,
channel.UnsettledBalance += channel.PendingHtlcs[i].Amount
}
// Lookup our balances at height 0, because they will reflect any
// push amounts that may have been present when this channel was
// created.
localBalance, remoteBalance, err := dbChannel.BalancesAtHeight(0)
if err != nil {
return nil, err
}
// If we initiated opening the channel, the zero height remote balance
// is the push amount. Otherwise, our starting balance is the push
// amount. If there is no push amount, these values will simply be zero.
if dbChannel.IsInitiator {
channel.PushAmountSat = uint64(remoteBalance.ToSatoshis())
channel.PushAmountSat = uint64(dbChannel.InitialRemoteBalance)
} else {
channel.PushAmountSat = uint64(localBalance.ToSatoshis())
channel.PushAmountSat = uint64(dbChannel.InitialLocalBalance)
}
if len(dbChannel.LocalShutdownScript) > 0 {

View File

@ -346,7 +346,7 @@ func (t *backupTask) craftSessionPayload(
}
}
breachTxID := t.breachInfo.BreachTransaction.TxHash()
breachTxID := t.breachInfo.BreachTxHash
// Compute the breach key as SHA256(txid).
hint, key := blob.NewBreachHintAndKeyFromHash(&breachTxID)

View File

@ -124,8 +124,8 @@ func genTaskTest(
// the breach transaction, which we will continue to modify.
breachTxn := wire.NewMsgTx(2)
breachInfo := &lnwallet.BreachRetribution{
RevokedStateNum: stateNum,
BreachTransaction: breachTxn,
RevokedStateNum: stateNum,
BreachTxHash: breachTxn.TxHash(),
KeyRing: &lnwallet.CommitmentKeyRing{
RevocationKey: revPK,
ToLocalKey: toLocalPK,
@ -607,7 +607,7 @@ func testBackupTask(t *testing.T, test backupTaskTest) {
}
// Verify that the breach hint matches the breach txid's prefix.
breachTxID := test.breachInfo.BreachTransaction.TxHash()
breachTxID := test.breachInfo.BreachTxHash
expHint := blob.NewBreachHintFromHash(&breachTxID)
if hint != expHint {
t.Fatalf("breach hint mismatch, want: %x, got: %v",

View File

@ -10,6 +10,7 @@ import (
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
@ -300,7 +301,7 @@ func (c *mockChannel) createRemoteCommitTx(t *testing.T) {
}
retribution := &lnwallet.BreachRetribution{
BreachTransaction: commitTxn,
BreachTxHash: commitTxn.TxHash(),
RevokedStateNum: c.commitHeight,
KeyRing: commitKeyRing,
RemoteDelay: c.csvDelay,
@ -360,13 +361,15 @@ func (c *mockChannel) receivePayment(t *testing.T, amt lnwire.MilliSatoshi) {
}
// getState retrieves the channel's commitment and retribution at state i.
func (c *mockChannel) getState(i uint64) (*wire.MsgTx, *lnwallet.BreachRetribution) {
func (c *mockChannel) getState(
i uint64) (chainhash.Hash, *lnwallet.BreachRetribution) {
c.mu.Lock()
defer c.mu.Unlock()
retribution := c.retributions[i]
return retribution.BreachTransaction, retribution
return retribution.BreachTxHash, retribution
}
type testHarness struct {
@ -608,8 +611,7 @@ func (h *testHarness) advanceChannelN(id uint64, n int) []blob.BreachHint {
var hints []blob.BreachHint
for i := uint64(0); i < uint64(n); i++ {
channel.advanceState(h.t)
commitTx, _ := h.channel(id).getState(i)
breachTxID := commitTx.TxHash()
breachTxID, _ := h.channel(id).getState(i)
hints = append(hints, blob.NewBreachHintFromHash(&breachTxID))
}
@ -654,8 +656,7 @@ func (h *testHarness) sendPayments(id, from, to uint64,
var hints []blob.BreachHint
for i := from; i < to; i++ {
h.channel(id).sendPayment(h.t, amt)
commitTx, _ := channel.getState(i)
breachTxID := commitTx.TxHash()
breachTxID, _ := channel.getState(i)
hints = append(hints, blob.NewBreachHintFromHash(&breachTxID))
}
@ -675,8 +676,7 @@ func (h *testHarness) recvPayments(id, from, to uint64,
var hints []blob.BreachHint
for i := from; i < to; i++ {
channel.receivePayment(h.t, amt)
commitTx, _ := channel.getState(i)
breachTxID := commitTx.TxHash()
breachTxID, _ := channel.getState(i)
hints = append(hints, blob.NewBreachHintFromHash(&breachTxID))
}