Mirror of https://github.com/lightningnetwork/lnd.git

Merge pull request #9044 from ProofOfKeags/refactor/stfu-prefactor

Refactor/stfu prefactor

Commit e8c5e7d5ce: 3 changed files with 474 additions and 413 deletions.
htlcswitch/link.go:

@@ -958,7 +958,7 @@ func (l *channelLink) resolveFwdPkgs() error {
     // If any of our reprocessing steps require an update to the commitment
     // txn, we initiate a state transition to capture all relevant changes.
-    if l.channel.PendingLocalUpdateCount() > 0 {
+    if l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote) > 0 {
         return l.updateCommitTx()
     }
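For context on the call-site change above: PendingLocalUpdateCount() counted only updates we originated that the remote commitment has not yet included, so the old call corresponds exactly to the (lntypes.Local, lntypes.Remote) instance of the new, party-indexed NumPendingUpdates. A minimal self-contained sketch of that counting idea (a toy model with illustrative names, not lnd's actual implementation):

package main

import "fmt"

// party is a stand-in for lntypes.ChannelParty.
type party int

const (
	local party = iota
	remote
)

// pendingUpdates models "updates originated by whoseUpdates that the
// other side's commitment has not yet acknowledged": the gap between
// the originator's log tip and the acknowledged index.
func pendingUpdates(logTip, acked map[party]uint64, whoseUpdates party) uint64 {
	return logTip[whoseUpdates] - acked[whoseUpdates]
}

func main() {
	logTip := map[party]uint64{local: 7, remote: 4}
	acked := map[party]uint64{local: 5, remote: 4}

	// The old PendingLocalUpdateCount() is the "our own updates
	// against the remote commitment" query.
	fmt.Println(pendingUpdates(logTip, acked, local))  // 2
	fmt.Println(pendingUpdates(logTip, acked, remote)) // 0
}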
@@ -1086,6 +1086,83 @@ func (l *channelLink) loadAndRemove() error {
     return l.channel.RemoveFwdPkgs(removeHeights...)
 }
 
+// handleChanSyncErr performs the error handling logic in the case where we
+// could not successfully syncChanStates with our channel peer.
+func (l *channelLink) handleChanSyncErr(err error) {
+    l.log.Warnf("error when syncing channel states: %v", err)
+
+    var errDataLoss *lnwallet.ErrCommitSyncLocalDataLoss
+
+    switch {
+    case errors.Is(err, ErrLinkShuttingDown):
+        l.log.Debugf("unable to sync channel states, link is " +
+            "shutting down")
+        return
+
+    // We failed syncing the commit chains, probably because the remote has
+    // lost state. We should force close the channel.
+    case errors.Is(err, lnwallet.ErrCommitSyncRemoteDataLoss):
+        fallthrough
+
+    // The remote sent us an invalid last commit secret, we should force
+    // close the channel.
+    // TODO(halseth): and permanently ban the peer?
+    case errors.Is(err, lnwallet.ErrInvalidLastCommitSecret):
+        fallthrough
+
+    // The remote sent us a commit point different from what they sent us
+    // before.
+    // TODO(halseth): ban peer?
+    case errors.Is(err, lnwallet.ErrInvalidLocalUnrevokedCommitPoint):
+        // We'll fail the link and tell the peer to force close the
+        // channel. Note that the database state is not updated here,
+        // but will be updated when the close transaction is ready to
+        // avoid that we go down before storing the transaction in the
+        // db.
+        l.failf(
+            LinkFailureError{
+                code:          ErrSyncError,
+                FailureAction: LinkFailureForceClose,
+            },
+            "unable to synchronize channel states: %v", err,
+        )
+
+    // We have lost state and cannot safely force close the channel. Fail
+    // the channel and wait for the remote to hopefully force close it. The
+    // remote has sent us its latest unrevoked commitment point, and we'll
+    // store it in the database, such that we can attempt to recover the
+    // funds if the remote force closes the channel.
+    case errors.As(err, &errDataLoss):
+        err := l.channel.MarkDataLoss(
+            errDataLoss.CommitPoint,
+        )
+        if err != nil {
+            l.log.Errorf("unable to mark channel data loss: %v",
+                err)
+        }
+
+    // We determined the commit chains were not possible to sync. We
+    // cautiously fail the channel, but don't force close.
+    // TODO(halseth): can we safely force close in any cases where this
+    // error is returned?
+    case errors.Is(err, lnwallet.ErrCannotSyncCommitChains):
+        if err := l.channel.MarkBorked(); err != nil {
+            l.log.Errorf("unable to mark channel borked: %v", err)
+        }
+
+    // Other, unspecified error.
+    default:
+    }
+
+    l.failf(
+        LinkFailureError{
+            code:          ErrRecoveryError,
+            FailureAction: LinkFailureForceNone,
+        },
+        "unable to synchronize channel states: %v", err,
+    )
+}
+
 // htlcManager is the primary goroutine which drives a channel's commitment
 // update state-machine in response to messages received via several channels.
 // This goroutine reads messages from the upstream (remote) peer, and also from
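Beyond extracting the logic into its own function, the hunk above modernizes the error matching: the old inline block compared sentinel errors with == and used a one-shot type assertion, both of which stop matching once an error is wrapped. A small self-contained sketch of the errors.Is/errors.As pattern (error types here are illustrative, not lnd's real ones):

package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-ins for the sentinel and struct-typed errors in
// the diff.
var errShuttingDown = errors.New("link shutting down")

type dataLossErr struct{ commitPoint string }

func (e *dataLossErr) Error() string { return "commit sync local data loss" }

func classify(err error) string {
	var dl *dataLossErr

	switch {
	// errors.Is still matches the sentinel after wrapping with
	// fmt.Errorf("...: %w", err); a plain err == comparison, as in
	// the old code, would not.
	case errors.Is(err, errShuttingDown):
		return "shutdown"

	// errors.As replaces the old one-shot type assertion
	// err.(*lnwallet.ErrCommitSyncLocalDataLoss) and also unwraps.
	case errors.As(err, &dl):
		return "data loss, commit point " + dl.commitPoint

	default:
		return "unknown"
	}
}

func main() {
	wrapped := fmt.Errorf("sync failed: %w", errShuttingDown)
	fmt.Println(classify(wrapped))                           // shutdown
	fmt.Println(classify(&dataLossErr{commitPoint: "02ab"})) // data loss, commit point 02ab
}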
@@ -1121,89 +1198,7 @@ func (l *channelLink) htlcManager() {
     if l.cfg.SyncStates {
         err := l.syncChanStates()
         if err != nil {
-            l.log.Warnf("error when syncing channel states: %v", err)
-
-            errDataLoss, localDataLoss :=
-                err.(*lnwallet.ErrCommitSyncLocalDataLoss)
-
-            switch {
-            case err == ErrLinkShuttingDown:
-                l.log.Debugf("unable to sync channel states, " +
-                    "link is shutting down")
-                return
-
-            // We failed syncing the commit chains, probably
-            // because the remote has lost state. We should force
-            // close the channel.
-            case err == lnwallet.ErrCommitSyncRemoteDataLoss:
-                fallthrough
-
-            // The remote sent us an invalid last commit secret, we
-            // should force close the channel.
-            // TODO(halseth): and permanently ban the peer?
-            case err == lnwallet.ErrInvalidLastCommitSecret:
-                fallthrough
-
-            // The remote sent us a commit point different from
-            // what they sent us before.
-            // TODO(halseth): ban peer?
-            case err == lnwallet.ErrInvalidLocalUnrevokedCommitPoint:
-                // We'll fail the link and tell the peer to
-                // force close the channel. Note that the
-                // database state is not updated here, but will
-                // be updated when the close transaction is
-                // ready to avoid that we go down before
-                // storing the transaction in the db.
-                l.failf(
-                    //nolint:lll
-                    LinkFailureError{
-                        code:          ErrSyncError,
-                        FailureAction: LinkFailureForceClose,
-                    },
-                    "unable to synchronize channel "+
-                        "states: %v", err,
-                )
-                return
-
-            // We have lost state and cannot safely force close the
-            // channel. Fail the channel and wait for the remote to
-            // hopefully force close it. The remote has sent us its
-            // latest unrevoked commitment point, and we'll store
-            // it in the database, such that we can attempt to
-            // recover the funds if the remote force closes the
-            // channel.
-            case localDataLoss:
-                err := l.channel.MarkDataLoss(
-                    errDataLoss.CommitPoint,
-                )
-                if err != nil {
-                    l.log.Errorf("unable to mark channel "+
-                        "data loss: %v", err)
-                }
-
-            // We determined the commit chains were not possible to
-            // sync. We cautiously fail the channel, but don't
-            // force close.
-            // TODO(halseth): can we safely force close in any
-            // cases where this error is returned?
-            case err == lnwallet.ErrCannotSyncCommitChains:
-                if err := l.channel.MarkBorked(); err != nil {
-                    l.log.Errorf("unable to mark channel "+
-                        "borked: %v", err)
-                }
-
-            // Other, unspecified error.
-            default:
-            }
-
-            l.failf(
-                LinkFailureError{
-                    code:          ErrRecoveryError,
-                    FailureAction: LinkFailureForceNone,
-                },
-                "unable to synchronize channel "+
-                    "states: %v", err,
-            )
+            l.handleChanSyncErr(err)
             return
         }
     }
@@ -1291,15 +1286,19 @@ func (l *channelLink) htlcManager() {
         // the batch ticker so that it can be cleared. Otherwise pause
         // the ticker to prevent waking up the htlcManager while the
         // batch is empty.
-        if l.channel.PendingLocalUpdateCount() > 0 {
+        numUpdates := l.channel.NumPendingUpdates(
+            lntypes.Local, lntypes.Remote,
+        )
+        if numUpdates > 0 {
             l.cfg.BatchTicker.Resume()
             l.log.Tracef("BatchTicker resumed, "+
-                "PendingLocalUpdateCount=%d",
-                l.channel.PendingLocalUpdateCount())
+                "NumPendingUpdates(Local, Remote)=%d",
+                numUpdates,
+            )
         } else {
             l.cfg.BatchTicker.Pause()
             l.log.Trace("BatchTicker paused due to zero " +
-                "PendingLocalUpdateCount")
+                "NumPendingUpdates(Local, Remote)")
         }
 
         select {
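The pause/resume in the hunk above keeps an idle channel from waking htlcManager. One common way to get that effect in a Go select loop is the nil-channel trick; the sketch below assumes only that a paused ticker must never deliver (toy code, not lnd's Ticker implementation):

package main

import (
	"fmt"
	"time"
)

func main() {
	inner := time.NewTicker(30 * time.Millisecond)
	defer inner.Stop()

	pendingUpdates := 1

	for i := 0; i < 3; i++ {
		// A nil channel blocks forever in a select, so leaving
		// batch nil while there is no pending work "pauses" the
		// ticker: an empty batch can never wake the loop.
		var batch <-chan time.Time
		if pendingUpdates > 0 {
			batch = inner.C
		}

		select {
		case <-batch:
			fmt.Println("batch tick: flush", pendingUpdates, "updates")
			pendingUpdates = 0
		case <-time.After(100 * time.Millisecond):
			fmt.Println("idle: ticker effectively paused")
			pendingUpdates = 1 // pretend a new update arrived
		}
	}
}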
@@ -1657,7 +1656,7 @@ func (l *channelLink) handleDownstreamUpdateAdd(pkt *htlcPacket) error {
     l.log.Tracef("received downstream htlc: payment_hash=%x, "+
         "local_log_index=%v, pend_updates=%v",
         htlc.PaymentHash[:], index,
-        l.channel.PendingLocalUpdateCount())
+        l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote))
 
     pkt.outgoingChanID = l.ShortChanID()
     pkt.outgoingHTLCID = index
@@ -1863,7 +1862,8 @@ func (l *channelLink) handleDownstreamPkt(pkt *htlcPacket) {
 // tryBatchUpdateCommitTx updates the commitment transaction if the batch is
 // full.
 func (l *channelLink) tryBatchUpdateCommitTx() {
-    if l.channel.PendingLocalUpdateCount() < uint64(l.cfg.BatchSize) {
+    pending := l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote)
+    if pending < uint64(l.cfg.BatchSize) {
         return
     }
 
@@ -1939,7 +1939,6 @@ func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) {
 // direct channel with, updating our respective commitment chains.
 func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
     switch msg := msg.(type) {
-
     case *lnwire.UpdateAddHTLC:
         if l.IsFlushing(Incoming) {
             // This is forbidden by the protocol specification.
@@ -2597,9 +2596,9 @@ func (l *channelLink) updateCommitTx() error {
         l.cfg.PendingCommitTicker.Resume()
         l.log.Trace("PendingCommitTicker resumed")
 
+        n := l.channel.NumPendingUpdates(lntypes.Local, lntypes.Remote)
         l.log.Tracef("revocation window exhausted, unable to send: "+
-            "%v, pend_updates=%v, dangling_closes%v",
-            l.channel.PendingLocalUpdateCount(),
+            "%v, pend_updates=%v, dangling_closes%v", n,
             lnutils.SpewLogClosure(l.openedCircuits),
             lnutils.SpewLogClosure(l.closedCircuits))
 
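The "revocation window exhausted" trace above fires when we have signed as many remote commitments as allowed without receiving a revoke_and_ack back. A toy model of that back-pressure, with the window size of 1 chosen purely for illustration (lnd's real bookkeeping lives in the commit chains):

package main

import "fmt"

// window tracks signed-but-unrevoked remote commitments in flight.
type window struct {
	inFlight, max int
}

// trySend reports whether we may sign another remote commitment now.
func (w *window) trySend() bool {
	if w.inFlight >= w.max {
		return false // exhausted: wait for a revoke_and_ack
	}
	w.inFlight++
	return true
}

// revokeReceived frees a slot when the peer revokes its old state.
func (w *window) revokeReceived() { w.inFlight-- }

func main() {
	w := window{max: 1}
	fmt.Println(w.trySend()) // true: first commitment signed
	fmt.Println(w.trySend()) // false: window exhausted
	w.revokeReceived()
	fmt.Println(w.trySend()) // true again
}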
Second file (likely lnwallet/channel.go, given the surrounding hunks): diff suppressed because it is too large.
lnwallet/channel_test.go:

@@ -335,21 +335,23 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool,
     // The logs of both sides should now be cleared since the entry adding
     // the HTLC should have been removed once both sides receive the
     // revocation.
-    if aliceChannel.localUpdateLog.Len() != 0 {
+    if aliceChannel.updateLogs.Local.Len() != 0 {
         t.Fatalf("alice's local not updated, should be empty, has %v "+
-            "entries instead", aliceChannel.localUpdateLog.Len())
+            "entries instead", aliceChannel.updateLogs.Local.Len())
     }
-    if aliceChannel.remoteUpdateLog.Len() != 0 {
+    if aliceChannel.updateLogs.Remote.Len() != 0 {
         t.Fatalf("alice's remote not updated, should be empty, has %v "+
-            "entries instead", aliceChannel.remoteUpdateLog.Len())
+            "entries instead", aliceChannel.updateLogs.Remote.Len())
     }
-    if len(aliceChannel.localUpdateLog.updateIndex) != 0 {
-        t.Fatalf("alice's local log index not cleared, should be empty but "+
-            "has %v entries", len(aliceChannel.localUpdateLog.updateIndex))
+    if len(aliceChannel.updateLogs.Local.updateIndex) != 0 {
+        t.Fatalf("alice's local log index not cleared, should be "+
+            "empty but has %v entries",
+            len(aliceChannel.updateLogs.Local.updateIndex))
     }
-    if len(aliceChannel.remoteUpdateLog.updateIndex) != 0 {
-        t.Fatalf("alice's remote log index not cleared, should be empty but "+
-            "has %v entries", len(aliceChannel.remoteUpdateLog.updateIndex))
+    if len(aliceChannel.updateLogs.Remote.updateIndex) != 0 {
+        t.Fatalf("alice's remote log index not cleared, should be "+
+            "empty but has %v entries",
+            len(aliceChannel.updateLogs.Remote.updateIndex))
     }
 }
 
@@ -1435,12 +1437,12 @@ func TestHTLCDustLimit(t *testing.T) {
     // while Bob's should not, because the value falls beneath his dust
     // limit. The amount of the HTLC should be applied to fees in Bob's
     // commitment transaction.
-    aliceCommitment := aliceChannel.localCommitChain.tip()
+    aliceCommitment := aliceChannel.commitChains.Local.tip()
     if len(aliceCommitment.txn.TxOut) != 3 {
         t.Fatalf("incorrect # of outputs: expected %v, got %v",
             3, len(aliceCommitment.txn.TxOut))
     }
-    bobCommitment := bobChannel.localCommitChain.tip()
+    bobCommitment := bobChannel.commitChains.Local.tip()
     if len(bobCommitment.txn.TxOut) != 2 {
         t.Fatalf("incorrect # of outputs: expected %v, got %v",
             2, len(bobCommitment.txn.TxOut))
@@ -1465,7 +1467,7 @@ func TestHTLCDustLimit(t *testing.T) {
 
     // At this point, for Alice's commitment chains, the value of the HTLC
     // should have been added to Alice's balance and TotalSatoshisSent.
-    commitment := aliceChannel.localCommitChain.tip()
+    commitment := aliceChannel.commitChains.Local.tip()
     if len(commitment.txn.TxOut) != 2 {
         t.Fatalf("incorrect # of outputs: expected %v, got %v",
             2, len(commitment.txn.TxOut))
@@ -1698,7 +1700,7 @@ func TestChannelBalanceDustLimit(t *testing.T) {
     // output for Alice's balance should have been removed as dust, leaving
     // only a single output that will send the remaining funds in the
     // channel to Bob.
-    commitment := bobChannel.localCommitChain.tip()
+    commitment := bobChannel.commitChains.Local.tip()
     if len(commitment.txn.TxOut) != 1 {
         t.Fatalf("incorrect # of outputs: expected %v, got %v",
             1, len(commitment.txn.TxOut))
@@ -1773,26 +1775,26 @@ func TestStateUpdatePersistence(t *testing.T) {
     // Helper method that asserts the expected number of updates are found
     // in the update logs.
     assertNumLogUpdates := func(numAliceUpdates, numBobUpdates int) {
-        if aliceChannel.localUpdateLog.Len() != numAliceUpdates {
+        if aliceChannel.updateLogs.Local.Len() != numAliceUpdates {
             t.Fatalf("expected %d local updates, found %d",
                 numAliceUpdates,
-                aliceChannel.localUpdateLog.Len())
+                aliceChannel.updateLogs.Local.Len())
         }
-        if aliceChannel.remoteUpdateLog.Len() != numBobUpdates {
+        if aliceChannel.updateLogs.Remote.Len() != numBobUpdates {
             t.Fatalf("expected %d remote updates, found %d",
                 numBobUpdates,
-                aliceChannel.remoteUpdateLog.Len())
+                aliceChannel.updateLogs.Remote.Len())
         }
 
-        if bobChannel.localUpdateLog.Len() != numBobUpdates {
+        if bobChannel.updateLogs.Local.Len() != numBobUpdates {
             t.Fatalf("expected %d local updates, found %d",
                 numBobUpdates,
-                bobChannel.localUpdateLog.Len())
+                bobChannel.updateLogs.Local.Len())
         }
-        if bobChannel.remoteUpdateLog.Len() != numAliceUpdates {
+        if bobChannel.updateLogs.Remote.Len() != numAliceUpdates {
             t.Fatalf("expected %d remote updates, found %d",
                 numAliceUpdates,
-                bobChannel.remoteUpdateLog.Len())
+                bobChannel.updateLogs.Remote.Len())
         }
     }
 
@@ -1816,25 +1818,25 @@ func TestStateUpdatePersistence(t *testing.T) {
 
     // After the state transition the fee update is fully locked in, and
     // should've been removed from both channels' update logs.
-    if aliceChannel.localCommitChain.tail().feePerKw != fee {
+    if aliceChannel.commitChains.Local.tail().feePerKw != fee {
         t.Fatalf("fee not locked in")
     }
-    if bobChannel.localCommitChain.tail().feePerKw != fee {
+    if bobChannel.commitChains.Local.tail().feePerKw != fee {
         t.Fatalf("fee not locked in")
     }
     assertNumLogUpdates(3, 1)
 
     // The latest commitment from both sides should have all the HTLCs.
-    numAliceOutgoing := aliceChannel.localCommitChain.tail().outgoingHTLCs
-    numAliceIncoming := aliceChannel.localCommitChain.tail().incomingHTLCs
+    numAliceOutgoing := aliceChannel.commitChains.Local.tail().outgoingHTLCs
+    numAliceIncoming := aliceChannel.commitChains.Local.tail().incomingHTLCs
     if len(numAliceOutgoing) != 3 {
         t.Fatalf("expected %v htlcs, instead got %v", 3, numAliceOutgoing)
     }
     if len(numAliceIncoming) != 1 {
         t.Fatalf("expected %v htlcs, instead got %v", 1, numAliceIncoming)
     }
-    numBobOutgoing := bobChannel.localCommitChain.tail().outgoingHTLCs
-    numBobIncoming := bobChannel.localCommitChain.tail().incomingHTLCs
+    numBobOutgoing := bobChannel.commitChains.Local.tail().outgoingHTLCs
+    numBobIncoming := bobChannel.commitChains.Local.tail().incomingHTLCs
     if len(numBobOutgoing) != 1 {
         t.Fatalf("expected %v htlcs, instead got %v", 1, numBobOutgoing)
     }
@@ -1868,62 +1870,72 @@ func TestStateUpdatePersistence(t *testing.T) {
 
     // The state update logs of the new channels and the old channels
    // should now be identical other than the height the HTLCs were added.
-    if aliceChannel.localUpdateLog.logIndex !=
-        aliceChannelNew.localUpdateLog.logIndex {
+    if aliceChannel.updateLogs.Local.logIndex !=
+        aliceChannelNew.updateLogs.Local.logIndex {
 
         t.Fatalf("alice log counter: expected %v, got %v",
-            aliceChannel.localUpdateLog.logIndex,
-            aliceChannelNew.localUpdateLog.logIndex)
+            aliceChannel.updateLogs.Local.logIndex,
+            aliceChannelNew.updateLogs.Local.logIndex)
     }
-    if aliceChannel.remoteUpdateLog.logIndex !=
-        aliceChannelNew.remoteUpdateLog.logIndex {
+    if aliceChannel.updateLogs.Remote.logIndex !=
+        aliceChannelNew.updateLogs.Remote.logIndex {
 
         t.Fatalf("alice log counter: expected %v, got %v",
-            aliceChannel.remoteUpdateLog.logIndex,
-            aliceChannelNew.remoteUpdateLog.logIndex)
+            aliceChannel.updateLogs.Remote.logIndex,
+            aliceChannelNew.updateLogs.Remote.logIndex)
     }
-    if aliceChannel.localUpdateLog.Len() !=
-        aliceChannelNew.localUpdateLog.Len() {
+    if aliceChannel.updateLogs.Local.Len() !=
+        aliceChannelNew.updateLogs.Local.Len() {
 
         t.Fatalf("alice log len: expected %v, got %v",
-            aliceChannel.localUpdateLog.Len(),
-            aliceChannelNew.localUpdateLog.Len())
+            aliceChannel.updateLogs.Local.Len(),
+            aliceChannelNew.updateLogs.Local.Len())
     }
-    if aliceChannel.remoteUpdateLog.Len() !=
-        aliceChannelNew.remoteUpdateLog.Len() {
+    if aliceChannel.updateLogs.Remote.Len() !=
+        aliceChannelNew.updateLogs.Remote.Len() {
 
         t.Fatalf("alice log len: expected %v, got %v",
-            aliceChannel.remoteUpdateLog.Len(),
-            aliceChannelNew.remoteUpdateLog.Len())
+            aliceChannel.updateLogs.Remote.Len(),
+            aliceChannelNew.updateLogs.Remote.Len())
     }
-    if bobChannel.localUpdateLog.logIndex !=
-        bobChannelNew.localUpdateLog.logIndex {
+    if bobChannel.updateLogs.Local.logIndex !=
+        bobChannelNew.updateLogs.Local.logIndex {
 
         t.Fatalf("bob log counter: expected %v, got %v",
-            bobChannel.localUpdateLog.logIndex,
-            bobChannelNew.localUpdateLog.logIndex)
+            bobChannel.updateLogs.Local.logIndex,
+            bobChannelNew.updateLogs.Local.logIndex)
     }
-    if bobChannel.remoteUpdateLog.logIndex !=
-        bobChannelNew.remoteUpdateLog.logIndex {
+    if bobChannel.updateLogs.Remote.logIndex !=
+        bobChannelNew.updateLogs.Remote.logIndex {
 
         t.Fatalf("bob log counter: expected %v, got %v",
-            bobChannel.remoteUpdateLog.logIndex,
-            bobChannelNew.remoteUpdateLog.logIndex)
+            bobChannel.updateLogs.Remote.logIndex,
+            bobChannelNew.updateLogs.Remote.logIndex)
     }
-    if bobChannel.localUpdateLog.Len() !=
-        bobChannelNew.localUpdateLog.Len() {
+    if bobChannel.updateLogs.Local.Len() !=
+        bobChannelNew.updateLogs.Local.Len() {
 
         t.Fatalf("bob log len: expected %v, got %v",
-            bobChannel.localUpdateLog.Len(),
-            bobChannelNew.localUpdateLog.Len())
+            bobChannel.updateLogs.Local.Len(),
+            bobChannelNew.updateLogs.Local.Len())
     }
-    if bobChannel.remoteUpdateLog.Len() !=
-        bobChannelNew.remoteUpdateLog.Len() {
+    if bobChannel.updateLogs.Remote.Len() !=
+        bobChannelNew.updateLogs.Remote.Len() {
 
         t.Fatalf("bob log len: expected %v, got %v",
-            bobChannel.remoteUpdateLog.Len(),
-            bobChannelNew.remoteUpdateLog.Len())
+            bobChannel.updateLogs.Remote.Len(),
+            bobChannelNew.updateLogs.Remote.Len())
     }
 
     // TODO(roasbeef): expand test to also ensure state revocation log has
     // proper pk scripts
 
     // Newly generated pkScripts for HTLCs should be the same as in the old channel.
-    for _, entry := range aliceChannel.localUpdateLog.htlcIndex {
+    for _, entry := range aliceChannel.updateLogs.Local.htlcIndex {
         htlc := entry.Value
-        restoredHtlc := aliceChannelNew.localUpdateLog.lookupHtlc(htlc.HtlcIndex)
+        restoredHtlc := aliceChannelNew.updateLogs.Local.lookupHtlc(
+            htlc.HtlcIndex,
+        )
         if !bytes.Equal(htlc.ourPkScript, restoredHtlc.ourPkScript) {
             t.Fatalf("alice ourPkScript in ourLog: expected %X, got %X",
                 htlc.ourPkScript[:5], restoredHtlc.ourPkScript[:5])
@@ -1933,9 +1945,11 @@ func TestStateUpdatePersistence(t *testing.T) {
                 htlc.theirPkScript[:5], restoredHtlc.theirPkScript[:5])
         }
     }
-    for _, entry := range aliceChannel.remoteUpdateLog.htlcIndex {
+    for _, entry := range aliceChannel.updateLogs.Remote.htlcIndex {
         htlc := entry.Value
-        restoredHtlc := aliceChannelNew.remoteUpdateLog.lookupHtlc(htlc.HtlcIndex)
+        restoredHtlc := aliceChannelNew.updateLogs.Remote.lookupHtlc(
+            htlc.HtlcIndex,
+        )
         if !bytes.Equal(htlc.ourPkScript, restoredHtlc.ourPkScript) {
             t.Fatalf("alice ourPkScript in theirLog: expected %X, got %X",
                 htlc.ourPkScript[:5], restoredHtlc.ourPkScript[:5])
@@ -1945,9 +1959,11 @@ func TestStateUpdatePersistence(t *testing.T) {
                 htlc.theirPkScript[:5], restoredHtlc.theirPkScript[:5])
         }
     }
-    for _, entry := range bobChannel.localUpdateLog.htlcIndex {
+    for _, entry := range bobChannel.updateLogs.Local.htlcIndex {
         htlc := entry.Value
-        restoredHtlc := bobChannelNew.localUpdateLog.lookupHtlc(htlc.HtlcIndex)
+        restoredHtlc := bobChannelNew.updateLogs.Local.lookupHtlc(
+            htlc.HtlcIndex,
+        )
         if !bytes.Equal(htlc.ourPkScript, restoredHtlc.ourPkScript) {
             t.Fatalf("bob ourPkScript in ourLog: expected %X, got %X",
                 htlc.ourPkScript[:5], restoredHtlc.ourPkScript[:5])
@@ -1957,9 +1973,11 @@ func TestStateUpdatePersistence(t *testing.T) {
                 htlc.theirPkScript[:5], restoredHtlc.theirPkScript[:5])
         }
     }
-    for _, entry := range bobChannel.remoteUpdateLog.htlcIndex {
+    for _, entry := range bobChannel.updateLogs.Remote.htlcIndex {
         htlc := entry.Value
-        restoredHtlc := bobChannelNew.remoteUpdateLog.lookupHtlc(htlc.HtlcIndex)
+        restoredHtlc := bobChannelNew.updateLogs.Remote.lookupHtlc(
+            htlc.HtlcIndex,
+        )
         if !bytes.Equal(htlc.ourPkScript, restoredHtlc.ourPkScript) {
             t.Fatalf("bob ourPkScript in theirLog: expected %X, got %X",
                 htlc.ourPkScript[:5], restoredHtlc.ourPkScript[:5])
@@ -2090,20 +2108,24 @@ func TestCancelHTLC(t *testing.T) {
 
     // Now HTLCs should be present on the commitment transaction for either
     // side.
-    if len(aliceChannel.localCommitChain.tip().outgoingHTLCs) != 0 ||
-        len(aliceChannel.remoteCommitChain.tip().outgoingHTLCs) != 0 {
+    if len(aliceChannel.commitChains.Local.tip().outgoingHTLCs) != 0 ||
+        len(aliceChannel.commitChains.Remote.tip().outgoingHTLCs) != 0 {
 
         t.Fatalf("htlc's still active from alice's POV")
     }
-    if len(aliceChannel.localCommitChain.tip().incomingHTLCs) != 0 ||
-        len(aliceChannel.remoteCommitChain.tip().incomingHTLCs) != 0 {
+    if len(aliceChannel.commitChains.Local.tip().incomingHTLCs) != 0 ||
+        len(aliceChannel.commitChains.Remote.tip().incomingHTLCs) != 0 {
 
         t.Fatalf("htlc's still active from alice's POV")
     }
-    if len(bobChannel.localCommitChain.tip().outgoingHTLCs) != 0 ||
-        len(bobChannel.remoteCommitChain.tip().outgoingHTLCs) != 0 {
+    if len(bobChannel.commitChains.Local.tip().outgoingHTLCs) != 0 ||
+        len(bobChannel.commitChains.Remote.tip().outgoingHTLCs) != 0 {
 
         t.Fatalf("htlc's still active from bob's POV")
     }
-    if len(bobChannel.localCommitChain.tip().incomingHTLCs) != 0 ||
-        len(bobChannel.remoteCommitChain.tip().incomingHTLCs) != 0 {
+    if len(bobChannel.commitChains.Local.tip().incomingHTLCs) != 0 ||
+        len(bobChannel.commitChains.Remote.tip().incomingHTLCs) != 0 {
 
         t.Fatalf("htlc's still active from bob's POV")
     }
 
@@ -3241,39 +3263,39 @@ func TestChanSyncOweCommitment(t *testing.T) {
     // Alice's local log counter should be 4 and her HTLC index 3. She
     // should detect Bob's remote log counter as being 3 and his HTLC index
     // 3 as well.
-    if aliceChannel.localUpdateLog.logIndex != 4 {
+    if aliceChannel.updateLogs.Local.logIndex != 4 {
         t.Fatalf("incorrect log index: expected %v, got %v", 4,
-            aliceChannel.localUpdateLog.logIndex)
+            aliceChannel.updateLogs.Local.logIndex)
     }
-    if aliceChannel.localUpdateLog.htlcCounter != 1 {
+    if aliceChannel.updateLogs.Local.htlcCounter != 1 {
         t.Fatalf("incorrect htlc index: expected %v, got %v", 1,
-            aliceChannel.localUpdateLog.htlcCounter)
+            aliceChannel.updateLogs.Local.htlcCounter)
     }
-    if aliceChannel.remoteUpdateLog.logIndex != 3 {
+    if aliceChannel.updateLogs.Remote.logIndex != 3 {
         t.Fatalf("incorrect log index: expected %v, got %v", 3,
-            aliceChannel.localUpdateLog.logIndex)
+            aliceChannel.updateLogs.Local.logIndex)
     }
-    if aliceChannel.remoteUpdateLog.htlcCounter != 3 {
+    if aliceChannel.updateLogs.Remote.htlcCounter != 3 {
         t.Fatalf("incorrect htlc index: expected %v, got %v", 3,
-            aliceChannel.localUpdateLog.htlcCounter)
+            aliceChannel.updateLogs.Local.htlcCounter)
     }
 
     // Bob should also have the same state, but mirrored.
-    if bobChannel.localUpdateLog.logIndex != 3 {
+    if bobChannel.updateLogs.Local.logIndex != 3 {
         t.Fatalf("incorrect log index: expected %v, got %v", 3,
-            bobChannel.localUpdateLog.logIndex)
+            bobChannel.updateLogs.Local.logIndex)
     }
-    if bobChannel.localUpdateLog.htlcCounter != 3 {
+    if bobChannel.updateLogs.Local.htlcCounter != 3 {
         t.Fatalf("incorrect htlc index: expected %v, got %v", 3,
-            bobChannel.localUpdateLog.htlcCounter)
+            bobChannel.updateLogs.Local.htlcCounter)
     }
-    if bobChannel.remoteUpdateLog.logIndex != 4 {
+    if bobChannel.updateLogs.Remote.logIndex != 4 {
         t.Fatalf("incorrect log index: expected %v, got %v", 4,
-            bobChannel.localUpdateLog.logIndex)
+            bobChannel.updateLogs.Local.logIndex)
     }
-    if bobChannel.remoteUpdateLog.htlcCounter != 1 {
+    if bobChannel.updateLogs.Remote.htlcCounter != 1 {
         t.Fatalf("incorrect htlc index: expected %v, got %v", 1,
-            bobChannel.localUpdateLog.htlcCounter)
+            bobChannel.updateLogs.Local.htlcCounter)
     }
 
     // We'll conclude the test by having Bob settle Alice's HTLC, then
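The mirrored-state symmetry the assertions above rely on: every update one side appends to its local log is replayed verbatim into the other side's remote log, so the two log indices advance in lock-step. In miniature (toy types, not the real updateLog):

package main

import "fmt"

// updLog is a minimal stand-in for one side's update log.
type updLog struct{ logIndex uint64 }

func (l *updLog) append() { l.logIndex++ }

func main() {
	var aliceLocal, bobRemote updLog

	// Alice originates three updates; each one is sent across the
	// wire and mirrored into Bob's remote log.
	for i := 0; i < 3; i++ {
		aliceLocal.append()
		bobRemote.append()
	}

	fmt.Println(aliceLocal.logIndex == bobRemote.logIndex) // true
}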
@@ -4503,7 +4525,7 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) {
         t.Helper()
 
         expUpd := expFee + expAdd
-        upd, fees := countLog(aliceChannel.localUpdateLog)
+        upd, fees := countLog(aliceChannel.updateLogs.Local)
         if upd != expUpd {
             t.Fatalf("expected %d updates, found %d in Alice's "+
                 "log", expUpd, upd)
@@ -4512,7 +4534,7 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) {
             t.Fatalf("expected %d fee updates, found %d in "+
                 "Alice's log", expFee, fees)
         }
-        upd, fees = countLog(bobChannel.remoteUpdateLog)
+        upd, fees = countLog(bobChannel.updateLogs.Remote)
         if upd != expUpd {
             t.Fatalf("expected %d updates, found %d in Bob's log",
                 expUpd, upd)
@@ -5207,9 +5229,11 @@ func TestChanCommitWeightDustHtlcs(t *testing.T) {
     // When sending htlcs we enforce the feebuffer on the commitment
     // transaction.
     remoteCommitWeight := func(lc *LightningChannel) lntypes.WeightUnit {
-        remoteACKedIndex := lc.localCommitChain.tip().theirMessageIndex
+        remoteACKedIndex :=
+            lc.commitChains.Local.tip().messageIndices.Remote
 
         htlcView := lc.fetchHTLCView(remoteACKedIndex,
-            lc.localUpdateLog.logIndex)
+            lc.updateLogs.Local.logIndex)
 
         _, w := lc.availableCommitmentBalance(
             htlcView, lntypes.Remote, FeeBuffer,
@@ -5830,7 +5854,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) {
     // At this point, Alice's commitment chain should have a new pending
     // commit for Bob. We'll extract it so we can simulate Bob broadcasting
     // the commitment due to an issue.
-    bobCommit := aliceChannel.remoteCommitChain.tip().txn
+    bobCommit := aliceChannel.commitChains.Remote.tip().txn
     bobTxHash := bobCommit.TxHash()
     spendDetail := &chainntnfs.SpendDetail{
         SpenderTxHash: &bobTxHash,
@@ -6916,20 +6940,20 @@ func TestChannelRestoreUpdateLogs(t *testing.T) {
 
     // compare all the logs between the old and new channels, to make sure
     // they all got restored properly.
-    err = compareLogs(aliceChannel.localUpdateLog,
-        newAliceChannel.localUpdateLog)
+    err = compareLogs(aliceChannel.updateLogs.Local,
+        newAliceChannel.updateLogs.Local)
     require.NoError(t, err, "alice local log not restored")
 
-    err = compareLogs(aliceChannel.remoteUpdateLog,
-        newAliceChannel.remoteUpdateLog)
+    err = compareLogs(aliceChannel.updateLogs.Remote,
+        newAliceChannel.updateLogs.Remote)
     require.NoError(t, err, "alice remote log not restored")
 
-    err = compareLogs(bobChannel.localUpdateLog,
-        newBobChannel.localUpdateLog)
+    err = compareLogs(bobChannel.updateLogs.Local,
+        newBobChannel.updateLogs.Local)
     require.NoError(t, err, "bob local log not restored")
 
-    err = compareLogs(bobChannel.remoteUpdateLog,
-        newBobChannel.remoteUpdateLog)
+    err = compareLogs(bobChannel.updateLogs.Remote,
+        newBobChannel.updateLogs.Remote)
     require.NoError(t, err, "bob remote log not restored")
 }
 
@@ -6962,8 +6986,9 @@ func assertInLog(t *testing.T, log *updateLog, numAdds, numFails int) {
 // the local and remote update log of the given channel.
 func assertInLogs(t *testing.T, channel *LightningChannel, numAddsLocal,
     numFailsLocal, numAddsRemote, numFailsRemote int) {
-    assertInLog(t, channel.localUpdateLog, numAddsLocal, numFailsLocal)
-    assertInLog(t, channel.remoteUpdateLog, numAddsRemote, numFailsRemote)
+
+    assertInLog(t, channel.updateLogs.Local, numAddsLocal, numFailsLocal)
+    assertInLog(t, channel.updateLogs.Remote, numAddsRemote, numFailsRemote)
 }
 
 // restoreAndAssert creates a new LightningChannel from the given channel's
@@ -6978,8 +7003,10 @@ func restoreAndAssert(t *testing.T, channel *LightningChannel, numAddsLocal,
     )
     require.NoError(t, err, "unable to create new channel")
 
-    assertInLog(t, newChannel.localUpdateLog, numAddsLocal, numFailsLocal)
-    assertInLog(t, newChannel.remoteUpdateLog, numAddsRemote, numFailsRemote)
+    assertInLog(t, newChannel.updateLogs.Local, numAddsLocal, numFailsLocal)
+    assertInLog(
+        t, newChannel.updateLogs.Remote, numAddsRemote, numFailsRemote,
+    )
 }
 
 // TestChannelRestoreUpdateLogsFailedHTLC runs through a scenario where an
@@ -7243,16 +7270,18 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
 
     var pd *PaymentDescriptor
     if remoteLog {
-        if newChannel.localUpdateLog.lookupHtlc(htlcIndex) != nil {
+        h := newChannel.updateLogs.Local.lookupHtlc(htlcIndex)
+        if h != nil {
             t.Fatalf("htlc found in wrong log")
         }
-        pd = newChannel.remoteUpdateLog.lookupHtlc(htlcIndex)
+        pd = newChannel.updateLogs.Remote.lookupHtlc(htlcIndex)
 
     } else {
-        if newChannel.remoteUpdateLog.lookupHtlc(htlcIndex) != nil {
+        h := newChannel.updateLogs.Remote.lookupHtlc(htlcIndex)
+        if h != nil {
             t.Fatalf("htlc found in wrong log")
         }
-        pd = newChannel.localUpdateLog.lookupHtlc(htlcIndex)
+        pd = newChannel.updateLogs.Local.lookupHtlc(htlcIndex)
     }
     if pd == nil {
         t.Fatalf("htlc not found in log")
@@ -7523,7 +7552,7 @@ func TestForceCloseBorkedState(t *testing.T) {
 
     // We manually advance the commitment tail here since the above
     // ReceiveRevocation call will fail before it's actually advanced.
-    aliceChannel.remoteCommitChain.advanceTail()
+    aliceChannel.commitChains.Remote.advanceTail()
     _, err = aliceChannel.SignNextCommitment()
     if err != channeldb.ErrChanBorked {
         t.Fatalf("sign commitment should have failed: %v", err)
@@ -7739,7 +7768,7 @@ func TestIdealCommitFeeRate(t *testing.T) {
         maxFeeAlloc float64) chainfee.SatPerKWeight {
 
         balance, weight := c.availableBalance(AdditionalHtlc)
-        feeRate := c.localCommitChain.tip().feePerKw
+        feeRate := c.commitChains.Local.tip().feePerKw
         currentFee := feeRate.FeeForWeight(weight)
 
         maxBalance := balance.ToSatoshis() + currentFee
@@ -7756,7 +7785,7 @@ func TestIdealCommitFeeRate(t *testing.T) {
     // currentFeeRate calculates the current fee rate of the channel. The
     // ideal fee rate is floored at the current fee rate of the channel.
     currentFeeRate := func(c *LightningChannel) chainfee.SatPerKWeight {
-        return c.localCommitChain.tip().feePerKw
+        return c.commitChains.Local.tip().feePerKw
     }
 
     // testCase definies the test cases when calculating the ideal fee rate
@@ -8190,16 +8219,18 @@ func TestFetchParent(t *testing.T) {
             // Create a lightning channel with newly initialized
             // local and remote logs.
             lc := LightningChannel{
-                localUpdateLog:  newUpdateLog(0, 0),
-                remoteUpdateLog: newUpdateLog(0, 0),
+                updateLogs: lntypes.Dual[*updateLog]{
+                    Local:  newUpdateLog(0, 0),
+                    Remote: newUpdateLog(0, 0),
+                },
             }
 
             // Add the local and remote entries to update logs.
             for _, entry := range test.localEntries {
-                lc.localUpdateLog.appendHtlc(entry)
+                lc.updateLogs.Local.appendHtlc(entry)
             }
             for _, entry := range test.remoteEntries {
-                lc.remoteUpdateLog.appendHtlc(entry)
+                lc.updateLogs.Remote.appendHtlc(entry)
             }
 
             parent, err := lc.fetchParent(
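The lntypes.Dual literal above is the core data-structure change in this diff: hand-written pairs of fields (localUpdateLog/remoteUpdateLog, localCommitChain/remoteCommitChain, theirMessageIndex/messageIndices) collapse into one generic container indexed by channel party. A plausible minimal shape, assuming only what the hunks show (the real lntypes.Dual may carry more methods):

package main

import "fmt"

// ChannelParty distinguishes the two ends of a channel, mirroring the
// lntypes.Local/lntypes.Remote values used throughout the diff.
type ChannelParty uint8

const (
	Local ChannelParty = iota
	Remote
)

// Dual bundles one value per party, replacing paired local/remote
// fields. Sketch only.
type Dual[A any] struct {
	Local  A
	Remote A
}

// GetForParty selects the value belonging to the given party, letting
// callers be generic over "whose side" they operate on.
func (d *Dual[A]) GetForParty(p ChannelParty) A {
	if p == Local {
		return d.Local
	}
	return d.Remote
}

func main() {
	logLens := Dual[int]{Local: 3, Remote: 1}
	fmt.Println(logLens.GetForParty(Local))  // 3
	fmt.Println(logLens.GetForParty(Remote)) // 1
}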
@@ -8526,23 +8557,25 @@ func TestEvaluateView(t *testing.T) {
                 },
 
                 // Create update logs for local and remote.
-                localUpdateLog:  newUpdateLog(0, 0),
-                remoteUpdateLog: newUpdateLog(0, 0),
+                updateLogs: lntypes.Dual[*updateLog]{
+                    Local:  newUpdateLog(0, 0),
+                    Remote: newUpdateLog(0, 0),
+                },
             }
 
             for _, htlc := range test.ourHtlcs {
                 if htlc.EntryType == Add {
-                    lc.localUpdateLog.appendHtlc(htlc)
+                    lc.updateLogs.Local.appendHtlc(htlc)
                 } else {
-                    lc.localUpdateLog.appendUpdate(htlc)
+                    lc.updateLogs.Local.appendUpdate(htlc)
                 }
             }
 
             for _, htlc := range test.theirHtlcs {
                 if htlc.EntryType == Add {
-                    lc.remoteUpdateLog.appendHtlc(htlc)
+                    lc.updateLogs.Remote.appendHtlc(htlc)
                 } else {
-                    lc.remoteUpdateLog.appendUpdate(htlc)
+                    lc.updateLogs.Remote.appendUpdate(htlc)
                 }
             }
 
@@ -11099,7 +11132,7 @@ func TestBlindingPointPersistence(t *testing.T) {
     require.NoError(t, err, "unable to restart alice")
 
     // Assert that the blinding point is restored from disk.
-    remoteCommit := aliceChannel.remoteCommitChain.tip()
+    remoteCommit := aliceChannel.commitChains.Remote.tip()
     require.Len(t, remoteCommit.outgoingHTLCs, 1)
     require.Equal(t, blinding,
         remoteCommit.outgoingHTLCs[0].BlindingPoint.UnwrapOrFailV(t))
@@ -11116,7 +11149,7 @@ func TestBlindingPointPersistence(t *testing.T) {
     require.NoError(t, err, "unable to restart bob's channel")
 
     // Assert that Bob is able to recover the blinding point from disk.
-    bobCommit := bobChannel.localCommitChain.tip()
+    bobCommit := bobChannel.commitChains.Local.tip()
     require.Len(t, bobCommit.incomingHTLCs, 1)
     require.Equal(t, blinding,
         bobCommit.incomingHTLCs[0].BlindingPoint.UnwrapOrFailV(t))