diff --git a/contractcourt/channel_arbitrator.go b/contractcourt/channel_arbitrator.go index 2bb0dbbc5..ce67908cd 100644 --- a/contractcourt/channel_arbitrator.go +++ b/contractcourt/channel_arbitrator.go @@ -932,21 +932,36 @@ func (c *ChannelArbitrator) stateStep( // arbitrating for. If a commitment has confirmed, then we'll // use the set snapshot from the chain, otherwise we'll use our // current set. - var htlcs map[HtlcSetKey]htlcSet - if confCommitSet != nil { - htlcs = confCommitSet.toActiveHTLCSets() - } else { + var ( + chainActions ChainActionMap + err error + ) + + // Normally if we force close the channel locally we will have + // no confCommitSet. However when the remote commitment confirms + // without us ever broadcasting our local commitment we need to + // make sure we cancel all upstream HTLCs for outgoing dust + // HTLCs as well hence we need to fetch the chain actions here + // as well. + if confCommitSet == nil { // Update the set of activeHTLCs so // checkLocalChainActions has an up-to-date view of the // commitments. c.updateActiveHTLCs() - htlcs = c.activeHTLCs - } - chainActions, err := c.checkLocalChainActions( - triggerHeight, trigger, htlcs, false, - ) - if err != nil { - return StateDefault, nil, err + htlcs := c.activeHTLCs + chainActions, err = c.checkLocalChainActions( + triggerHeight, trigger, htlcs, false, + ) + if err != nil { + return StateDefault, nil, err + } + } else { + chainActions, err = c.constructChainActions( + confCommitSet, triggerHeight, trigger, + ) + if err != nil { + return StateDefault, nil, err + } } // If there are no actions to be made, then we'll remain in the @@ -964,6 +979,25 @@ func (c *ChannelArbitrator) stateStep( log.Tracef("ChannelArbitrator(%v): logging chain_actions=%v", c.cfg.ChanPoint, lnutils.SpewLogClosure(chainActions)) + // Cancel upstream HTLCs for all outgoing dust HTLCs available + // either on the local or the remote/remote pending commitment + // transaction. 
+ dustHTLCs := chainActions[HtlcFailDustAction] + if len(dustHTLCs) > 0 { + log.Debugf("ChannelArbitrator(%v): canceling %v dust "+ + "HTLCs backwards", c.cfg.ChanPoint, + len(dustHTLCs)) + + getIdx := func(htlc channeldb.HTLC) uint64 { + return htlc.HtlcIndex + } + dustHTLCSet := fn.NewSet(fn.Map(getIdx, dustHTLCs)...) + err = c.abandonForwards(dustHTLCSet) + if err != nil { + return StateError, closeTx, err + } + } + // Depending on the type of trigger, we'll either "tunnel" // through to a farther state, or just proceed linearly to the // next state. @@ -1214,18 +1248,19 @@ return StateError, closeTx, err } - // In case its a breach transaction we fail back all outgoing - // HTLCs on the remote commitment set. + // In case it's a breach transaction we fail back all upstream + // HTLCs for their corresponding outgoing HTLCs on the remote + // commitment set (remote and remote pending set). if contractResolutions.BreachResolution != nil { // cancelBreachedHTLCs is a set which holds HTLCs whose // corresponding incoming HTLCs will be failed back // because the peer broadcasted an old state. cancelBreachedHTLCs := fn.NewSet[uint64]() - // We'll use the CommitSet, we'll fail back all outgoing - // HTLC's that exist on either of the remote - // commitments. The map is used to deduplicate any - // shared HTLC's. + // We'll use the CommitSet, we'll fail back all + // upstream HTLCs for their corresponding outgoing + // HTLC that exist on either of the remote commitments. + // The map is used to deduplicate any shared HTLCs. for htlcSetKey, htlcs := range confCommitSet.HtlcSets { if !htlcSetKey.IsRemote { continue @@ -1256,9 +1291,11 @@ return StateError, closeTx, err } - // We can fail the upstream HTLC for an outgoing - // dangling and dust HTLC because we can be sure they - // will not be resolved onchain. 
+ // We fail the upstream HTLCs for all remote pending + // outgoing HTLCs as soon as the commitment is + // confirmed. The upstream HTLCs for outgoing dust + // HTLCs have already been resolved before we reach + // this point. getIdx := func(htlc channeldb.HTLC) uint64 { return htlc.HtlcIndex } @@ -1269,15 +1306,6 @@ if err != nil { return StateError, closeTx, err } - - dustHTLCs := fn.NewSet(fn.Map( - getIdx, htlcActions[HtlcFailDustAction], - )...) - - err = c.abandonForwards(dustHTLCs) - if err != nil { - return StateError, closeTx, err - } } // Now that we know we'll need to act, we'll process all the diff --git a/contractcourt/channel_arbitrator_test.go b/contractcourt/channel_arbitrator_test.go index 916cd5f58..ba78139ab 100644 --- a/contractcourt/channel_arbitrator_test.go +++ b/contractcourt/channel_arbitrator_test.go @@ -931,6 +931,24 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { t.Fatalf("no response received") } + // We expect an immediate resolution message for the outgoing dust htlc. + // It is not resolvable on-chain and it can be canceled back even before + // the commitment transaction is confirmed. + select { + case msgs := <-chanArbCtx.resolutions: + if len(msgs) != 1 { + t.Fatalf("expected 1 message, instead got %v", + len(msgs)) + } + + if msgs[0].HtlcIndex != outgoingDustHtlc.HtlcIndex { + t.Fatalf("wrong htlc index: expected %v, got %v", + outgoingDustHtlc.HtlcIndex, msgs[0].HtlcIndex) + } + case <-time.After(defaultTimeout): + t.Fatalf("resolution msgs not sent") + } + // Now notify about the local force close getting confirmed. closeTx := &wire.MsgTx{ TxIn: []*wire.TxIn{ @@ -993,22 +1011,6 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { StateWaitingFullResolution, ) - // We expect an immediate resolution message for the outgoing dust htlc. - // It is not resolvable on-chain. 
- select { - case msgs := <-chanArbCtx.resolutions: - if len(msgs) != 1 { - t.Fatalf("expected 1 message, instead got %v", len(msgs)) - } - - if msgs[0].HtlcIndex != outgoingDustHtlc.HtlcIndex { - t.Fatalf("wrong htlc index: expected %v, got %v", - outgoingDustHtlc.HtlcIndex, msgs[0].HtlcIndex) - } - case <-time.After(defaultTimeout): - t.Fatalf("resolution msgs not sent") - } - // We'll grab the old notifier here as our resolvers are still holding // a reference to this instance, and a new one will be created when we // restart the channel arb below.