multi: coop close with active HTLCs on the channel

For the lncli command we now always initiate the coop close even if
there are active HTLCs on the channel. If HTLCs are present when the
coop close is initiated, LND handles the closing flow in the
background and the lncli command blocks until the transaction is
broadcasted to the mempool. In the background LND disallows any new
HTLCs and waits until all active HTLCs are resolved before kicking
off the negotiation process.
Moreover, if active HTLCs are present and the no_wait param is not
set, the error message now highlights this so the user can react
accordingly.
ziggie 2025-02-13 22:16:08 +01:00
parent 319a0ee470
commit 59443faa36
6 changed files with 2353 additions and 2273 deletions
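
The end-to-end flow described in the commit message can also be driven from a
custom gRPC client. The sketch below is illustrative only: it assumes an
already authenticated lnrpc.LightningClient and a known channel outpoint (the
coopCloseWithActiveHTLCs helper and its parameters are hypothetical), and it
mirrors the update handling that lncli performs with NoWait set.

package main

import (
	"context"
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/lightningnetwork/lnd/lnrpc"
)

func coopCloseWithActiveHTLCs(ctx context.Context,
	client lnrpc.LightningClient, chanPoint *lnrpc.ChannelPoint) error {

	// NoWait mirrors what lncli now sets unconditionally: the coop close
	// is initiated even if HTLCs are still active on the channel.
	stream, err := client.CloseChannel(ctx, &lnrpc.CloseChannelRequest{
		ChannelPoint: chanPoint,
		NoWait:       true,
	})
	if err != nil {
		return err
	}

	for {
		resp, err := stream.Recv()
		if err != nil {
			return err
		}

		switch update := resp.Update.(type) {
		// First update: the close was accepted, possibly with HTLCs
		// that still need to be resolved before negotiation starts.
		case *lnrpc.CloseStatusUpdate_CloseInstant:
			fmt.Printf("close initiated, pending HTLCs: %d\n",
				update.CloseInstant.NumPendingHtlcs)

		// The closing transaction has been broadcasted to the mempool.
		case *lnrpc.CloseStatusUpdate_ClosePending:
			txid, err := chainhash.NewHash(update.ClosePending.Txid)
			if err != nil {
				return err
			}
			fmt.Printf("closing tx broadcasted: %v\n", txid)

		// The closing transaction is confirmed.
		case *lnrpc.CloseStatusUpdate_ChanClose:
			fmt.Println("channel close confirmed")
			return nil
		}
	}
}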


@@ -1011,6 +1011,11 @@ var closeChannelCommand = cli.Command{
 	comparison is the end boundary of the fee negotiation, if not specified
 	it's always x3 of the starting value. Increasing this value increases
 	the chance of a successful negotiation.
+	Moreover if the channel has active HTLCs on it, the coop close will
+	wait until all HTLCs are resolved and will not allow any new HTLCs on
+	the channel. The channel will appear as disabled in the listchannels
+	output. The command will block in that case until the channel close tx
+	is broadcasted.
 
 	In the case of a cooperative closure, one can manually set the address
 	to deliver funds to upon closure. This is optional, and may only be used
@@ -1043,7 +1048,9 @@ var closeChannelCommand = cli.Command{
 		},
 		cli.BoolFlag{
 			Name: "block",
-			Usage: "block until the channel is closed",
+			Usage: `block will wait for the channel to be closed,
+			meaning that it will wait for the channel close tx to
+			get 1 confirmation.`,
 		},
 		cli.Int64Flag{
 			Name: "conf_target",
@@ -1117,6 +1124,9 @@ func closeChannel(ctx *cli.Context) error {
 		SatPerVbyte:     ctx.Uint64(feeRateFlag),
 		DeliveryAddress: ctx.String("delivery_addr"),
 		MaxFeePerVbyte:  ctx.Uint64("max_fee_rate"),
+		// This makes sure that a coop close will also be executed if
+		// active HTLCs are present on the channel.
+		NoWait: true,
 	}
 
 	// After parsing the request, we'll spin up a goroutine that will
@@ -1154,7 +1164,9 @@ func closeChannel(ctx *cli.Context) error {
 // executeChannelClose attempts to close the channel from a request. The closing
 // transaction ID is sent through `txidChan` as soon as it is broadcasted to the
 // network. The block boolean is used to determine if we should block until the
-// closing transaction receives all of its required confirmations.
+// closing transaction receives a confirmation of 1 block. The logging outputs
+// are sent to stderr to avoid conflicts with the JSON output of the command
+// and potential work flows which depend on a proper JSON output.
 func executeChannelClose(ctxc context.Context, client lnrpc.LightningClient,
 	req *lnrpc.CloseChannelRequest, txidChan chan<- string, block bool) error {
@@ -1173,9 +1185,17 @@ func executeChannelClose(ctxc context.Context, client lnrpc.LightningClient,
 		switch update := resp.Update.(type) {
 		case *lnrpc.CloseStatusUpdate_CloseInstant:
-			if req.NoWait {
-				return nil
+			fmt.Fprintln(os.Stderr, "Channel close successfully "+
+				"initiated")
+
+			pendingHtlcs := update.CloseInstant.NumPendingHtlcs
+			if pendingHtlcs > 0 {
+				fmt.Fprintf(os.Stderr, "Cooperative channel "+
+					"close waiting for %d HTLCs to be "+
+					"resolved before the close process "+
+					"can kick off\n", pendingHtlcs)
 			}
 
 		case *lnrpc.CloseStatusUpdate_ClosePending:
 			closingHash := update.ClosePending.Txid
 			txid, err := chainhash.NewHash(closingHash)
@@ -1183,12 +1203,22 @@ func executeChannelClose(ctxc context.Context, client lnrpc.LightningClient,
 				return err
 			}
 
+			fmt.Fprintf(os.Stderr, "Channel close transaction "+
+				"broadcasted: %v\n", txid)
+
 			txidChan <- txid.String()
 
 			if !block {
 				return nil
 			}
 
+			fmt.Fprintln(os.Stderr, "Waiting for channel close "+
+				"confirmation ...")
+
 		case *lnrpc.CloseStatusUpdate_ChanClose:
+			fmt.Fprintln(os.Stderr, "Channel close successfully "+
+				"confirmed")
+
 			return nil
 		}
 	}


@@ -93,14 +93,17 @@ func coopCloseWithHTLCs(ht *lntest.HarnessTest) {
 	// closure is set up. Let's settle the invoice.
 	alice.RPC.SettleInvoice(preimage[:])
 
-	// Pull the instant update off the wire to clear the path for the
-	// close pending update.
-	_, err := closeClient.Recv()
+	// Pull the instant update off the wire and make sure the number of
+	// pending HTLCs is as expected.
+	update, err := closeClient.Recv()
 	require.NoError(ht, err)
 
+	closeInstant := update.GetCloseInstant()
+	require.NotNil(ht, closeInstant)
+	require.Equal(ht, closeInstant.NumPendingHtlcs, int32(1))
+
 	// Wait for the next channel closure update. Now that we have settled
 	// the only HTLC this should be imminent.
-	update, err := closeClient.Recv()
+	update, err = closeClient.Recv()
 	require.NoError(ht, err)
 
 	// This next update should be a GetClosePending as it should be the

File diff suppressed because it is too large.


@@ -2146,9 +2146,13 @@ message CloseChannelRequest {
     // NOTE: This field is only respected if we're the initiator of the channel.
     uint64 max_fee_per_vbyte = 7;
 
-    // If true, then the rpc call will not block while it awaits a closing txid.
-    // Consequently this RPC call will not return a closing txid if this value
-    // is set.
+    // If true, then the rpc call will not block while it awaits a closing txid
+    // to be broadcasted to the mempool. To obtain the closing tx one has to
+    // listen to the stream for the particular updates. Moreover if a coop close
+    // is specified and this flag is set to true the coop closing flow will be
+    // initiated even if HTLCs are active on the channel. The channel will wait
+    // until all HTLCs are resolved and then start the coop closing process. The
+    // channel will be disabled in the meantime and will disallow any new HTLCs.
     bool no_wait = 8;
 }
@@ -2166,6 +2170,10 @@ message PendingUpdate {
 }
 
 message InstantUpdate {
+    // The number of pending HTLCs that are currently active on the channel.
+    // These HTLCs need to be resolved before the channel can be closed
+    // cooperatively.
+    int32 num_pending_htlcs = 1;
 }
 
 message ReadyForPsbtFunding {


@@ -878,7 +878,7 @@
         },
         {
           "name": "no_wait",
-          "description": "If true, then the rpc call will not block while it awaits a closing txid.\nConsequently this RPC call will not return a closing txid if this value\nis set.",
+          "description": "If true, then the rpc call will not block while it awaits a closing txid\nto be broadcasted to the mempool. To obtain the closing tx one has to\nlisten to the stream for the particular updates. Moreover if a coop close\nis specified and this flag is set to true the coop closing flow will be\ninitiated even if HTLCs are active on the channel. The channel will wait\nuntil all HTLCs are resolved and then start the coop closing process. The\nchannel will be disabled in the meantime and will disallow any new HTLCs.",
           "in": "query",
           "required": false,
           "type": "boolean"
@@ -5475,7 +5475,14 @@
       "default": "INITIATOR_UNKNOWN"
     },
     "lnrpcInstantUpdate": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "num_pending_htlcs": {
+          "type": "integer",
+          "format": "int32",
+          "description": "The number of pending HTLCs that are currently active on the channel.\nThese HTLCs need to be resolved before the channel can be closed\ncooperatively."
+        }
+      }
     },
     "lnrpcInterceptFeedback": {
       "type": "object",


@@ -2717,6 +2717,9 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
 		return err
 	}
 
+	// Retrieve the number of active HTLCs on the channel.
+	activeHtlcs := channel.ActiveHtlcs()
+
 	// If a force closure was requested, then we'll handle all the details
 	// around the creation and broadcast of the unilateral closure
 	// transaction here rather than going to the switch as we don't require
@@ -2833,9 +2836,12 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
 		// If the user hasn't specified NoWait, then before we attempt
 		// to close the channel we ensure there are no active HTLCs on
 		// the link.
-		if !in.NoWait && len(channel.ActiveHtlcs()) != 0 {
-			return fmt.Errorf("cannot co-op close channel " +
-				"with active htlcs")
+		if !in.NoWait && len(activeHtlcs) != 0 {
+			return fmt.Errorf("cannot coop close channel with "+
+				"active htlcs (number of active htlcs: %d), "+
+				"bypass this check and initiate the coop "+
+				"close by setting no_wait=true",
+				len(activeHtlcs))
 		}
 
 		// Otherwise, the caller has requested a regular interactive
@@ -2879,12 +2885,19 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
 	}
 
 	// If the user doesn't want to wait for the txid to come back then we
-	// will send an empty update to kick off the stream.
+	// will send an empty update to kick off the stream. This is also used
+	// when active htlcs are still on the channel to give the client
+	// immediate feedback.
 	if in.NoWait {
 		rpcsLog.Trace("[closechannel] sending instant update")
 		if err := updateStream.Send(
+			//nolint:ll
 			&lnrpc.CloseStatusUpdate{
-				Update: &lnrpc.CloseStatusUpdate_CloseInstant{},
+				Update: &lnrpc.CloseStatusUpdate_CloseInstant{
+					CloseInstant: &lnrpc.InstantUpdate{
+						NumPendingHtlcs: int32(len(activeHtlcs)),
+					},
+				},
 			},
 		); err != nil {
 			return err
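
For completeness, the stricter path mentioned in the commit message (no_wait
left unset while HTLCs are active) can be exercised in the same way. This is a
sketch under the same package, imports, and assumptions as the earlier client
example; the coopCloseStrict helper is hypothetical. The server rejects the
request, so the new, more descriptive error should surface on the first Recv
of the stream.

func coopCloseStrict(ctx context.Context, client lnrpc.LightningClient,
	chanPoint *lnrpc.ChannelPoint) error {

	// NoWait is left at its default (false), so the server refuses the
	// coop close if active HTLCs are present on the channel.
	stream, err := client.CloseChannel(ctx, &lnrpc.CloseChannelRequest{
		ChannelPoint: chanPoint,
	})
	if err != nil {
		return err
	}

	// With active HTLCs the failure arrives as a stream error carrying
	// the "cannot coop close channel with active htlcs ..." message and
	// the hint to set no_wait=true.
	if _, err := stream.Recv(); err != nil {
		return fmt.Errorf("coop close rejected: %w", err)
	}

	return nil
}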