2021-12-04 12:23:56 +01:00
|
|
|
#include "config.h"
|
2019-09-10 05:27:51 +02:00
|
|
|
#include <ccan/cast/cast.h>
|
2023-07-27 23:37:52 +02:00
|
|
|
#include <ccan/mem/mem.h>
|
|
|
|
#include <ccan/tal/str/str.h>
|
2020-08-25 03:33:16 +02:00
|
|
|
#include <channeld/channeld_wiregen.h>
|
2019-08-23 23:34:52 +02:00
|
|
|
#include <common/json_command.h>
|
2022-07-04 05:49:38 +02:00
|
|
|
#include <common/json_param.h>
|
|
|
|
#include <common/json_stream.h>
|
2018-05-06 15:32:01 +02:00
|
|
|
#include <common/memleak.h>
|
2021-02-19 05:22:01 +01:00
|
|
|
#include <common/shutdown_scriptpubkey.h>
|
2021-09-16 07:00:42 +02:00
|
|
|
#include <common/type_to_string.h>
|
2018-08-17 07:06:35 +02:00
|
|
|
#include <common/wire_error.h>
|
2022-03-31 11:10:50 +02:00
|
|
|
#include <connectd/connectd_wiregen.h>
|
2018-02-20 21:59:09 +01:00
|
|
|
#include <errno.h>
|
2023-07-27 23:37:52 +02:00
|
|
|
#include <fcntl.h>
|
2023-08-07 07:51:40 +02:00
|
|
|
#include <hsmd/permissions.h>
|
2021-09-17 00:31:38 +02:00
|
|
|
#include <lightningd/chaintopology.h>
|
2021-09-16 07:00:42 +02:00
|
|
|
#include <lightningd/channel.h>
|
2018-02-20 21:59:09 +01:00
|
|
|
#include <lightningd/channel_control.h>
|
|
|
|
#include <lightningd/closing_control.h>
|
2020-04-02 03:58:07 +02:00
|
|
|
#include <lightningd/coin_mvts.h>
|
2023-10-22 06:07:31 +02:00
|
|
|
#include <lightningd/connect_control.h>
|
2021-03-12 01:19:40 +01:00
|
|
|
#include <lightningd/dual_open_control.h>
|
2022-01-24 21:03:52 +01:00
|
|
|
#include <lightningd/gossip_control.h>
|
2018-02-20 21:59:09 +01:00
|
|
|
#include <lightningd/hsm_control.h>
|
2020-10-09 00:21:20 +02:00
|
|
|
#include <lightningd/notification.h>
|
2018-02-20 21:59:09 +01:00
|
|
|
#include <lightningd/peer_control.h>
|
2022-01-11 02:13:59 +01:00
|
|
|
#include <lightningd/peer_fd.h>
|
2021-12-23 21:16:35 +01:00
|
|
|
#include <wally_bip32.h>
|
2023-07-27 23:37:52 +02:00
|
|
|
#include <wally_psbt.h>
|
|
|
|
|
|
|
|
/* An in-flight splice RPC command.  Tracked on ld->splice_commands so that
 * async responses arriving from channeld can be routed back to the JSON-RPC
 * caller (see splice_command_for_chan below). */
struct splice_command {
	/* Inside struct lightningd splice_commands. */
	struct list_node list;
	/* Command structure. This is the parent of the splice command. */
	struct command *cmd;
	/* Channel being spliced. */
	struct channel *channel;
};
|
2018-02-20 21:59:09 +01:00
|
|
|
|
2023-07-21 09:19:22 +02:00
|
|
|
void channel_update_feerates(struct lightningd *ld, const struct channel *channel)
|
2018-08-23 01:27:22 +02:00
|
|
|
{
|
2018-08-23 01:27:25 +02:00
|
|
|
u8 *msg;
|
2023-07-21 09:16:22 +02:00
|
|
|
u32 min_feerate, max_feerate;
|
2023-06-26 01:13:21 +02:00
|
|
|
bool anchors = channel_type_has_anchors(channel->type);
|
|
|
|
u32 feerate = unilateral_feerate(ld->topology, anchors);
|
2018-08-23 01:27:25 +02:00
|
|
|
|
|
|
|
/* Nothing to do if we don't know feerate. */
|
|
|
|
if (!feerate)
|
|
|
|
return;
|
|
|
|
|
2023-06-26 01:13:21 +02:00
|
|
|
/* For anchors, we just need the commitment tx to relay. */
|
|
|
|
if (anchors)
|
|
|
|
min_feerate = get_feerate_floor(ld->topology);
|
|
|
|
else
|
|
|
|
min_feerate = feerate_min(ld, NULL);
|
2023-07-21 09:16:22 +02:00
|
|
|
max_feerate = feerate_max(ld, NULL);
|
|
|
|
|
|
|
|
if (channel->ignore_fee_limits || ld->config.ignore_fee_limits) {
|
|
|
|
min_feerate = 1;
|
|
|
|
max_feerate = 0xFFFFFFFF;
|
|
|
|
}
|
2023-06-26 01:13:21 +02:00
|
|
|
|
2021-02-18 05:15:54 +01:00
|
|
|
log_debug(ld->log,
|
|
|
|
"update_feerates: feerate = %u, min=%u, max=%u, penalty=%u",
|
|
|
|
feerate,
|
2023-06-26 01:13:21 +02:00
|
|
|
min_feerate,
|
2021-02-18 05:15:54 +01:00
|
|
|
feerate_max(ld, NULL),
|
lightningd: clean up feerate handling, deprecate old terms.
Drop try_get_feerate() in favor of explicit feerate_for_deadline() and
smoothed_feerate_for_deadline().
This shows us everywhere we deal with old-style feerates by names.
`delayed_to_us` and `htlc_resolution` will be moving to dynamic fees,
so deprecate those.
Note that "penalty" is still used for generating penalty txs for
watchtowers, and "unilateral_close" still used until we get zero-fee
anchors.
Changelog-Added: JSON-RPC: `feerates` `estimates` array shows fee estimates by blockcount from underlying plugin (usually *bcli*).
Changelog-Changed: JSON-RPC: `close`, `fundchannel`, `fundpsbt`, `multifundchannel`, `multiwithdraw`, `txprepare`, `upgradewallet`, `withdraw` `feerate` (`feerange` for `close`) value *slow* is now 100 block-estimate, not half of 100-block estimate.
Changelog-Deprecated: JSON-RPC: `close`, `fundchannel`, `fundpsbt`, `multifundchannel`, `multiwithdraw`, `txprepare`, `upgradewallet`, `withdraw` `feerate` (`feerange` for `close`) expressed as, "delayed_to_us", "htlc_resolution", "max_acceptable" or "min_acceptable". Use explicit block counts or *slow*/*normal*/*urgent*/*minimum*.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2023-04-07 06:43:39 +02:00
|
|
|
penalty_feerate(ld->topology));
|
2021-02-18 05:15:54 +01:00
|
|
|
|
2020-08-25 03:33:16 +02:00
|
|
|
msg = towire_channeld_feerates(NULL, feerate,
|
2023-06-26 01:13:21 +02:00
|
|
|
min_feerate,
|
2023-07-21 09:16:22 +02:00
|
|
|
max_feerate,
|
lightningd: clean up feerate handling, deprecate old terms.
Drop try_get_feerate() in favor of explicit feerate_for_deadline() and
smoothed_feerate_for_deadline().
This shows us everywhere we deal with old-style feerates by names.
`delayed_to_us` and `htlc_resolution` will be moving to dynamic fees,
so deprecate those.
Note that "penalty" is still used for generating penalty txs for
watchtowers, and "unilateral_close" still used until we get zero-fee
anchors.
Changelog-Added: JSON-RPC: `feerates` `estimates` array shows fee estimates by blockcount from underlying plugin (usually *bcli*).
Changelog-Changed: JSON-RPC: `close`, `fundchannel`, `fundpsbt`, `multifundchannel`, `multiwithdraw`, `txprepare`, `upgradewallet`, `withdraw` `feerate` (`feerange` for `close`) value *slow* is now 100 block-estimate, not half of 100-block estimate.
Changelog-Deprecated: JSON-RPC: `close`, `fundchannel`, `fundpsbt`, `multifundchannel`, `multiwithdraw`, `txprepare`, `upgradewallet`, `withdraw` `feerate` (`feerange` for `close`) expressed as, "delayed_to_us", "htlc_resolution", "max_acceptable" or "min_acceptable". Use explicit block counts or *slow*/*normal*/*urgent*/*minimum*.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2023-04-07 06:43:39 +02:00
|
|
|
penalty_feerate(ld->topology));
|
2018-08-23 01:27:22 +02:00
|
|
|
subd_send_msg(channel->owner, take(msg));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void try_update_feerates(struct lightningd *ld, struct channel *channel)
|
|
|
|
{
|
|
|
|
/* No point until funding locked in */
|
2023-10-02 00:59:49 +02:00
|
|
|
if (!channel_state_fees_can_change(channel->state))
|
2018-08-23 01:27:22 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* Can't if no daemon listening. */
|
|
|
|
if (!channel->owner)
|
|
|
|
return;
|
|
|
|
|
2023-07-21 09:19:22 +02:00
|
|
|
channel_update_feerates(ld, channel);
|
2018-08-23 01:27:22 +02:00
|
|
|
}
|
|
|
|
|
2021-06-22 20:25:59 +02:00
|
|
|
/* Tell channeld about a new blockheight, for leased channels only.
 * If the peer is offline and has fallen too far behind on blockheight
 * updates for a lease they opened, the channel is failed permanently. */
static void try_update_blockheight(struct lightningd *ld,
				   struct channel *channel,
				   u32 blockheight)
{
	u8 *msg;

	/* We don't update the blockheight for non-leased chans */
	if (channel->lease_expiry == 0)
		return;

	log_debug(channel->log, "attempting update blockheight %s",
		  type_to_string(tmpctx, struct channel_id, &channel->cid));

	/* Don't advertise heights we aren't sure about yet. */
	if (!topology_synced(ld->topology)) {
		log_debug(channel->log, "chain not synced,"
			  " not updating blockheight");
		return;
	}

	/* If they're offline, check that we're not too far behind anyway */
	if (!channel->owner) {
		if (channel->opener == REMOTE) {
			/* Last height the (remote) opener acknowledged. */
			u32 peer_height
				= get_blockheight(channel->blockheight_states,
						  channel->opener, REMOTE);

			/* Lease no longer active, we don't (really) care */
			if (peer_height >= channel->lease_expiry)
				return;

			/* Guard against overflow in the comparison below. */
			assert(peer_height + 1008 > peer_height);
			if (peer_height + 1008 < blockheight)
				channel_fail_permanent(channel,
						       REASON_PROTOCOL,
						       "Offline peer is too"
						       " far behind,"
						       " terminating leased"
						       " channel. Our current"
						       " %u, theirs %u",
						       blockheight,
						       peer_height);
		}
		return;
	}

	/* If we're not opened/locked in yet, don't send update */
	if (!channel_state_can_add_htlc(channel->state))
		return;

	log_debug(ld->log, "update_blockheight: height = %u", blockheight);

	msg = towire_channeld_blockheight(NULL, blockheight);
	subd_send_msg(channel->owner, take(msg));
}
|
|
|
|
|
2018-08-23 01:27:22 +02:00
|
|
|
void notify_feerate_change(struct lightningd *ld)
|
|
|
|
{
|
|
|
|
struct peer *peer;
|
2023-01-18 06:04:32 +01:00
|
|
|
struct peer_node_id_map_iter it;
|
2018-08-23 01:27:22 +02:00
|
|
|
|
2023-01-18 06:04:32 +01:00
|
|
|
for (peer = peer_node_id_map_first(ld->peers, &it);
|
|
|
|
peer;
|
|
|
|
peer = peer_node_id_map_next(ld->peers, &it)) {
|
2022-03-22 21:30:54 +01:00
|
|
|
struct channel *channel;
|
2018-08-23 01:27:22 +02:00
|
|
|
|
2022-03-22 21:30:54 +01:00
|
|
|
list_for_each(&peer->channels, channel, list)
|
|
|
|
try_update_feerates(ld, channel);
|
2018-08-23 01:27:22 +02:00
|
|
|
}
|
2022-03-22 21:30:54 +01:00
|
|
|
|
|
|
|
/* FIXME: We choose not to drop to chain if we can't contact
|
|
|
|
* peer. We *could* do so, however. */
|
2018-08-23 01:27:22 +02:00
|
|
|
}
|
|
|
|
|
2023-07-27 23:37:52 +02:00
|
|
|
static struct splice_command *splice_command_for_chan(struct lightningd *ld,
|
|
|
|
struct channel *channel)
|
|
|
|
{
|
|
|
|
struct splice_command *cc;
|
|
|
|
|
|
|
|
list_for_each(&ld->splice_commands, cc, list)
|
|
|
|
if (channel == cc->channel)
|
|
|
|
return cc;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void handle_splice_funding_error(struct lightningd *ld,
|
|
|
|
struct channel *channel,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct splice_command *cc;
|
|
|
|
struct amount_msat funding, req_funding;
|
|
|
|
bool opener_error;
|
|
|
|
|
|
|
|
if (!fromwire_channeld_splice_funding_error(msg, &funding,
|
|
|
|
&req_funding,
|
|
|
|
&opener_error)) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad channeld_splice_feerate_error %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
cc = splice_command_for_chan(ld, channel);
|
|
|
|
if (cc) {
|
2023-09-19 23:35:57 +02:00
|
|
|
was_pending(command_fail(cc->cmd, SPLICE_FUNDING_LOW,
|
|
|
|
"%s provided %s but committed to %s.",
|
|
|
|
opener_error ? "You" : "Peer",
|
|
|
|
fmt_amount_msat(tmpctx, funding),
|
|
|
|
fmt_amount_msat(tmpctx, req_funding)));
|
2023-07-27 23:37:52 +02:00
|
|
|
}
|
2023-09-19 23:35:57 +02:00
|
|
|
else {
|
2023-07-27 23:37:52 +02:00
|
|
|
log_peer_unusual(ld->log, &channel->peer->id,
|
|
|
|
"Splice funding too low. %s provided but %s"
|
|
|
|
" commited to %s",
|
|
|
|
opener_error ? "peer" : "you",
|
|
|
|
fmt_amount_msat(tmpctx, funding),
|
|
|
|
fmt_amount_msat(tmpctx, req_funding));
|
2023-09-19 23:35:57 +02:00
|
|
|
}
|
2023-07-27 23:37:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void handle_splice_state_error(struct lightningd *ld,
|
|
|
|
struct channel *channel,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct splice_command *cc;
|
|
|
|
char *error_msg;
|
|
|
|
|
|
|
|
if (!fromwire_channeld_splice_state_error(tmpctx, msg, &error_msg)) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad channeld_splice_state_error %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
cc = splice_command_for_chan(ld, channel);
|
2023-09-19 23:35:57 +02:00
|
|
|
if (cc)
|
|
|
|
was_pending(command_fail(cc->cmd, SPLICE_STATE_ERROR,
|
|
|
|
"%s", error_msg));
|
2023-07-27 23:37:52 +02:00
|
|
|
else
|
|
|
|
log_peer_unusual(ld->log, &channel->peer->id,
|
|
|
|
"Splice state error: %s", error_msg);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void handle_splice_feerate_error(struct lightningd *ld,
|
|
|
|
struct channel *channel,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct splice_command *cc;
|
|
|
|
struct amount_msat fee;
|
|
|
|
bool too_high;
|
2023-09-19 23:35:57 +02:00
|
|
|
char *error_msg;
|
2023-07-27 23:37:52 +02:00
|
|
|
|
|
|
|
if (!fromwire_channeld_splice_feerate_error(msg, &fee, &too_high)) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad fromwire_channeld_splice_feerate_error %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
cc = splice_command_for_chan(ld, channel);
|
|
|
|
if (cc) {
|
|
|
|
if (too_high)
|
2023-09-19 23:35:57 +02:00
|
|
|
error_msg = tal_fmt(tmpctx, "Feerate too high. Do you "
|
|
|
|
"really want to spend %s on fees?",
|
|
|
|
fmt_amount_msat(tmpctx, fee));
|
2023-07-27 23:37:52 +02:00
|
|
|
else
|
2023-09-19 23:35:57 +02:00
|
|
|
error_msg = tal_fmt(tmpctx, "Feerate too low. Your "
|
|
|
|
"funding only provided %s in fees",
|
|
|
|
fmt_amount_msat(tmpctx, fee));
|
2023-07-27 23:37:52 +02:00
|
|
|
|
2023-09-19 23:35:57 +02:00
|
|
|
was_pending(command_fail(cc->cmd,
|
|
|
|
too_high ? SPLICE_HIGH_FEE : SPLICE_LOW_FEE,
|
|
|
|
"%s", error_msg));
|
2023-07-27 23:37:52 +02:00
|
|
|
}
|
2023-09-19 23:35:57 +02:00
|
|
|
else {
|
2023-07-27 23:37:52 +02:00
|
|
|
log_peer_unusual(ld->log, &channel->peer->id, "Peer gave us a"
|
|
|
|
" splice pkg with too low of feerate (fee was"
|
|
|
|
" %s), we rejected it.",
|
|
|
|
fmt_amount_msat(tmpctx, fee));
|
2023-09-19 23:35:57 +02:00
|
|
|
}
|
2023-07-27 23:37:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* When channeld finishes processing the `splice_init` command, this is called */
|
|
|
|
static void handle_splice_confirmed_init(struct lightningd *ld,
|
|
|
|
struct channel *channel,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct splice_command *cc;
|
|
|
|
struct wally_psbt *psbt;
|
|
|
|
|
|
|
|
if (!fromwire_channeld_splice_confirmed_init(tmpctx, msg, &psbt)) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad splice_confirmed_init %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
cc = splice_command_for_chan(ld, channel);
|
|
|
|
if (!cc) {
|
|
|
|
channel_internal_error(channel, "splice_confirmed_init"
|
|
|
|
" received without an active command %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct json_stream *response = json_stream_success(cc->cmd);
|
|
|
|
json_add_string(response, "psbt", psbt_to_b64(tmpctx, psbt));
|
|
|
|
|
|
|
|
was_pending(command_success(cc->cmd, response));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Channeld sends us this in response to a user's `splice_update` request */
|
|
|
|
static void handle_splice_confirmed_update(struct lightningd *ld,
|
|
|
|
struct channel *channel,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct splice_command *cc;
|
|
|
|
struct wally_psbt *psbt;
|
|
|
|
bool commitments_secured;
|
|
|
|
|
|
|
|
if (!fromwire_channeld_splice_confirmed_update(tmpctx,
|
|
|
|
msg,
|
|
|
|
&psbt,
|
|
|
|
&commitments_secured)) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad splice_confirmed_update %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
cc = splice_command_for_chan(ld, channel);
|
|
|
|
if (!cc) {
|
|
|
|
channel_internal_error(channel, "splice_update_confirmed"
|
|
|
|
" received without an active command %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct json_stream *response = json_stream_success(cc->cmd);
|
|
|
|
json_add_string(response, "psbt", psbt_to_b64(tmpctx, psbt));
|
|
|
|
json_add_bool(response, "commitments_secured", commitments_secured);
|
|
|
|
|
|
|
|
was_pending(command_success(cc->cmd, response));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Channeld uses this to request the funding transaction for help building the
|
|
|
|
* splice tx */
|
|
|
|
static void handle_splice_lookup_tx(struct lightningd *ld,
|
|
|
|
struct channel *channel,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct bitcoin_txid txid;
|
|
|
|
struct bitcoin_tx *tx;
|
|
|
|
u8 *outmsg;
|
|
|
|
|
|
|
|
if (!fromwire_channeld_splice_lookup_tx(msg, &txid)) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad splice_lookup_tx %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
tx = wallet_transaction_get(tmpctx, ld->wallet, &txid);
|
|
|
|
|
|
|
|
if (!tx) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"channel control unable to find txid %s",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct bitcoin_txid,
|
|
|
|
&txid));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
outmsg = towire_channeld_splice_lookup_tx_result(NULL, tx);
|
|
|
|
subd_send_msg(channel->owner, take(outmsg));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Extra splice data we want to store for bitcoin send tx interface */
struct send_splice_info
{
	/* User's splice command, if any (NULL => peer-initiated splice). */
	struct splice_command *cc;
	/* Channel whose splice tx is being broadcast. */
	struct channel *channel;
	/* The fully-signed splice transaction being broadcast. */
	const struct bitcoin_tx *final_tx;
	/* Index of the channel funding output within final_tx. */
	u32 output_index;
	/* Broadcast failure message; NULL until a failure occurs. */
	const char *err_msg;
};
|
|
|
|
|
|
|
|
/* Success path after a splice tx is (effectively) broadcast: absorb any
 * wallet outputs it created and, if a user command is pending, reply with
 * the raw tx and txid.  Note: does NOT free info; callers do that. */
static void handle_tx_broadcast(struct send_splice_info *info)
{
	struct lightningd *ld = info->channel->peer->ld;
	struct amount_sat unused;
	struct json_stream *response;
	struct bitcoin_txid txid;
	u8 *tx_bytes;
	int num_utxos;

	tx_bytes = linearize_tx(tmpctx, info->final_tx);
	bitcoin_txid(info->final_tx, &txid);

	/* This might have spent UTXOs from our wallet */
	num_utxos = wallet_extract_owned_outputs(ld->wallet,
						 info->final_tx->wtx, false,
						 NULL, &unused);
	/* Only persist the tx if it actually touched our wallet. */
	if (num_utxos)
		wallet_transaction_add(ld->wallet, info->final_tx->wtx, 0, 0);

	/* Reply to the JSON-RPC caller, if this was user-initiated. */
	if (info->cc) {
		response = json_stream_success(info->cc->cmd);

		json_add_hex(response, "tx", tx_bytes, tal_bytelen(tx_bytes));
		json_add_txid(response, "txid", &txid);

		was_pending(command_success(info->cc->cmd, response));
	}
}
|
|
|
|
|
|
|
|
/* Succeeds if the utxo was found in the mempool or in the utxo set. If it's in
|
|
|
|
* a block and spent it will fail but we're okay with that here. */
|
|
|
|
static void check_utxo_block(struct bitcoind *bitcoind UNUSED,
|
|
|
|
const struct bitcoin_tx_output *txout,
|
|
|
|
void *arg)
|
|
|
|
{
|
|
|
|
struct send_splice_info *info = arg;
|
|
|
|
|
|
|
|
if(!txout) {
|
|
|
|
if (info->cc)
|
|
|
|
was_pending(command_fail(info->cc->cmd,
|
|
|
|
SPLICE_BROADCAST_FAIL,
|
|
|
|
"Error broadcasting splice "
|
|
|
|
"tx: %s. Unsent tx discarded "
|
|
|
|
"%s.",
|
|
|
|
info->err_msg,
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct wally_tx,
|
|
|
|
info->final_tx->wtx)));
|
|
|
|
|
|
|
|
log_unusual(info->channel->log,
|
|
|
|
"Error broadcasting splice "
|
|
|
|
"tx: %s. Unsent tx discarded "
|
|
|
|
"%s.",
|
|
|
|
info->err_msg,
|
|
|
|
type_to_string(tmpctx, struct wally_tx,
|
|
|
|
info->final_tx->wtx));
|
|
|
|
}
|
|
|
|
else
|
|
|
|
handle_tx_broadcast(info);
|
|
|
|
|
|
|
|
tal_free(info);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Callback for after the splice tx is sent to bitcoind */
|
|
|
|
static void send_splice_tx_done(struct bitcoind *bitcoind UNUSED,
|
|
|
|
bool success, const char *msg,
|
|
|
|
struct send_splice_info *info)
|
|
|
|
{
|
|
|
|
/* A NULL value of `info->cc` means we got here without user intiation.
|
|
|
|
* This means we are the ACCEPTER side of the splice */
|
|
|
|
struct lightningd *ld = info->channel->peer->ld;
|
|
|
|
struct bitcoin_outpoint outpoint;
|
|
|
|
|
|
|
|
bitcoin_txid(info->final_tx, &outpoint.txid);
|
|
|
|
outpoint.n = info->output_index;
|
|
|
|
|
|
|
|
if (!success) {
|
|
|
|
info->err_msg = tal_strdup(info, msg);
|
|
|
|
bitcoind_getutxout(ld->topology->bitcoind, &outpoint,
|
|
|
|
check_utxo_block, info);
|
|
|
|
} else {
|
|
|
|
handle_tx_broadcast(info);
|
|
|
|
tal_free(info);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Where the splice tx gets finally transmitted to the chain */
|
|
|
|
static void send_splice_tx(struct channel *channel,
|
|
|
|
const struct bitcoin_tx *tx,
|
|
|
|
struct splice_command *cc,
|
|
|
|
u32 output_index)
|
|
|
|
{
|
|
|
|
struct lightningd *ld = channel->peer->ld;
|
|
|
|
u8* tx_bytes = linearize_tx(tmpctx, tx);
|
|
|
|
|
|
|
|
log_debug(channel->log,
|
|
|
|
"Broadcasting splice tx %s for channel %s.",
|
|
|
|
tal_hex(tmpctx, tx_bytes),
|
|
|
|
type_to_string(tmpctx, struct channel_id, &channel->cid));
|
|
|
|
|
|
|
|
struct send_splice_info *info = tal(NULL, struct send_splice_info);
|
|
|
|
|
|
|
|
info->cc = tal_steal(info, cc);
|
|
|
|
info->channel = channel;
|
|
|
|
info->final_tx = tal_steal(info, tx);
|
|
|
|
info->output_index = output_index;
|
|
|
|
info->err_msg = NULL;
|
|
|
|
|
|
|
|
bitcoind_sendrawtx(ld->topology->bitcoind,
|
2023-10-24 03:41:30 +02:00
|
|
|
ld->topology->bitcoind,
|
2023-07-27 23:37:52 +02:00
|
|
|
cc ? cc->cmd->id : NULL,
|
|
|
|
tal_hex(tmpctx, tx_bytes),
|
|
|
|
false,
|
|
|
|
send_splice_tx_done, info);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* After user signs PSBT with splice_signed, our node goes through the signing
|
|
|
|
* process (adding it's own signatures and peers' sigs), sending the result to
|
|
|
|
* us here: */
|
|
|
|
static void handle_splice_confirmed_signed(struct lightningd *ld,
|
|
|
|
struct channel *channel,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct splice_command *cc;
|
|
|
|
struct bitcoin_tx *tx;
|
|
|
|
struct bitcoin_txid txid;
|
|
|
|
struct channel_inflight *inflight;
|
|
|
|
u32 output_index;
|
|
|
|
|
|
|
|
if (!fromwire_channeld_splice_confirmed_signed(tmpctx, msg, &tx, &output_index)) {
|
|
|
|
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad splice_confirmed_signed %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
bitcoin_txid(tx, &txid);
|
|
|
|
inflight = channel_inflight_find(channel, &txid);
|
|
|
|
if (!inflight)
|
|
|
|
channel_internal_error(channel, "Unable to load inflight for"
|
|
|
|
" splice_confirmed_signed txid %s",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct bitcoin_txid,
|
|
|
|
&txid));
|
|
|
|
|
|
|
|
inflight->remote_tx_sigs = true;
|
|
|
|
wallet_inflight_save(ld->wallet, inflight);
|
|
|
|
|
|
|
|
if (channel->state != CHANNELD_NORMAL) {
|
|
|
|
log_debug(channel->log,
|
|
|
|
"Would broadcast splice, but state %s"
|
|
|
|
" isn't CHANNELD_NORMAL",
|
|
|
|
channel_state_name(channel));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
cc = splice_command_for_chan(ld, channel);
|
|
|
|
/* If matching user command found, this was a user intiated splice */
|
|
|
|
channel_set_state(channel,
|
|
|
|
CHANNELD_NORMAL,
|
|
|
|
CHANNELD_AWAITING_SPLICE,
|
|
|
|
cc ? REASON_USER : REASON_REMOTE,
|
|
|
|
"Broadcasting splice");
|
|
|
|
|
|
|
|
send_splice_tx(channel, tx, cc, output_index);
|
|
|
|
}
|
|
|
|
|
2023-10-02 00:59:51 +02:00
|
|
|
/* Record (or refresh after reorg) a channel's short_channel_id once its
 * funding tx has a block position.  Returns false — after failing the
 * channel — if the block coordinates cannot form a valid scid. */
bool depthcb_update_scid(struct channel *channel,
			 const struct bitcoin_txid *txid,
			 const struct bitcoin_outpoint *outpoint)
{
	struct txlocator *loc;
	struct lightningd *ld = channel->peer->ld;
	struct short_channel_id scid;

	/* What scid is this giving us? */
	loc = wallet_transaction_locate(tmpctx, ld->wallet, txid);
	if (!mk_short_channel_id(&scid,
				 loc->blkheight, loc->index,
				 outpoint->n)) {
		channel_fail_permanent(channel,
				       REASON_LOCAL,
				       "Invalid funding scid %u:%u:%u",
				       loc->blkheight, loc->index,
				       outpoint->n);
		return false;
	}

	if (!channel->scid) {
		/* First time we learn the scid: annotate the funding output
		 * in the wallet and persist it on the channel. */
		wallet_annotate_txout(ld->wallet, outpoint,
				      TX_CHANNEL_FUNDING, channel->dbid);
		channel->scid = tal_dup(channel, struct short_channel_id, &scid);

		/* If we have a zeroconf channel, i.e., no scid yet
		 * but have exchange `channel_ready` messages, then we
		 * need to fire a second time, in order to trigger the
		 * `coin_movement` event. This is a subset of the
		 * `lockin_complete` function called from
		 * AWAITING_LOCKIN->NORMAL otherwise. */
		if (channel->minimum_depth == 0)
			lockin_has_completed(channel, false);

		wallet_channel_save(ld->wallet, channel);
	} else if (!short_channel_id_eq(channel->scid, &scid)) {
		/* We freaked out if required when original was
		 * removed, so just update now */
		log_info(channel->log, "Short channel id changed from %s->%s",
			 type_to_string(tmpctx, struct short_channel_id, channel->scid),
			 type_to_string(tmpctx, struct short_channel_id, &scid));
		*channel->scid = scid;
		wallet_channel_save(ld->wallet, channel);
	}

	return true;
}
|
|
|
|
|
|
|
|
/* txwatch callback for a pending splice's funding tx: waits for it to gain
 * depth, records the resulting scid, and keeps channeld informed. */
static enum watch_result splice_depth_cb(struct lightningd *ld,
					 const struct bitcoin_txid *txid,
					 const struct bitcoin_tx *tx,
					 unsigned int depth,
					 struct channel_inflight *inflight)
{
	/* Usually, we're here because we're awaiting a splice, but
	 * we could also mutual shutdown, or that weird splice_locked_memonly
	 * hack... */
	if (inflight->channel->state != CHANNELD_AWAITING_SPLICE)
		return DELETE_WATCH;

	/* Reorged out? OK, we're not committed yet. */
	if (depth == 0)
		return KEEP_WATCHING;

	/* Record the scid; on failure the channel has already been failed. */
	if (!depthcb_update_scid(inflight->channel, txid, &inflight->funding->outpoint))
		return DELETE_WATCH;

	/* Tell channeld (if running) about the new depth. */
	if (inflight->channel->owner) {
		subd_send_msg(inflight->channel->owner,
			      take(towire_channeld_funding_depth(
				   NULL, inflight->channel->scid,
				   inflight->channel->alias[LOCAL],
				   depth, true, txid)));
	}

	/* channeld will tell us when splice is locked in: we'll clean
	 * this watch up then. */
	return KEEP_WATCHING;
}
|
|
|
|
|
|
|
|
void watch_splice_inflight(struct lightningd *ld,
|
|
|
|
struct channel_inflight *inflight)
|
|
|
|
{
|
|
|
|
watch_txid(inflight, ld->topology,
|
|
|
|
&inflight->funding->outpoint.txid,
|
|
|
|
splice_depth_cb, inflight);
|
|
|
|
}
|
|
|
|
|
2023-07-27 23:37:52 +02:00
|
|
|
/* channeld negotiated a new splice candidate: create and persist an
 * inflight record for it, start watching its funding txid, then ack. */
static void handle_add_inflight(struct lightningd *ld,
				struct channel *channel,
				const u8 *msg)
{
	struct bitcoin_outpoint outpoint;
	u32 feerate;
	struct amount_sat satoshis;
	s64 splice_amnt;
	struct wally_psbt *psbt;
	struct channel_inflight *inflight;
	struct bitcoin_signature last_sig;
	bool i_am_initiator;

	if (!fromwire_channeld_add_inflight(tmpctx,
					    msg,
					    &outpoint.txid,
					    &outpoint.n,
					    &feerate,
					    &satoshis,
					    &splice_amnt,
					    &psbt,
					    &i_am_initiator)) {
		channel_internal_error(channel,
				       "bad channel_add_inflight %s",
				       tal_hex(channel, msg));
		return;
	}

	/* FIXME: DTODO: Use a pointer to a sig instead of zero'ing one out. */
	memset(&last_sig, 0, sizeof(last_sig));

	inflight = new_inflight(channel,
				&outpoint,
				feerate,
				satoshis,
				channel->our_funds,
				psbt,
				NULL,
				last_sig,
				channel->lease_expiry,
				channel->lease_commit_sig,
				channel->lease_chan_max_msat,
				channel->lease_chan_max_ppt,
				0,
				AMOUNT_MSAT(0),
				AMOUNT_SAT(0),
				splice_amnt,
				i_am_initiator);

	log_debug(channel->log, "lightningd adding inflight with txid %s",
		  type_to_string(tmpctx, struct bitcoin_txid,
				 &inflight->funding->outpoint.txid));

	wallet_inflight_add(ld->wallet, inflight);
	watch_splice_inflight(ld, inflight);

	/* Ack so channeld can proceed with the splice. */
	subd_send_msg(channel->owner, take(towire_channeld_got_inflight(NULL)));
}
|
|
|
|
|
|
|
|
static void handle_update_inflight(struct lightningd *ld,
|
|
|
|
struct channel *channel,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
struct channel_inflight *inflight;
|
|
|
|
struct wally_psbt *psbt;
|
|
|
|
struct bitcoin_txid txid;
|
|
|
|
struct bitcoin_tx *last_tx;
|
|
|
|
struct bitcoin_signature *last_sig;
|
|
|
|
|
|
|
|
if (!fromwire_channeld_update_inflight(tmpctx, msg, &psbt, &last_tx,
|
|
|
|
&last_sig)) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad channel_add_inflight %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
psbt_txid(tmpctx, psbt, &txid, NULL);
|
|
|
|
inflight = channel_inflight_find(channel, &txid);
|
|
|
|
if (!inflight)
|
|
|
|
channel_internal_error(channel, "Unable to load inflight for"
|
|
|
|
" update_inflight txid %s",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct bitcoin_txid,
|
|
|
|
&txid));
|
|
|
|
|
|
|
|
if (!!last_tx != !!last_sig)
|
|
|
|
channel_internal_error(channel, "Must set last_tx and last_sig"
|
|
|
|
" together at the same time for"
|
|
|
|
" update_inflight txid %s",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct bitcoin_txid,
|
|
|
|
&txid));
|
|
|
|
|
2023-08-16 04:58:53 +02:00
|
|
|
if (last_tx) {
|
|
|
|
tal_free(inflight->last_tx);
|
|
|
|
inflight->last_tx = clone_bitcoin_tx(inflight, last_tx);
|
|
|
|
}
|
2023-07-27 23:37:52 +02:00
|
|
|
|
|
|
|
if (last_sig)
|
|
|
|
inflight->last_sig = *last_sig;
|
|
|
|
|
|
|
|
tal_wally_start();
|
|
|
|
if (wally_psbt_combine(inflight->funding_psbt, psbt) != WALLY_OK) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"Unable to combine PSBTs: %s, %s",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct wally_psbt,
|
|
|
|
inflight->funding_psbt),
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct wally_psbt,
|
|
|
|
psbt));
|
|
|
|
tal_wally_end(inflight->funding_psbt);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
tal_wally_end(inflight->funding_psbt);
|
|
|
|
|
|
|
|
psbt_finalize(inflight->funding_psbt);
|
|
|
|
wallet_inflight_save(ld->wallet, inflight);
|
|
|
|
}
|
|
|
|
|
2022-07-19 21:35:56 +02:00
|
|
|
/* Record a coin movement for a channel open (or, if it is not yet
 * confirmed, an open *proposal*) and, optionally, any pushed funds.
 *
 * @channel: the channel being opened.
 * @blockheight: block the funding tx confirmed in; 0 means "not yet
 *               in a block", which records a proposal instead.
 * @record_push: if true and push_msat was nonzero, also emit a
 *               channel push (or lease-fee) movement.
 */
void channel_record_open(struct channel *channel, u32 blockheight, bool record_push)
{
	struct chain_coin_mvt *mvt;
	struct amount_msat start_balance;
	bool is_pushed = !amount_msat_zero(channel->push);
	bool is_leased = channel->lease_expiry > 0;

	/* If funds were pushed, add/sub them from the starting balance:
	 * our_msat already reflects the push, so we reverse it to get the
	 * balance at the moment of open. */
	if (channel->opener == LOCAL) {
		if (!amount_msat_add(&start_balance,
				     channel->our_msat, channel->push))
			fatal("Unable to add push_msat (%s) + our_msat (%s)",
			      type_to_string(tmpctx, struct amount_msat,
					     &channel->push),
			      type_to_string(tmpctx, struct amount_msat,
					     &channel->our_msat));
	} else {
		if (!amount_msat_sub(&start_balance,
				     channel->our_msat, channel->push))
			fatal("Unable to sub our_msat (%s) - push (%s)",
			      type_to_string(tmpctx, struct amount_msat,
					     &channel->our_msat),
			      type_to_string(tmpctx, struct amount_msat,
					     &channel->push));
	}

	/* If it's not in a block yet, send a proposal */
	if (blockheight > 0)
		mvt = new_coin_channel_open(tmpctx,
					    &channel->cid,
					    &channel->funding,
					    &channel->peer->id,
					    blockheight,
					    start_balance,
					    channel->funding_sats,
					    channel->opener == LOCAL,
					    is_leased);
	else
		mvt = new_coin_channel_open_proposed(tmpctx,
						     &channel->cid,
						     &channel->funding,
						     &channel->peer->id,
						     start_balance,
						     channel->funding_sats,
						     channel->opener == LOCAL,
						     is_leased);

	notify_chain_mvt(channel->peer->ld, mvt);

	/* If we pushed sats, *now* record them (a lease fee if the open
	 * was leased, a plain push otherwise) */
	if (is_pushed && record_push)
		notify_channel_mvt(channel->peer->ld,
				   new_coin_channel_push(tmpctx, &channel->cid,
							 channel->push,
							 is_leased ? LEASE_FEE : PUSHED,
							 channel->opener == REMOTE));
}
|
|
|
|
|
2023-10-02 00:59:49 +02:00
|
|
|
/* Post-lockin bookkeeping: refresh feerates/blockheight with the peer
 * and record the channel-open coin movement.  record_push is forwarded
 * to channel_record_open() to decide whether pushed funds get logged. */
void lockin_has_completed(struct channel *channel, bool record_push)
{
	struct lightningd *ld = channel->peer->ld;
	u32 open_blocknum;

	/* Once funded we use IMMEDIATE feerates, and fees may have moved
	 * in the meantime, so refresh them now. */
	try_update_feerates(ld, channel);

	try_update_blockheight(ld, channel,
			       get_block_height(ld->topology));

	/* Record the open (a mere proposal if we have no scid yet,
	 * signalled by blocknum 0). */
	if (channel->scid)
		open_blocknum = short_channel_id_blocknum(channel->scid);
	else
		open_blocknum = 0;

	channel_record_open(channel, open_blocknum, record_push);
}
|
|
|
|
|
|
|
|
/* Both sides consider the funding (or splice) locked in: move the
 * channel from @expected_state to CHANNELD_NORMAL and run the
 * post-lockin bookkeeping.
 *
 * Quietly ignores the call if we lack both an scid and aliases, or if
 * the channel has already left @expected_state (e.g. shutdown began).
 */
void lockin_complete(struct channel *channel,
		     enum channel_state expected_state)
{
	/* Need at least one way to refer to the channel in gossip/routing:
	 * a real scid or a pair of aliases. */
	if (!channel->scid &&
	    (!channel->alias[REMOTE] || !channel->alias[LOCAL])) {
		log_debug(channel->log, "Attempted lockin, but neither scid "
			  "nor aliases are set, ignoring");
		return;
	}

	/* We set this once they're locked in. */
	assert(channel->remote_channel_ready);

	/* We might have already started shutting down */
	if (channel->state != expected_state) {
		log_debug(channel->log, "Lockin complete, but state %s",
			  channel_state_name(channel));
		return;
	}

	log_debug(channel->log, "Moving channel state from %s to %s",
		  channel_state_str(expected_state),
		  channel_state_str(CHANNELD_NORMAL));

	channel_set_state(channel,
			  expected_state,
			  CHANNELD_NORMAL,
			  REASON_UNKNOWN,
			  "Lockin complete");

	/* true: record any pushed funds now that the open is final. */
	lockin_has_completed(channel, true);
}
|
|
|
|
|
2022-09-10 04:10:31 +02:00
|
|
|
bool channel_on_channel_ready(struct channel *channel,
|
|
|
|
struct pubkey *next_per_commitment_point)
|
2020-11-24 02:24:50 +01:00
|
|
|
{
|
2022-09-10 04:10:31 +02:00
|
|
|
if (channel->remote_channel_ready) {
|
2020-11-24 02:24:50 +01:00
|
|
|
channel_internal_error(channel,
|
2022-09-10 04:10:31 +02:00
|
|
|
"channel_got_channel_ready twice");
|
2020-11-24 02:24:50 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
update_per_commit_point(channel, next_per_commitment_point);
|
|
|
|
|
2022-09-10 04:10:31 +02:00
|
|
|
log_debug(channel->log, "Got channel_ready");
|
|
|
|
channel->remote_channel_ready = true;
|
2020-11-24 02:24:50 +01:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-07-27 23:37:52 +02:00
|
|
|
static void handle_peer_splice_locked(struct channel *channel, const u8 *msg)
|
|
|
|
{
|
|
|
|
struct amount_sat funding_sats;
|
|
|
|
s64 splice_amnt;
|
|
|
|
struct channel_inflight *inflight;
|
|
|
|
struct bitcoin_txid locked_txid;
|
|
|
|
|
|
|
|
if (!fromwire_channeld_got_splice_locked(msg, &funding_sats,
|
|
|
|
&splice_amnt,
|
|
|
|
&locked_txid)) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad channel_got_funding_locked %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
channel->our_msat.millisatoshis += splice_amnt * 1000; /* Raw: splicing */
|
|
|
|
channel->msat_to_us_min.millisatoshis += splice_amnt * 1000; /* Raw: splicing */
|
|
|
|
channel->msat_to_us_max.millisatoshis += splice_amnt * 1000; /* Raw: splicing */
|
|
|
|
|
|
|
|
inflight = channel_inflight_find(channel, &locked_txid);
|
|
|
|
if(!inflight)
|
|
|
|
channel_internal_error(channel, "Unable to load inflight for"
|
|
|
|
" locked_txid %s",
|
|
|
|
type_to_string(tmpctx,
|
|
|
|
struct bitcoin_txid,
|
|
|
|
&locked_txid));
|
|
|
|
|
|
|
|
wallet_htlcsigs_confirm_inflight(channel->peer->ld->wallet, channel,
|
|
|
|
&inflight->funding->outpoint);
|
|
|
|
|
|
|
|
update_channel_from_inflight(channel->peer->ld, channel, inflight);
|
|
|
|
|
|
|
|
/* Remember that we got the lockin */
|
|
|
|
wallet_channel_save(channel->peer->ld->wallet, channel);
|
|
|
|
|
|
|
|
log_debug(channel->log, "lightningd, splice_locked clearing inflights");
|
2023-08-16 04:58:53 +02:00
|
|
|
|
|
|
|
/* Take out the successful inflight from the list temporarily */
|
|
|
|
list_del(&inflight->list);
|
|
|
|
|
2023-07-27 23:37:52 +02:00
|
|
|
wallet_channel_clear_inflights(channel->peer->ld->wallet, channel);
|
|
|
|
|
2023-10-02 00:59:51 +02:00
|
|
|
/* That freed watchers in inflights: now watch funding tx */
|
|
|
|
channel_watch_funding(channel->peer->ld, channel);
|
|
|
|
|
2023-08-16 04:58:53 +02:00
|
|
|
/* Put the successful inflight back in as a memory-only object.
|
|
|
|
* peer_control's funding_spent function will pick this up and clean up
|
|
|
|
* our inflight.
|
|
|
|
*
|
|
|
|
* This prevents any potential race conditions between us and them. */
|
|
|
|
inflight->splice_locked_memonly = true;
|
|
|
|
list_add_tail(&channel->inflights, &inflight->list);
|
|
|
|
|
2023-07-27 23:37:52 +02:00
|
|
|
lockin_complete(channel, CHANNELD_AWAITING_SPLICE);
|
|
|
|
}
|
|
|
|
|
2022-09-10 04:10:31 +02:00
|
|
|
/* We were informed by channeld that channel is ready (reached mindepth):
 * record the peer's channel_ready (next per-commitment point and
 * optional remote alias), persist, and complete lockin once our own
 * depth requirement is also met. */
static void peer_got_channel_ready(struct channel *channel, const u8 *msg)
{
	struct pubkey next_per_commitment_point;
	struct short_channel_id *alias_remote;

	if (!fromwire_channeld_got_channel_ready(tmpctx,
		msg, &next_per_commitment_point, &alias_remote)) {
		channel_internal_error(channel,
				       "bad channel_got_channel_ready %s",
				       tal_hex(channel, msg));
		return;
	}

	/* Rejects (and flags internal error on) a duplicate channel_ready. */
	if (!channel_on_channel_ready(channel, &next_per_commitment_point))
		return;

	/* First remote alias wins; later ones are ignored. */
	if (channel->alias[REMOTE] == NULL)
		channel->alias[REMOTE] = tal_steal(channel, alias_remote);

	/* Remember that we got the lockin */
	wallet_channel_save(channel->peer->ld->wallet, channel);

	/* Only both-sides-ready AND deep enough counts as locked in. */
	if (channel->depth >= channel->minimum_depth)
		lockin_complete(channel, CHANNELD_AWAITING_LOCKIN);
}
|
|
|
|
|
2019-04-25 13:58:07 +02:00
|
|
|
static void peer_got_announcement(struct channel *channel, const u8 *msg)
|
|
|
|
{
|
|
|
|
secp256k1_ecdsa_signature remote_ann_node_sig;
|
|
|
|
secp256k1_ecdsa_signature remote_ann_bitcoin_sig;
|
|
|
|
|
2020-08-25 03:33:16 +02:00
|
|
|
if (!fromwire_channeld_got_announcement(msg,
|
2019-04-25 13:58:07 +02:00
|
|
|
&remote_ann_node_sig,
|
|
|
|
&remote_ann_bitcoin_sig)) {
|
|
|
|
channel_internal_error(channel,
|
2019-06-03 18:40:15 +02:00
|
|
|
"bad channel_got_announcement %s",
|
2019-04-25 13:58:07 +02:00
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
wallet_announcement_save(channel->peer->ld->wallet, channel->dbid,
|
|
|
|
&remote_ann_node_sig,
|
|
|
|
&remote_ann_bitcoin_sig);
|
|
|
|
}
|
|
|
|
|
2018-02-20 21:59:09 +01:00
|
|
|
/* The peer sent shutdown: validate their scriptpubkey, remember it,
 * move to CHANNELD_SHUTTING_DOWN if we weren't closing already, and
 * persist.  An invalid scriptpubkey gets a warning via connectd and a
 * transient channel failure (reconnect allowed). */
static void peer_got_shutdown(struct channel *channel, const u8 *msg)
{
	u8 *scriptpubkey;
	struct lightningd *ld = channel->peer->ld;
	struct bitcoin_outpoint *wrong_funding;
	/* option_shutdown_anysegwit widens the set of allowed script forms. */
	bool anysegwit = feature_negotiated(ld->our_features,
					    channel->peer->their_features,
					    OPT_SHUTDOWN_ANYSEGWIT);
	/* With anchors (either variant) we forbid old P2PKH/P2SH forms
	 * (hence the !anchors below). */
	bool anchors = feature_negotiated(ld->our_features,
					  channel->peer->their_features,
					  OPT_ANCHOR_OUTPUTS)
		|| feature_negotiated(ld->our_features,
				      channel->peer->their_features,
				      OPT_ANCHORS_ZERO_FEE_HTLC_TX);

	if (!fromwire_channeld_got_shutdown(channel, msg, &scriptpubkey,
					    &wrong_funding)) {
		channel_internal_error(channel, "bad channel_got_shutdown %s",
				       tal_hex(msg, msg));
		return;
	}

	/* BOLT #2:
	 * A receiving node:
	 *...
	 *  - if the `scriptpubkey` is not in one of the above forms:
	 *    - SHOULD send a `warning`.
	 */
	if (!valid_shutdown_scriptpubkey(scriptpubkey, anysegwit, !anchors)) {
		u8 *warning = towire_warningfmt(NULL,
						&channel->cid,
						"Bad shutdown scriptpubkey %s",
						tal_hex(tmpctx, scriptpubkey));

		/* Get connectd to send warning, and then allow reconnect. */
		subd_send_msg(ld->connectd,
			      take(towire_connectd_peer_final_msg(NULL,
								  &channel->peer->id,
								  channel->peer->connectd_counter,
								  warning)));
		channel_fail_transient(channel, true, "Bad shutdown scriptpubkey %s",
				       tal_hex(tmpctx, scriptpubkey));
		return;
	}

	/* FIXME: Add to spec that we must allow repeated shutdown! */
	tal_free(channel->shutdown_scriptpubkey[REMOTE]);
	channel->shutdown_scriptpubkey[REMOTE] = scriptpubkey;

	/* If we weren't already shutting down, we are now */
	if (channel->state != CHANNELD_SHUTTING_DOWN)
		channel_set_state(channel,
				  channel->state,
				  CHANNELD_SHUTTING_DOWN,
				  REASON_REMOTE,
				  "Peer closes channel");

	/* If we set it, that's what we want. Otherwise use their preference.
	 * We can't have both, since only opener can set this! */
	if (!channel->shutdown_wrong_funding)
		channel->shutdown_wrong_funding = wrong_funding;

	/* We now watch the "wrong" funding, in case we spend it. */
	channel_watch_wrong_funding(ld, channel);

	/* TODO(cdecker) Selectively save updated fields to DB */
	wallet_channel_save(ld->wallet, channel);
}
|
|
|
|
|
2020-12-10 21:02:02 +01:00
|
|
|
void channel_fallen_behind(struct channel *channel, const u8 *msg)
|
2018-08-17 07:06:35 +02:00
|
|
|
{
|
|
|
|
|
2019-09-10 05:27:51 +02:00
|
|
|
/* per_commitment_point is NULL if option_static_remotekey, but we
|
|
|
|
* use its presence as a flag so set it any valid key in that case. */
|
|
|
|
if (!channel->future_per_commitment_point) {
|
|
|
|
struct pubkey *any = tal(channel, struct pubkey);
|
|
|
|
if (!pubkey_from_node_id(any, &channel->peer->ld->id))
|
|
|
|
fatal("Our own id invalid?");
|
|
|
|
channel->future_per_commitment_point = any;
|
|
|
|
}
|
2018-08-17 07:06:35 +02:00
|
|
|
}
|
|
|
|
|
2020-12-10 21:02:02 +01:00
|
|
|
/* channeld tells us the peer proved a later commitment than we know:
 * parse the (possibly NULL) future per-commitment point straight into
 * the channel, then run the common fallen-behind handling. */
static void
channel_fail_fallen_behind(struct channel *channel, const u8 *msg)
{
	/* cast_const2 lets the fromwire write through the const-qualified
	 * channel field. */
	if (!fromwire_channeld_fail_fallen_behind(channel, msg,
						  cast_const2(struct pubkey **,
							      &channel->future_per_commitment_point))) {
		channel_internal_error(channel,
				       "bad channel_fail_fallen_behind %s",
				       tal_hex(tmpctx, msg));
		return;
	}

	channel_fallen_behind(channel, msg);
}
|
|
|
|
|
2018-02-20 21:59:09 +01:00
|
|
|
/* channeld reports the shutdown handshake is done: hand the peer fd
 * over to a fresh closingd and advance the state machine.
 * @fds carries the single peer fd channeld passed back. */
static void peer_start_closingd_after_shutdown(struct channel *channel,
					       const u8 *msg,
					       const int *fds)
{
	struct peer_fd *peer_fd;

	if (!fromwire_channeld_shutdown_complete(msg)) {
		channel_internal_error(channel, "bad shutdown_complete: %s",
				       tal_hex(msg, msg));
		return;
	}
	peer_fd = new_peer_fd_arr(msg, fds);

	/* This sets channel->owner, closes down channeld. */
	peer_start_closingd(channel, peer_fd);

	/* We might have reconnected, so already be here. */
	if (channel->state == CHANNELD_SHUTTING_DOWN)
		channel_set_state(channel,
				  CHANNELD_SHUTTING_DOWN,
				  CLOSINGD_SIGEXCHANGE,
				  REASON_UNKNOWN,
				  "Start closingd");
}
|
|
|
|
|
2019-12-12 01:23:19 +01:00
|
|
|
static void forget(struct channel *channel)
|
2019-08-23 23:34:52 +02:00
|
|
|
{
|
|
|
|
struct command **forgets = tal_steal(tmpctx, channel->forgets);
|
|
|
|
channel->forgets = tal_arr(channel, struct command *, 0);
|
|
|
|
|
|
|
|
/* Forget the channel. */
|
|
|
|
delete_channel(channel);
|
|
|
|
|
|
|
|
for (size_t i = 0; i < tal_count(forgets); i++) {
|
|
|
|
assert(!forgets[i]->json_stream);
|
|
|
|
|
|
|
|
struct json_stream *response;
|
|
|
|
response = json_stream_success(forgets[i]);
|
2020-11-24 01:36:22 +01:00
|
|
|
json_add_string(response, "cancelled",
|
|
|
|
"Channel open canceled by RPC(after"
|
|
|
|
" fundchannel_complete)");
|
2019-08-23 23:34:52 +02:00
|
|
|
was_pending(command_success(forgets[i], response));
|
|
|
|
}
|
|
|
|
|
|
|
|
tal_free(forgets);
|
|
|
|
}
|
|
|
|
|
2019-12-12 01:23:19 +01:00
|
|
|
static void handle_error_channel(struct channel *channel,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
2020-08-25 03:33:16 +02:00
|
|
|
if (!fromwire_channeld_send_error_reply(msg)) {
|
2019-12-12 01:23:19 +01:00
|
|
|
channel_internal_error(channel, "bad send_error_reply: %s",
|
|
|
|
tal_hex(tmpctx, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
forget(channel);
|
|
|
|
}
|
|
|
|
|
2022-01-24 21:03:52 +01:00
|
|
|
static void handle_local_private_channel(struct channel *channel, const u8 *msg)
|
|
|
|
{
|
|
|
|
struct amount_sat capacity;
|
|
|
|
u8 *features;
|
|
|
|
|
|
|
|
if (!fromwire_channeld_local_private_channel(msg, msg, &capacity,
|
|
|
|
&features)) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad channeld_local_private_channel %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
tell_gossipd_local_private_channel(channel->peer->ld, channel,
|
|
|
|
capacity, features);
|
|
|
|
}
|
|
|
|
|
2021-12-04 12:27:06 +01:00
|
|
|
/* Abandon the channel with reason @why: tell the peer via channeld if
 * one is running, otherwise drop the channel immediately (forget() is
 * then triggered later by the send_error reply in the former case). */
static void forget_channel(struct channel *channel, const char *why)
{
	channel->error = towire_errorfmt(channel, &channel->cid, "%s", why);

	/* If the peer is connected, we let them know. Otherwise
	 * we just directly remove the channel */
	if (channel->owner)
		subd_send_msg(channel->owner,
			      take(towire_channeld_send_error(NULL, why)));
	else
		forget(channel);
}
|
|
|
|
|
2021-06-04 07:13:47 +02:00
|
|
|
/* channeld negotiated a channel-type upgrade with the peer.  The only
 * upgrade we support is turning on option_static_remotekey; anything
 * else (or a repeat upgrade) is an internal error. */
static void handle_channel_upgrade(struct channel *channel,
				   const u8 *msg)
{
	struct channel_type *newtype;

	if (!fromwire_channeld_upgraded(msg, msg, &newtype)) {
		channel_internal_error(channel, "bad handle_channel_upgrade: %s",
				       tal_hex(tmpctx, msg));
		return;
	}

	/* You can currently only upgrade to turn on option_static_remotekey:
	 * if they somehow thought anything else we need to close channel! */
	/* 0x7FFF... (s64 max) is the "never" sentinel: any other value
	 * means static_remotekey is already active. */
	if (channel->static_remotekey_start[LOCAL] != 0x7FFFFFFFFFFFFFFFULL) {
		channel_internal_error(channel,
				       "channel_upgrade already static_remotekey? %s",
				       tal_hex(tmpctx, msg));
		return;
	}

	if (!channel_type_eq(newtype, channel_type_static_remotekey(tmpctx))) {
		channel_internal_error(channel,
				       "channel_upgrade must be static_remotekey, not %s",
				       fmt_featurebits(tmpctx, newtype->features));
		return;
	}

	/* Adopt the new type and mark the commitment index from which
	 * static_remotekey applies on each side. */
	tal_free(channel->type);
	channel->type = channel_type_dup(channel, newtype);
	channel->static_remotekey_start[LOCAL] = channel->next_index[LOCAL];
	channel->static_remotekey_start[REMOTE] = channel->next_index[REMOTE];
	log_debug(channel->log,
		  "option_static_remotekey enabled at %"PRIu64"/%"PRIu64,
		  channel->static_remotekey_start[LOCAL],
		  channel->static_remotekey_start[REMOTE]);

	wallet_channel_save(channel->peer->ld->wallet, channel);
}
|
|
|
|
|
2023-10-24 05:50:11 +02:00
|
|
|
static void handle_local_channel_update(struct channel *channel,
|
|
|
|
const u8 *msg)
|
|
|
|
{
|
|
|
|
bool enable;
|
|
|
|
|
|
|
|
if (!fromwire_channeld_local_channel_update(msg, &enable)) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"bad channeld_local_channel_update %s",
|
|
|
|
tal_hex(channel, msg));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
tell_gossipd_local_channel_update(channel->peer->ld, channel, enable);
|
|
|
|
}
|
|
|
|
|
2018-02-20 21:59:09 +01:00
|
|
|
/* Dispatch a message arriving from our channeld subdaemon.
 * Returns the number of fds still needed for this message (the subd
 * layer re-delivers once they arrive), or 0 when fully handled. */
static unsigned channel_msg(struct subd *sd, const u8 *msg, const int *fds)
{
	enum channeld_wire t = fromwire_peektype(msg);

	switch (t) {
	case WIRE_CHANNELD_SENDING_COMMITSIG:
		peer_sending_commitsig(sd->channel, msg);
		break;
	case WIRE_CHANNELD_GOT_COMMITSIG:
		peer_got_commitsig(sd->channel, msg);
		break;
	case WIRE_CHANNELD_GOT_REVOKE:
		peer_got_revoke(sd->channel, msg);
		break;
	case WIRE_CHANNELD_GOT_CHANNEL_READY:
		peer_got_channel_ready(sd->channel, msg);
		break;
	case WIRE_CHANNELD_GOT_ANNOUNCEMENT:
		peer_got_announcement(sd->channel, msg);
		break;
	case WIRE_CHANNELD_GOT_SHUTDOWN:
		peer_got_shutdown(sd->channel, msg);
		break;
	case WIRE_CHANNELD_SHUTDOWN_COMPLETE:
		/* We expect 1 fd. */
		if (!fds)
			return 1;
		peer_start_closingd_after_shutdown(sd->channel, msg, fds);
		break;
	case WIRE_CHANNELD_FAIL_FALLEN_BEHIND:
		channel_fail_fallen_behind(sd->channel, msg);
		break;
	case WIRE_CHANNELD_SEND_ERROR_REPLY:
		handle_error_channel(sd->channel, msg);
		break;
	case WIRE_CHANNELD_LOCAL_CHANNEL_UPDATE:
		handle_local_channel_update(sd->channel, msg);
		break;
	case WIRE_CHANNELD_LOCAL_CHANNEL_ANNOUNCEMENT:
		tell_gossipd_local_channel_announce(sd->ld, sd->channel, msg);
		break;
	case WIRE_CHANNELD_LOCAL_PRIVATE_CHANNEL:
		handle_local_private_channel(sd->channel, msg);
		break;
	case WIRE_CHANNELD_SPLICE_CONFIRMED_INIT:
		handle_splice_confirmed_init(sd->ld, sd->channel, msg);
		break;
	case WIRE_CHANNELD_SPLICE_FEERATE_ERROR:
		handle_splice_feerate_error(sd->ld, sd->channel, msg);
		break;
	case WIRE_CHANNELD_SPLICE_FUNDING_ERROR:
		handle_splice_funding_error(sd->ld, sd->channel, msg);
		break;
	case WIRE_CHANNELD_SPLICE_STATE_ERROR:
		handle_splice_state_error(sd->ld, sd->channel, msg);
		break;
	case WIRE_CHANNELD_SPLICE_CONFIRMED_UPDATE:
		handle_splice_confirmed_update(sd->ld, sd->channel, msg);
		break;
	case WIRE_CHANNELD_SPLICE_LOOKUP_TX:
		handle_splice_lookup_tx(sd->ld, sd->channel, msg);
		break;
	case WIRE_CHANNELD_SPLICE_CONFIRMED_SIGNED:
		handle_splice_confirmed_signed(sd->ld, sd->channel, msg);
		break;
	case WIRE_CHANNELD_ADD_INFLIGHT:
		handle_add_inflight(sd->ld, sd->channel, msg);
		break;
	case WIRE_CHANNELD_UPDATE_INFLIGHT:
		handle_update_inflight(sd->ld, sd->channel, msg);
		break;
	case WIRE_CHANNELD_GOT_SPLICE_LOCKED:
		handle_peer_splice_locked(sd->channel, msg);
		break;
	case WIRE_CHANNELD_UPGRADED:
		handle_channel_upgrade(sd->channel, msg);
		break;
	/* And we never get these from channeld. */
	case WIRE_CHANNELD_INIT:
	case WIRE_CHANNELD_FUNDING_DEPTH:
	case WIRE_CHANNELD_OFFER_HTLC:
	case WIRE_CHANNELD_FULFILL_HTLC:
	case WIRE_CHANNELD_FAIL_HTLC:
	case WIRE_CHANNELD_GOT_COMMITSIG_REPLY:
	case WIRE_CHANNELD_GOT_REVOKE_REPLY:
	case WIRE_CHANNELD_SENDING_COMMITSIG_REPLY:
	case WIRE_CHANNELD_SEND_SHUTDOWN:
	case WIRE_CHANNELD_DEV_REENABLE_COMMIT:
	case WIRE_CHANNELD_FEERATES:
	case WIRE_CHANNELD_BLOCKHEIGHT:
	case WIRE_CHANNELD_DEV_MEMLEAK:
	case WIRE_CHANNELD_DEV_QUIESCE:
	case WIRE_CHANNELD_GOT_INFLIGHT:
	/* Replies go to requests. */
	case WIRE_CHANNELD_OFFER_HTLC_REPLY:
	case WIRE_CHANNELD_DEV_REENABLE_COMMIT_REPLY:
	case WIRE_CHANNELD_DEV_MEMLEAK_REPLY:
	case WIRE_CHANNELD_SEND_ERROR:
	case WIRE_CHANNELD_SPLICE_INIT:
	case WIRE_CHANNELD_SPLICE_UPDATE:
	case WIRE_CHANNELD_SPLICE_LOOKUP_TX_RESULT:
	case WIRE_CHANNELD_SPLICE_SIGNED:
	case WIRE_CHANNELD_DEV_QUIESCE_REPLY:
		break;
	}

	return 0;
}
|
|
|
|
|
2022-07-18 14:12:18 +02:00
|
|
|
bool peer_start_channeld(struct channel *channel,
|
2022-01-11 02:13:59 +01:00
|
|
|
struct peer_fd *peer_fd,
|
2020-09-10 21:34:18 +02:00
|
|
|
const u8 *fwd_msg,
|
2021-06-15 07:07:10 +02:00
|
|
|
bool reconnected,
|
2022-03-22 21:27:30 +01:00
|
|
|
bool reestablish_only)
|
2018-02-20 21:59:09 +01:00
|
|
|
{
|
2018-07-09 13:17:59 +02:00
|
|
|
u8 *initmsg;
|
2018-02-20 21:59:09 +01:00
|
|
|
int hsmfd;
|
2020-04-03 05:14:07 +02:00
|
|
|
const struct existing_htlc **htlcs;
|
2019-06-03 14:05:18 +02:00
|
|
|
struct short_channel_id scid;
|
2018-02-20 21:59:09 +01:00
|
|
|
u64 num_revocations;
|
|
|
|
struct lightningd *ld = channel->peer->ld;
|
|
|
|
const struct config *cfg = &ld->config;
|
2018-04-26 06:51:02 +02:00
|
|
|
bool reached_announce_depth;
|
2018-08-17 06:16:34 +02:00
|
|
|
struct secret last_remote_per_commit_secret;
|
2019-05-14 11:36:05 +02:00
|
|
|
secp256k1_ecdsa_signature *remote_ann_node_sig, *remote_ann_bitcoin_sig;
|
2020-05-07 02:49:43 +02:00
|
|
|
struct penalty_base *pbases;
|
2023-07-24 07:13:38 +02:00
|
|
|
u32 min_feerate, max_feerate, curr_blockheight;
|
2023-07-27 23:37:52 +02:00
|
|
|
struct channel_inflight *inflight;
|
|
|
|
struct inflight **inflights;
|
|
|
|
struct bitcoin_txid txid;
|
2018-02-20 21:59:09 +01:00
|
|
|
|
2018-07-09 13:17:59 +02:00
|
|
|
hsmfd = hsm_get_client_fd(ld, &channel->peer->id,
|
|
|
|
channel->dbid,
|
2023-08-07 07:51:40 +02:00
|
|
|
HSM_PERM_SIGN_GOSSIP
|
|
|
|
| HSM_PERM_ECDH
|
|
|
|
| HSM_PERM_COMMITMENT_POINT
|
|
|
|
| HSM_PERM_SIGN_REMOTE_TX
|
|
|
|
| HSM_PERM_SIGN_ONCHAIN_TX
|
2023-08-07 07:51:41 +02:00
|
|
|
| HSM_PERM_SIGN_CLOSING_TX
|
2023-09-25 00:12:02 +02:00
|
|
|
| HSM_PERM_SIGN_SPLICE_TX
|
|
|
|
| HSM_PERM_LOCK_OUTPOINT);
|
2018-02-20 21:59:09 +01:00
|
|
|
|
2018-04-26 06:51:01 +02:00
|
|
|
channel_set_owner(channel,
|
2022-03-29 01:49:23 +02:00
|
|
|
new_channel_subd(channel, ld,
|
2020-12-01 21:49:35 +01:00
|
|
|
"lightning_channeld",
|
2021-01-20 02:51:15 +01:00
|
|
|
channel,
|
2019-11-17 12:40:33 +01:00
|
|
|
&channel->peer->id,
|
2018-04-26 06:51:01 +02:00
|
|
|
channel->log, true,
|
2020-08-25 03:33:16 +02:00
|
|
|
channeld_wire_name,
|
2018-02-20 21:59:09 +01:00
|
|
|
channel_msg,
|
|
|
|
channel_errmsg,
|
2018-02-23 06:53:47 +01:00
|
|
|
channel_set_billboard,
|
2022-01-11 02:13:59 +01:00
|
|
|
take(&peer_fd->fd),
|
2019-07-25 04:47:34 +02:00
|
|
|
take(&hsmfd), NULL));
|
2018-02-20 21:59:09 +01:00
|
|
|
|
|
|
|
if (!channel->owner) {
|
2020-03-25 05:26:44 +01:00
|
|
|
log_broken(channel->log, "Could not subdaemon channel: %s",
|
|
|
|
strerror(errno));
|
2023-10-22 06:07:31 +02:00
|
|
|
force_peer_disconnect(ld, channel->peer,
|
|
|
|
"Failed to create channeld");
|
2022-07-18 14:12:18 +02:00
|
|
|
return false;
|
2018-02-20 21:59:09 +01:00
|
|
|
}
|
|
|
|
|
2020-04-03 05:14:07 +02:00
|
|
|
htlcs = peer_htlcs(tmpctx, channel);
|
2018-02-20 21:59:09 +01:00
|
|
|
|
|
|
|
if (channel->scid) {
|
2019-06-03 14:05:18 +02:00
|
|
|
scid = *channel->scid;
|
2019-09-22 04:08:43 +02:00
|
|
|
reached_announce_depth
|
|
|
|
= is_scid_depth_announceable(&scid,
|
|
|
|
get_block_height(ld->topology));
|
2018-04-26 06:51:02 +02:00
|
|
|
log_debug(channel->log, "Already have funding locked in%s",
|
|
|
|
reached_announce_depth
|
|
|
|
? " (and ready to announce)" : "");
|
2018-02-20 21:59:09 +01:00
|
|
|
} else {
|
|
|
|
log_debug(channel->log, "Waiting for funding confirmations");
|
2019-06-03 14:05:18 +02:00
|
|
|
memset(&scid, 0, sizeof(scid));
|
2018-04-26 06:51:02 +02:00
|
|
|
reached_announce_depth = false;
|
2018-02-20 21:59:09 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
num_revocations = revocations_received(&channel->their_shachain.chain);
|
|
|
|
|
2018-08-17 06:16:34 +02:00
|
|
|
/* BOLT #2:
|
2019-08-02 05:24:25 +02:00
|
|
|
* - if `next_revocation_number` equals 0:
|
2018-08-17 06:16:34 +02:00
|
|
|
* - MUST set `your_last_per_commitment_secret` to all zeroes
|
|
|
|
* - otherwise:
|
|
|
|
* - MUST set `your_last_per_commitment_secret` to the last
|
|
|
|
* `per_commitment_secret` it received
|
|
|
|
*/
|
|
|
|
if (num_revocations == 0)
|
|
|
|
memset(&last_remote_per_commit_secret, 0,
|
|
|
|
sizeof(last_remote_per_commit_secret));
|
|
|
|
else if (!shachain_get_secret(&channel->their_shachain.chain,
|
|
|
|
num_revocations-1,
|
|
|
|
&last_remote_per_commit_secret)) {
|
|
|
|
channel_fail_permanent(channel,
|
feat: adds state change cause and message
This adds a `state_change` 'cause' to a channel.
A 'cause' is some initial 'reason' a channel was created or closed by:
/* Anything other than the reasons below. Should not happen. */
REASON_UNKNOWN,
/* Unconscious internal reasons, e.g. dev fail of a channel. */
REASON_LOCAL,
/* The operator or a plugin opened or closed a channel by intention. */
REASON_USER,
/* The remote closed or funded a channel with us by intention. */
REASON_REMOTE,
/* E.g. We need to close a channel because of bad signatures and such. */
REASON_PROTOCOL,
/* A channel was closed onchain, while we were offline. */
/* Note: This is very likely a conscious remote decision. */
REASON_ONCHAIN
If a 'cause' is known and a subsequent state change is made with
`REASON_UNKNOWN` the preceding cause will be used as reason, since a lot
(all `REASON_UNKNOWN`) state changes are a subsequent consequences of a prior
cause: local, user, remote, protocol or onchain.
Changelog-Added: Plugins: Channel closure resaon/cause to channel_state_changed notification
2020-10-28 11:46:12 +01:00
|
|
|
REASON_LOCAL,
|
2018-08-17 06:16:34 +02:00
|
|
|
"Could not get revocation secret %"PRIu64,
|
|
|
|
num_revocations-1);
|
2022-07-18 14:12:18 +02:00
|
|
|
return false;
|
2018-08-17 06:16:34 +02:00
|
|
|
}
|
|
|
|
|
2018-02-20 21:59:09 +01:00
|
|
|
/* Warn once. */
|
2023-07-21 09:16:22 +02:00
|
|
|
if (channel->ignore_fee_limits || ld->config.ignore_fee_limits)
|
2023-07-07 18:12:58 +02:00
|
|
|
log_unusual(channel->log, "Ignoring fee limits!");
|
2018-02-20 21:59:09 +01:00
|
|
|
|
2020-11-24 01:36:22 +01:00
|
|
|
if (!wallet_remote_ann_sigs_load(tmpctx, channel->peer->ld->wallet,
|
|
|
|
channel->dbid,
|
|
|
|
&remote_ann_node_sig,
|
|
|
|
&remote_ann_bitcoin_sig)) {
|
2019-05-14 11:36:05 +02:00
|
|
|
channel_internal_error(channel,
|
2020-11-24 01:36:22 +01:00
|
|
|
"Could not load remote announcement"
|
|
|
|
" signatures");
|
2022-07-18 14:12:18 +02:00
|
|
|
return false;
|
2019-05-14 11:36:05 +02:00
|
|
|
}
|
|
|
|
|
2020-05-07 02:49:43 +02:00
|
|
|
pbases = wallet_penalty_base_load_for_channel(
|
|
|
|
tmpctx, channel->peer->ld->wallet, channel->dbid);
|
|
|
|
|
2021-12-23 21:16:35 +01:00
|
|
|
struct ext_key final_ext_key;
|
|
|
|
if (bip32_key_from_parent(
|
2023-03-21 04:58:15 +01:00
|
|
|
ld->bip32_base,
|
2021-12-23 21:16:35 +01:00
|
|
|
channel->final_key_idx,
|
|
|
|
BIP32_FLAG_KEY_PUBLIC,
|
|
|
|
&final_ext_key) != WALLY_OK) {
|
|
|
|
channel_internal_error(channel,
|
|
|
|
"Could not derive final_ext_key %"PRIu64,
|
|
|
|
channel->final_key_idx);
|
2022-07-18 14:12:18 +02:00
|
|
|
return false;
|
2021-12-23 21:16:35 +01:00
|
|
|
}
|
|
|
|
|
2023-06-26 01:13:21 +02:00
|
|
|
/* For anchors, we just need the commitment tx to relay. */
|
|
|
|
if (channel_type_has_anchors(channel->type))
|
|
|
|
min_feerate = get_feerate_floor(ld->topology);
|
|
|
|
else
|
|
|
|
min_feerate = feerate_min(ld, NULL);
|
2023-07-21 09:16:22 +02:00
|
|
|
max_feerate = feerate_max(ld, NULL);
|
|
|
|
|
|
|
|
if (channel->ignore_fee_limits || ld->config.ignore_fee_limits) {
|
|
|
|
min_feerate = 1;
|
|
|
|
max_feerate = 0xFFFFFFFF;
|
|
|
|
}
|
2023-06-26 01:13:21 +02:00
|
|
|
|
2023-07-24 07:13:38 +02:00
|
|
|
/* Make sure we don't go backwards on blockheights */
|
|
|
|
curr_blockheight = get_block_height(ld->topology);
|
|
|
|
if (curr_blockheight < get_blockheight(channel->blockheight_states,
|
|
|
|
channel->opener, LOCAL)) {
|
|
|
|
|
|
|
|
u32 last_height = get_blockheight(channel->blockheight_states,
|
|
|
|
channel->opener, LOCAL);
|
|
|
|
|
|
|
|
log_debug(channel->log,
|
|
|
|
"current blockheight is (%d),"
|
|
|
|
" last saved (%d). setting to last saved. %s",
|
|
|
|
curr_blockheight,
|
|
|
|
last_height,
|
|
|
|
!topology_synced(ld->topology) ? "(not synced)" : "");
|
|
|
|
|
|
|
|
curr_blockheight = last_height;
|
|
|
|
}
|
|
|
|
|
2023-05-12 00:07:32 +02:00
|
|
|
inflights = tal_arr(tmpctx, struct inflight *, 0);
|
2023-07-27 23:37:52 +02:00
|
|
|
list_for_each(&channel->inflights, inflight, list) {
|
2023-08-16 04:58:53 +02:00
|
|
|
struct inflight *infcopy;
|
|
|
|
|
|
|
|
if (inflight->splice_locked_memonly)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
infcopy = tal(inflights, struct inflight);
|
2023-05-12 00:07:32 +02:00
|
|
|
|
2023-07-27 23:37:52 +02:00
|
|
|
infcopy->outpoint = inflight->funding->outpoint;
|
|
|
|
infcopy->amnt = inflight->funding->total_funds;
|
|
|
|
infcopy->splice_amnt = inflight->funding->splice_amnt;
|
2023-09-19 23:35:57 +02:00
|
|
|
if (inflight->last_tx)
|
|
|
|
infcopy->last_tx = tal_dup(infcopy, struct bitcoin_tx, inflight->last_tx);
|
|
|
|
else
|
|
|
|
infcopy->last_tx = NULL;
|
2023-07-27 23:37:52 +02:00
|
|
|
infcopy->last_sig = inflight->last_sig;
|
|
|
|
infcopy->i_am_initiator = inflight->i_am_initiator;
|
2023-05-12 00:07:32 +02:00
|
|
|
tal_wally_start();
|
|
|
|
wally_psbt_clone_alloc(inflight->funding_psbt, 0, &infcopy->psbt);
|
|
|
|
tal_wally_end_onto(infcopy, infcopy->psbt, struct wally_psbt);
|
2023-07-27 23:37:52 +02:00
|
|
|
tal_arr_expand(&inflights, infcopy);
|
|
|
|
}
|
|
|
|
|
2020-08-25 03:33:16 +02:00
|
|
|
initmsg = towire_channeld_init(tmpctx,
|
2021-12-29 04:26:40 +01:00
|
|
|
chainparams,
|
|
|
|
ld->our_features,
|
|
|
|
&channel->cid,
|
|
|
|
&channel->funding,
|
|
|
|
channel->funding_sats,
|
|
|
|
channel->minimum_depth,
|
2023-07-24 07:13:38 +02:00
|
|
|
curr_blockheight,
|
2021-12-29 04:26:40 +01:00
|
|
|
channel->blockheight_states,
|
|
|
|
channel->lease_expiry,
|
|
|
|
&channel->our_config,
|
|
|
|
&channel->channel_info.their_config,
|
|
|
|
channel->fee_states,
|
2023-06-26 01:13:21 +02:00
|
|
|
min_feerate,
|
2023-07-21 09:16:22 +02:00
|
|
|
max_feerate,
|
lightningd: clean up feerate handling, deprecate old terms.
Drop try_get_feerate() in favor of explicit feerate_for_deadline() and
smoothed_feerate_for_deadline().
This shows us everywhere we deal with old-style feerates by names.
`delayed_to_us` and `htlc_resolution` will be moving to dynamic fees,
so deprecate those.
Note that "penalty" is still used for generating penalty txs for
watchtowers, and "unilateral_close" still used until we get zero-fee
anchors.
Changelog-Added: JSON-RPC: `feerates` `estimates` array shows fee estimates by blockcount from underlying plugin (usually *bcli*).
Changelog-Changed: JSON-RPC: `close`, `fundchannel`, `fundpsbt`, `multifundchannel`, `multiwithdraw`, `txprepare`, `upgradewallet`, `withdraw` `feerate` (`feerange` for `close`) value *slow* is now 100 block-estimate, not half of 100-block estimate.
Changelog-Deprecated: JSON-RPC: `close`, `fundchannel`, `fundpsbt`, `multifundchannel`, `multiwithdraw`, `txprepare`, `upgradewallet`, `withdraw` `feerate` (`feerange` for `close`) expressed as, "delayed_to_us", "htlc_resolution", "max_acceptable" or "min_acceptable". Use explicit block counts or *slow*/*normal*/*urgent*/*minimum*.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2023-04-07 06:43:39 +02:00
|
|
|
penalty_feerate(ld->topology),
|
2021-12-29 04:26:40 +01:00
|
|
|
&channel->last_sig,
|
|
|
|
&channel->channel_info.remote_fundingkey,
|
|
|
|
&channel->channel_info.theirbase,
|
|
|
|
&channel->channel_info.remote_per_commit,
|
|
|
|
&channel->channel_info.old_remote_per_commit,
|
|
|
|
channel->opener,
|
|
|
|
channel->our_msat,
|
|
|
|
&channel->local_basepoints,
|
|
|
|
&channel->local_funding_pubkey,
|
|
|
|
&ld->id,
|
|
|
|
&channel->peer->id,
|
|
|
|
cfg->commit_time_ms,
|
|
|
|
channel->last_was_revoke,
|
|
|
|
channel->last_sent_commit,
|
|
|
|
channel->next_index[LOCAL],
|
|
|
|
channel->next_index[REMOTE],
|
|
|
|
num_revocations,
|
|
|
|
channel->next_htlc_id,
|
|
|
|
htlcs,
|
|
|
|
channel->scid != NULL,
|
2022-09-10 04:10:31 +02:00
|
|
|
channel->remote_channel_ready,
|
2021-12-29 04:26:40 +01:00
|
|
|
&scid,
|
|
|
|
reconnected,
|
2021-06-15 07:07:10 +02:00
|
|
|
/* Anything that indicates we are or have
|
|
|
|
* shut down */
|
2023-10-02 00:59:49 +02:00
|
|
|
channel_state_closing(channel->state),
|
2021-12-29 04:26:40 +01:00
|
|
|
channel->shutdown_scriptpubkey[REMOTE] != NULL,
|
2021-12-23 21:16:35 +01:00
|
|
|
channel->final_key_idx,
|
|
|
|
&final_ext_key,
|
2021-12-29 04:26:40 +01:00
|
|
|
channel->shutdown_scriptpubkey[LOCAL],
|
|
|
|
channel->channel_flags,
|
|
|
|
fwd_msg,
|
|
|
|
reached_announce_depth,
|
|
|
|
&last_remote_per_commit_secret,
|
|
|
|
channel->peer->their_features,
|
|
|
|
channel->remote_upfront_shutdown_script,
|
|
|
|
remote_ann_node_sig,
|
|
|
|
remote_ann_bitcoin_sig,
|
|
|
|
channel->type,
|
2023-09-21 07:36:28 +02:00
|
|
|
ld->dev_fast_gossip,
|
|
|
|
ld->dev_disable_commit == -1
|
2021-12-29 04:26:40 +01:00
|
|
|
? NULL
|
|
|
|
: (u32 *)&ld->dev_disable_commit,
|
|
|
|
pbases,
|
2022-01-24 21:02:52 +01:00
|
|
|
reestablish_only,
|
2023-07-27 23:37:52 +02:00
|
|
|
ld->experimental_upgrade_protocol,
|
|
|
|
cast_const2(const struct inflight **,
|
|
|
|
inflights));
|
2018-02-20 21:59:09 +01:00
|
|
|
|
|
|
|
/* We don't expect a response: we are triggered by funding_depth_cb. */
|
|
|
|
subd_send_msg(channel->owner, take(initmsg));
|
2018-08-22 04:33:32 +02:00
|
|
|
|
2021-06-22 20:25:59 +02:00
|
|
|
/* On restart, feerate and blockheight
|
|
|
|
* might not be what we expect: adjust now. */
|
|
|
|
if (channel->opener == LOCAL) {
|
2018-08-22 04:33:32 +02:00
|
|
|
try_update_feerates(ld, channel);
|
2021-06-22 20:25:59 +02:00
|
|
|
try_update_blockheight(ld, channel,
|
|
|
|
get_block_height(ld->topology));
|
|
|
|
}
|
2022-04-25 13:03:43 +02:00
|
|
|
|
2023-07-27 23:37:52 +02:00
|
|
|
/* FIXME: DTODO: Use a pointer to a txid instead of zero'ing one out. */
|
|
|
|
memset(&txid, 0, sizeof(txid));
|
|
|
|
|
2022-04-25 13:03:43 +02:00
|
|
|
/* Artificial confirmation event for zeroconf */
|
2022-04-26 14:53:58 +02:00
|
|
|
subd_send_msg(channel->owner,
|
|
|
|
take(towire_channeld_funding_depth(
|
2023-07-27 23:37:52 +02:00
|
|
|
NULL, channel->scid, channel->alias[LOCAL], 0, false,
|
|
|
|
&txid)));
|
2022-07-18 14:12:18 +02:00
|
|
|
return true;
|
2018-02-20 21:59:09 +01:00
|
|
|
}
|
2018-04-23 12:08:01 +02:00
|
|
|
|
2023-10-02 00:59:49 +02:00
|
|
|
/* Actually send the depth message to channeld */
|
|
|
|
void channeld_tell_depth(struct channel *channel,
|
|
|
|
const struct bitcoin_txid *txid,
|
|
|
|
u32 depth)
|
2023-10-02 00:59:49 +02:00
|
|
|
{
|
2023-10-02 00:59:49 +02:00
|
|
|
if (!channel->owner) {
|
|
|
|
log_debug(channel->log,
|
|
|
|
"Funding tx %s confirmed, but peer disconnected",
|
2023-10-02 00:59:49 +02:00
|
|
|
type_to_string(tmpctx, struct bitcoin_txid, txid));
|
2023-10-02 00:59:49 +02:00
|
|
|
return;
|
2023-10-02 00:59:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
log_debug(channel->log,
|
|
|
|
"Sending towire_channeld_funding_depth with channel state %s",
|
|
|
|
channel_state_str(channel->state));
|
|
|
|
|
|
|
|
subd_send_msg(channel->owner,
|
|
|
|
take(towire_channeld_funding_depth(
|
|
|
|
NULL, channel->scid, channel->alias[LOCAL], depth,
|
|
|
|
channel->state == CHANNELD_AWAITING_SPLICE, txid)));
|
|
|
|
}
|
|
|
|
|
2018-05-06 15:32:01 +02:00
|
|
|
/* Check if we are the fundee of this channel, the channel
|
|
|
|
* funding transaction is still not yet seen onchain, and
|
|
|
|
* it has been too long since the channel was first opened.
|
|
|
|
* If so, we should forget the channel. */
|
|
|
|
static bool
|
|
|
|
is_fundee_should_forget(struct lightningd *ld,
|
|
|
|
struct channel *channel,
|
|
|
|
u32 block_height)
|
|
|
|
{
|
|
|
|
/* BOLT #2:
|
|
|
|
*
|
|
|
|
* A non-funding node (fundee):
|
|
|
|
* - SHOULD forget the channel if it does not see the
|
2021-04-05 23:12:11 +02:00
|
|
|
* correct funding transaction after a timeout of 2016 blocks.
|
2018-05-06 15:32:01 +02:00
|
|
|
*/
|
2023-09-21 07:36:28 +02:00
|
|
|
u32 max_funding_unconfirmed;
|
|
|
|
|
|
|
|
if (ld->developer)
|
|
|
|
max_funding_unconfirmed = ld->dev_max_funding_unconfirmed;
|
|
|
|
else
|
|
|
|
max_funding_unconfirmed = 2016;
|
2018-05-06 15:32:01 +02:00
|
|
|
|
|
|
|
/* Only applies if we are fundee. */
|
2019-09-09 18:11:24 +02:00
|
|
|
if (channel->opener == LOCAL)
|
2018-05-06 15:32:01 +02:00
|
|
|
return false;
|
|
|
|
|
|
|
|
/* Does not apply if we already saw the funding tx. */
|
|
|
|
if (channel->scid)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* Not even reached previous starting blocknum.
|
|
|
|
* (e.g. if --rescan option is used) */
|
|
|
|
if (block_height < channel->first_blocknum)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* Timeout in blocks not yet reached. */
|
2018-05-07 01:01:49 +02:00
|
|
|
if (block_height - channel->first_blocknum < max_funding_unconfirmed)
|
2018-05-06 15:32:01 +02:00
|
|
|
return false;
|
|
|
|
|
2021-07-02 22:21:54 +02:00
|
|
|
/* If we've got funds in the channel, don't forget it */
|
|
|
|
if (!amount_sat_zero(channel->our_funds))
|
|
|
|
return false;
|
|
|
|
|
2018-05-06 15:32:01 +02:00
|
|
|
/* Ah forget it! */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Notify all channels of new blocks. */
|
|
|
|
void channel_notify_new_block(struct lightningd *ld,
|
|
|
|
u32 block_height)
|
|
|
|
{
|
|
|
|
struct peer *peer;
|
|
|
|
struct channel *channel;
|
|
|
|
struct channel **to_forget = tal_arr(NULL, struct channel *, 0);
|
|
|
|
size_t i;
|
2023-01-18 06:04:32 +01:00
|
|
|
struct peer_node_id_map_iter it;
|
2018-05-06 15:32:01 +02:00
|
|
|
|
2023-01-18 06:04:32 +01:00
|
|
|
/* FIXME: keep separate block-aware channel structure instead? */
|
|
|
|
for (peer = peer_node_id_map_first(ld->peers, &it);
|
|
|
|
peer;
|
|
|
|
peer = peer_node_id_map_next(ld->peers, &it)) {
|
|
|
|
list_for_each(&peer->channels, channel, list) {
|
2023-10-02 00:59:51 +02:00
|
|
|
if (channel_state_uncommitted(channel->state))
|
2021-01-22 01:55:23 +01:00
|
|
|
continue;
|
2018-05-06 15:32:01 +02:00
|
|
|
if (is_fundee_should_forget(ld, channel, block_height)) {
|
2019-01-15 04:51:27 +01:00
|
|
|
tal_arr_expand(&to_forget, channel);
|
2021-06-22 20:25:59 +02:00
|
|
|
} else
|
|
|
|
/* Let channels know about new blocks,
|
|
|
|
* required for lease updates */
|
|
|
|
try_update_blockheight(ld, channel,
|
|
|
|
block_height);
|
2021-01-22 01:55:23 +01:00
|
|
|
}
|
2018-05-06 15:32:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Need to forget in a separate loop, else the above
|
|
|
|
* nested loops may crash due to the last channel of
|
|
|
|
* a peer also deleting the peer, making the inner
|
|
|
|
* loop crash.
|
|
|
|
* list_for_each_safe does not work because it is not
|
|
|
|
* just the freeing of the channel that occurs, but the
|
|
|
|
* potential destruction of the peer that invalidates
|
|
|
|
* memory the inner loop is accessing. */
|
|
|
|
for (i = 0; i < tal_count(to_forget); ++i) {
|
|
|
|
channel = to_forget[i];
|
|
|
|
/* Report it first. */
|
|
|
|
log_unusual(channel->log,
|
|
|
|
"Forgetting channel: "
|
|
|
|
"It has been %"PRIu32" blocks without the "
|
|
|
|
"funding transaction %s getting deeply "
|
|
|
|
"confirmed. "
|
|
|
|
"We are fundee and can forget channel without "
|
|
|
|
"loss of funds.",
|
|
|
|
block_height - channel->first_blocknum,
|
|
|
|
type_to_string(tmpctx, struct bitcoin_txid,
|
2021-10-13 05:45:36 +02:00
|
|
|
&channel->funding.txid));
|
2018-08-02 08:49:56 +02:00
|
|
|
/* FIXME: Send an error packet for this case! */
|
2018-05-06 15:32:01 +02:00
|
|
|
/* And forget it. */
|
|
|
|
delete_channel(channel);
|
|
|
|
}
|
|
|
|
|
|
|
|
tal_free(to_forget);
|
|
|
|
}
|
2019-08-23 23:34:52 +02:00
|
|
|
|
2019-09-19 08:44:24 +02:00
|
|
|
/* Since this could vanish while we're checking with bitcoind, we need to save
|
|
|
|
* the details and re-lookup.
|
|
|
|
*
|
|
|
|
* channel_id *should* be unique, but it can be set by the counterparty, so
|
|
|
|
* we cannot rely on that! */
|
|
|
|
struct channel_to_cancel {
|
|
|
|
struct node_id peer;
|
|
|
|
struct channel_id cid;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void process_check_funding_broadcast(struct bitcoind *bitcoind,
|
2019-08-23 23:34:52 +02:00
|
|
|
const struct bitcoin_tx_output *txout,
|
|
|
|
void *arg)
|
|
|
|
{
|
2019-09-19 08:44:24 +02:00
|
|
|
struct channel_to_cancel *cc = arg;
|
|
|
|
struct peer *peer;
|
|
|
|
struct channel *cancel;
|
|
|
|
|
|
|
|
/* Peer could have errored out while we were waiting */
|
|
|
|
peer = peer_by_id(bitcoind->ld, &cc->peer);
|
|
|
|
if (!peer)
|
2020-09-08 11:39:52 +02:00
|
|
|
goto cleanup;
|
2019-09-19 08:44:24 +02:00
|
|
|
cancel = find_channel_by_id(peer, &cc->cid);
|
|
|
|
if (!cancel)
|
2020-09-08 11:39:52 +02:00
|
|
|
goto cleanup;
|
2019-08-23 23:34:52 +02:00
|
|
|
|
|
|
|
if (txout != NULL) {
|
|
|
|
for (size_t i = 0; i < tal_count(cancel->forgets); i++)
|
2020-06-24 06:34:26 +02:00
|
|
|
was_pending(command_fail(cancel->forgets[i],
|
|
|
|
FUNDING_CANCEL_NOT_SAFE,
|
2019-08-23 23:34:52 +02:00
|
|
|
"The funding transaction has been broadcast, "
|
|
|
|
"please consider `close` or `dev-fail`! "));
|
|
|
|
tal_free(cancel->forgets);
|
|
|
|
cancel->forgets = tal_arr(cancel, struct command *, 0);
|
2020-09-08 11:39:52 +02:00
|
|
|
goto cleanup;
|
2019-08-23 23:34:52 +02:00
|
|
|
}
|
|
|
|
|
2019-12-12 01:50:30 +01:00
|
|
|
char *error_reason = "Cancel channel by our RPC "
|
|
|
|
"command before funding "
|
|
|
|
"transaction broadcast.";
|
|
|
|
forget_channel(cancel, error_reason);
|
2020-09-08 11:39:52 +02:00
|
|
|
|
|
|
|
cleanup:
|
|
|
|
tal_free(cc);
|
|
|
|
return;
|
2019-08-23 23:34:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
struct command_result *cancel_channel_before_broadcast(struct command *cmd,
|
2020-06-24 06:34:26 +02:00
|
|
|
struct peer *peer)
|
2019-08-23 23:34:52 +02:00
|
|
|
{
|
2019-09-19 08:31:30 +02:00
|
|
|
struct channel *cancel_channel;
|
2019-09-19 08:44:24 +02:00
|
|
|
struct channel_to_cancel *cc = tal(cmd, struct channel_to_cancel);
|
2020-06-24 06:34:26 +02:00
|
|
|
struct channel *channel;
|
2019-08-23 23:34:52 +02:00
|
|
|
|
2019-09-19 08:44:24 +02:00
|
|
|
cc->peer = peer->id;
|
2020-06-24 06:34:26 +02:00
|
|
|
cancel_channel = NULL;
|
|
|
|
list_for_each(&peer->channels, channel, list) {
|
|
|
|
/* After `fundchannel_complete`, channel is in
|
|
|
|
* `CHANNELD_AWAITING_LOCKIN` state.
|
|
|
|
*
|
|
|
|
* TODO: This assumes only one channel at a time
|
|
|
|
* can be in this state, which is true at the
|
|
|
|
* time of this writing, but may change *if* we
|
|
|
|
* ever implement multiple channels per peer.
|
|
|
|
*/
|
|
|
|
if (channel->state != CHANNELD_AWAITING_LOCKIN)
|
|
|
|
continue;
|
|
|
|
cancel_channel = channel;
|
|
|
|
break;
|
2019-08-23 23:34:52 +02:00
|
|
|
}
|
2020-06-24 06:34:26 +02:00
|
|
|
if (!cancel_channel)
|
|
|
|
return command_fail(cmd, FUNDING_NOTHING_TO_CANCEL,
|
|
|
|
"No channels being opened or "
|
|
|
|
"awaiting lock-in for "
|
|
|
|
"peer_id %s",
|
|
|
|
type_to_string(tmpctx, struct node_id,
|
|
|
|
&peer->id));
|
2020-09-09 09:20:53 +02:00
|
|
|
cc->cid = cancel_channel->cid;
|
2019-08-23 23:34:52 +02:00
|
|
|
|
2019-09-09 18:11:24 +02:00
|
|
|
if (cancel_channel->opener == REMOTE)
|
2020-06-24 06:34:26 +02:00
|
|
|
return command_fail(cmd, FUNDING_CANCEL_NOT_SAFE,
|
2019-12-12 01:46:08 +01:00
|
|
|
"Cannot cancel channel that was "
|
|
|
|
"initiated by peer");
|
|
|
|
|
2020-11-24 01:36:22 +01:00
|
|
|
/* Check if we broadcast the transaction. (We store the transaction
|
|
|
|
* type into DB before broadcast). */
|
2023-01-30 07:06:03 +01:00
|
|
|
if (wallet_transaction_get(tmpctx, cmd->ld->wallet,
|
|
|
|
&cancel_channel->funding.txid))
|
2020-06-24 06:34:26 +02:00
|
|
|
return command_fail(cmd, FUNDING_CANCEL_NOT_SAFE,
|
2020-11-24 01:36:22 +01:00
|
|
|
"Has the funding transaction been"
|
|
|
|
" broadcast? Please use `close` or"
|
|
|
|
" `dev-fail` instead.");
|
2019-08-23 23:34:52 +02:00
|
|
|
|
|
|
|
if (channel_has_htlc_out(cancel_channel) ||
|
|
|
|
channel_has_htlc_in(cancel_channel)) {
|
2020-06-24 06:34:26 +02:00
|
|
|
return command_fail(cmd, FUNDING_CANCEL_NOT_SAFE,
|
2020-11-24 01:36:22 +01:00
|
|
|
"This channel has HTLCs attached and it"
|
|
|
|
" is not safe to cancel. Has the funding"
|
|
|
|
" transaction been broadcast? Please use"
|
|
|
|
" `close` or `dev-fail` instead.");
|
2019-08-23 23:34:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
tal_arr_expand(&cancel_channel->forgets, cmd);
|
2023-06-22 07:25:11 +02:00
|
|
|
/* Now, cmd will be ended by forget() or process_check_funding_broadcast(),
|
|
|
|
* but in the shutdown case it might be freed first and those crash. So instead
|
|
|
|
* we make it a child if forgets so it will stay around at least that long! */
|
|
|
|
tal_steal(cancel_channel->forgets, cmd);
|
2019-08-23 23:34:52 +02:00
|
|
|
|
|
|
|
/* Check if the transaction is onchain. */
|
|
|
|
/* Note: The above check and this check can't completely ensure that
|
|
|
|
* the funding transaction isn't broadcast. We can't know if the funding
|
2020-11-24 01:36:22 +01:00
|
|
|
* is broadcast by external wallet and the transaction hasn't
|
|
|
|
* been onchain. */
|
2020-01-09 16:38:12 +01:00
|
|
|
bitcoind_getutxout(cmd->ld->topology->bitcoind,
|
2021-10-13 05:45:36 +02:00
|
|
|
&cancel_channel->funding,
|
2020-01-09 16:38:12 +01:00
|
|
|
process_check_funding_broadcast,
|
2021-11-24 05:06:05 +01:00
|
|
|
/* Freed by callback */
|
|
|
|
tal_steal(NULL, cc));
|
2019-08-23 23:34:52 +02:00
|
|
|
return command_still_pending(cmd);
|
|
|
|
}
|
2019-10-28 04:33:42 +01:00
|
|
|
|
2022-01-24 21:02:52 +01:00
|
|
|
/* Swap in a new cached channel_update, releasing the old copy.
 * `update` may be take()n, in which case tal_dup_talarr consumes it. */
void channel_replace_update(struct channel *channel, u8 *update TAKES)
{
	u8 *fresh = tal_dup_talarr(channel, u8, update);

	tal_free(channel->channel_update);
	channel->channel_update = fresh;
}
|
|
|
|
|
2023-07-27 23:37:52 +02:00
|
|
|
static struct command_result *param_channel_for_splice(struct command *cmd,
|
|
|
|
const char *name,
|
|
|
|
const char *buffer,
|
|
|
|
const jsmntok_t *tok,
|
|
|
|
struct channel **channel)
|
|
|
|
{
|
|
|
|
struct command_result *result;
|
|
|
|
struct channel_id *cid;
|
|
|
|
|
|
|
|
result = param_channel_id(cmd, name, buffer, tok, &cid);
|
|
|
|
|
|
|
|
if (result != NULL)
|
|
|
|
return result;
|
|
|
|
|
|
|
|
*channel = channel_by_cid(cmd->ld, cid);
|
|
|
|
if (!*channel)
|
|
|
|
return command_fail(cmd, SPLICE_UNKNOWN_CHANNEL,
|
|
|
|
"Unknown channel %s",
|
|
|
|
type_to_string(tmpctx, struct channel_id,
|
|
|
|
cid));
|
|
|
|
|
|
|
|
if (!feature_negotiated(cmd->ld->our_features,
|
|
|
|
(*channel)->peer->their_features,
|
2023-08-10 02:20:29 +02:00
|
|
|
OPT_EXPERIMENTAL_SPLICE))
|
2023-07-27 23:37:52 +02:00
|
|
|
return command_fail(cmd, SPLICE_NOT_SUPPORTED,
|
|
|
|
"splicing not supported");
|
|
|
|
|
|
|
|
if (!(*channel)->owner)
|
|
|
|
return command_fail(cmd, SPLICE_WRONG_OWNER,
|
|
|
|
"Channel is disconnected");
|
|
|
|
|
|
|
|
if (!streq((*channel)->owner->name, "channeld"))
|
|
|
|
return command_fail(cmd,
|
|
|
|
SPLICE_WRONG_OWNER,
|
|
|
|
"Channel hasn't finished connecting or in "
|
|
|
|
"abnormal owner state %s",
|
|
|
|
(*channel)->owner->name);
|
|
|
|
|
|
|
|
if ((*channel)->state != CHANNELD_NORMAL)
|
|
|
|
return command_fail(cmd,
|
|
|
|
SPLICE_INVALID_CHANNEL_STATE,
|
|
|
|
"Channel needs to be in normal state but "
|
|
|
|
"is in state %s",
|
|
|
|
channel_state_name(*channel));
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void destroy_splice_command(struct splice_command *cc)
|
|
|
|
{
|
|
|
|
list_del(&cc->list);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct command_result *json_splice_init(struct command *cmd,
|
|
|
|
const char *buffer,
|
|
|
|
const jsmntok_t *obj UNNEEDED,
|
|
|
|
const jsmntok_t *params)
|
|
|
|
{
|
|
|
|
struct channel *channel;
|
|
|
|
struct splice_command *cc;
|
|
|
|
struct wally_psbt *initialpsbt;
|
|
|
|
s64 *relative_amount;
|
|
|
|
u32 *feerate_per_kw;
|
|
|
|
bool *force_feerate;
|
|
|
|
u8 *msg;
|
|
|
|
|
2023-10-24 23:09:04 +02:00
|
|
|
if (!param_check(cmd, buffer, params,
|
|
|
|
p_req("channel_id", param_channel_for_splice, &channel),
|
|
|
|
p_req("relative_amount", param_s64, &relative_amount),
|
|
|
|
p_opt("initialpsbt", param_psbt, &initialpsbt),
|
|
|
|
p_opt("feerate_per_kw", param_feerate, &feerate_per_kw),
|
|
|
|
p_opt_def("force_feerate", param_bool, &force_feerate, false),
|
|
|
|
NULL))
|
2023-07-27 23:37:52 +02:00
|
|
|
return command_param_failed();
|
|
|
|
|
|
|
|
if (splice_command_for_chan(cmd->ld, channel))
|
|
|
|
return command_fail(cmd,
|
|
|
|
SPLICE_BUSY_ERROR,
|
|
|
|
"Currently waiting on previous splice"
|
|
|
|
" command to finish.");
|
|
|
|
|
2023-10-24 23:09:04 +02:00
|
|
|
if (command_check_only(cmd))
|
|
|
|
return command_check_done(cmd);
|
|
|
|
|
|
|
|
if (!feerate_per_kw) {
|
|
|
|
feerate_per_kw = tal(cmd, u32);
|
|
|
|
*feerate_per_kw = opening_feerate(cmd->ld->topology);
|
|
|
|
}
|
|
|
|
|
2023-07-27 23:37:52 +02:00
|
|
|
if (!initialpsbt)
|
|
|
|
initialpsbt = create_psbt(cmd, 0, 0, 0);
|
|
|
|
if (!validate_psbt(initialpsbt))
|
|
|
|
return command_fail(cmd,
|
|
|
|
SPLICE_INPUT_ERROR,
|
|
|
|
"PSBT failed to validate.");
|
|
|
|
|
|
|
|
log_debug(cmd->ld->log, "splice_init input PSBT version %d",
|
|
|
|
initialpsbt->version);
|
|
|
|
|
|
|
|
cc = tal(cmd, struct splice_command);
|
|
|
|
|
|
|
|
list_add_tail(&cmd->ld->splice_commands, &cc->list);
|
|
|
|
tal_add_destructor(cc, destroy_splice_command);
|
|
|
|
|
|
|
|
cc->cmd = cmd;
|
|
|
|
cc->channel = channel;
|
|
|
|
|
|
|
|
msg = towire_channeld_splice_init(NULL, initialpsbt, *relative_amount,
|
|
|
|
*feerate_per_kw, *force_feerate);
|
|
|
|
|
|
|
|
subd_send_msg(channel->owner, take(msg));
|
|
|
|
return command_still_pending(cmd);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct command_result *json_splice_update(struct command *cmd,
|
|
|
|
const char *buffer,
|
|
|
|
const jsmntok_t *obj UNNEEDED,
|
|
|
|
const jsmntok_t *params)
|
|
|
|
{
|
|
|
|
struct channel *channel;
|
|
|
|
struct splice_command *cc;
|
|
|
|
struct wally_psbt *psbt;
|
|
|
|
|
|
|
|
if (!param(cmd, buffer, params,
|
|
|
|
p_req("channel_id", param_channel_for_splice, &channel),
|
|
|
|
p_req("psbt", param_psbt, &psbt),
|
|
|
|
NULL))
|
|
|
|
return command_param_failed();
|
|
|
|
|
|
|
|
if (splice_command_for_chan(cmd->ld, channel))
|
|
|
|
return command_fail(cmd,
|
|
|
|
SPLICE_BUSY_ERROR,
|
|
|
|
"Currently waiting on previous splice"
|
|
|
|
" command to finish.");
|
|
|
|
if (!validate_psbt(psbt))
|
|
|
|
return command_fail(cmd,
|
|
|
|
SPLICE_INPUT_ERROR,
|
|
|
|
"PSBT failed to validate.");
|
|
|
|
|
|
|
|
log_debug(cmd->ld->log, "splice_update input PSBT version %d",
|
|
|
|
psbt->version);
|
|
|
|
|
|
|
|
cc = tal(cmd, struct splice_command);
|
|
|
|
|
|
|
|
list_add_tail(&cmd->ld->splice_commands, &cc->list);
|
|
|
|
tal_add_destructor(cc, destroy_splice_command);
|
|
|
|
|
|
|
|
cc->cmd = cmd;
|
|
|
|
cc->channel = channel;
|
|
|
|
|
|
|
|
subd_send_msg(channel->owner,
|
|
|
|
take(towire_channeld_splice_update(NULL, psbt)));
|
|
|
|
return command_still_pending(cmd);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct command_result *json_splice_signed(struct command *cmd,
|
|
|
|
const char *buffer,
|
|
|
|
const jsmntok_t *obj UNNEEDED,
|
|
|
|
const jsmntok_t *params)
|
|
|
|
{
|
|
|
|
u8 *msg;
|
|
|
|
struct channel *channel;
|
|
|
|
struct splice_command *cc;
|
|
|
|
struct wally_psbt *psbt;
|
|
|
|
bool *sign_first;
|
|
|
|
|
2023-10-24 23:09:04 +02:00
|
|
|
if (!param_check(cmd, buffer, params,
|
|
|
|
p_req("channel_id", param_channel_for_splice, &channel),
|
|
|
|
p_req("psbt", param_psbt, &psbt),
|
|
|
|
p_opt_def("sign_first", param_bool, &sign_first, false),
|
|
|
|
NULL))
|
2023-07-27 23:37:52 +02:00
|
|
|
return command_param_failed();
|
|
|
|
|
|
|
|
if (splice_command_for_chan(cmd->ld, channel))
|
|
|
|
return command_fail(cmd,
|
|
|
|
SPLICE_BUSY_ERROR,
|
|
|
|
"Currently waiting on previous splice"
|
|
|
|
" command to finish.");
|
|
|
|
if (!validate_psbt(psbt))
|
|
|
|
return command_fail(cmd,
|
|
|
|
SPLICE_INPUT_ERROR,
|
|
|
|
"PSBT failed to validate.");
|
|
|
|
|
2023-10-24 23:09:04 +02:00
|
|
|
if (command_check_only(cmd))
|
|
|
|
return command_check_done(cmd);
|
|
|
|
|
2023-07-27 23:37:52 +02:00
|
|
|
log_debug(cmd->ld->log, "splice_signed input PSBT version %d",
|
|
|
|
psbt->version);
|
|
|
|
|
|
|
|
cc = tal(cmd, struct splice_command);
|
|
|
|
|
|
|
|
list_add_tail(&cmd->ld->splice_commands, &cc->list);
|
|
|
|
tal_add_destructor(cc, destroy_splice_command);
|
|
|
|
|
|
|
|
cc->cmd = cmd;
|
|
|
|
cc->channel = channel;
|
|
|
|
|
|
|
|
msg = towire_channeld_splice_signed(tmpctx, psbt, *sign_first);
|
|
|
|
subd_send_msg(channel->owner, take(msg));
|
|
|
|
return command_still_pending(cmd);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct json_command splice_init_command = {
|
|
|
|
"splice_init",
|
|
|
|
"channels",
|
|
|
|
json_splice_init,
|
|
|
|
"Init a channel splice to {channel_id} for {relative_amount} satoshis with {initialpsbt}. "
|
|
|
|
"Returns updated {psbt} with (partial) contributions from peer"
|
|
|
|
};
|
|
|
|
AUTODATA(json_command, &splice_init_command);
|
|
|
|
|
|
|
|
static const struct json_command splice_update_command = {
|
|
|
|
"splice_update",
|
|
|
|
"channels",
|
|
|
|
json_splice_update,
|
|
|
|
"Update {channel_id} currently active negotiated splice with {psbt}. "
|
|
|
|
""
|
|
|
|
"Returns updated {psbt} with (partial) contributions from peer. "
|
|
|
|
"If {commitments_secured} is true, next call may be to splicechannel_finalize, "
|
|
|
|
"otherwise keep calling splice_update passing back in the returned PSBT until "
|
|
|
|
"{commitments_secured} is true."
|
|
|
|
};
|
|
|
|
AUTODATA(json_command, &splice_update_command);
|
|
|
|
|
|
|
|
static const struct json_command splice_signed_command = {
|
|
|
|
"splice_signed",
|
|
|
|
"channels",
|
|
|
|
json_splice_signed,
|
|
|
|
"Send our {signed_psbt}'s tx sigs for {channel_id}."
|
|
|
|
};
|
|
|
|
AUTODATA(json_command, &splice_signed_command);
|
|
|
|
|
2019-10-28 04:33:42 +01:00
|
|
|
static struct command_result *json_dev_feerate(struct command *cmd,
|
|
|
|
const char *buffer,
|
|
|
|
const jsmntok_t *obj UNNEEDED,
|
|
|
|
const jsmntok_t *params)
|
|
|
|
{
|
|
|
|
u32 *feerate;
|
|
|
|
struct node_id *id;
|
|
|
|
struct peer *peer;
|
|
|
|
struct json_stream *response;
|
|
|
|
struct channel *channel;
|
|
|
|
const u8 *msg;
|
2022-03-22 23:59:20 +01:00
|
|
|
bool more_than_one;
|
2019-10-28 04:33:42 +01:00
|
|
|
|
2023-10-24 23:09:04 +02:00
|
|
|
if (!param_check(cmd, buffer, params,
|
|
|
|
p_req("id", param_node_id, &id),
|
|
|
|
p_req("feerate", param_number, &feerate),
|
|
|
|
NULL))
|
2019-10-28 04:33:42 +01:00
|
|
|
return command_param_failed();
|
|
|
|
|
|
|
|
peer = peer_by_id(cmd->ld, id);
|
|
|
|
if (!peer)
|
|
|
|
return command_fail(cmd, LIGHTNINGD, "Peer not connected");
|
|
|
|
|
2023-10-02 00:59:51 +02:00
|
|
|
channel = peer_any_channel(peer, channel_state_can_add_htlc, &more_than_one);
|
2023-10-02 00:59:49 +02:00
|
|
|
if (!channel || !channel->owner)
|
2019-10-28 04:33:42 +01:00
|
|
|
return command_fail(cmd, LIGHTNINGD, "Peer bad state");
|
2022-03-22 23:59:20 +01:00
|
|
|
/* This is a dev command: fix the api if you need this! */
|
|
|
|
if (more_than_one)
|
|
|
|
return command_fail(cmd, LIGHTNINGD, "More than one channel");
|
2019-10-28 04:33:42 +01:00
|
|
|
|
2023-10-24 23:09:04 +02:00
|
|
|
if (command_check_only(cmd))
|
|
|
|
return command_check_done(cmd);
|
|
|
|
|
2020-08-25 03:33:16 +02:00
|
|
|
msg = towire_channeld_feerates(NULL, *feerate,
|
2020-11-24 01:36:22 +01:00
|
|
|
feerate_min(cmd->ld, NULL),
|
|
|
|
feerate_max(cmd->ld, NULL),
|
lightningd: clean up feerate handling, deprecate old terms.
Drop try_get_feerate() in favor of explicit feerate_for_deadline() and
smoothed_feerate_for_deadline().
This shows us everywhere we deal with old-style feerates by names.
`delayed_to_us` and `htlc_resolution` will be moving to dynamic fees,
so deprecate those.
Note that "penalty" is still used for generating penalty txs for
watchtowers, and "unilateral_close" still used until we get zero-fee
anchors.
Changelog-Added: JSON-RPC: `feerates` `estimates` array shows fee estimates by blockcount from underlying plugin (usually *bcli*).
Changelog-Changed: JSON-RPC: `close`, `fundchannel`, `fundpsbt`, `multifundchannel`, `multiwithdraw`, `txprepare`, `upgradewallet`, `withdraw` `feerate` (`feerange` for `close`) value *slow* is now 100 block-estimate, not half of 100-block estimate.
Changelog-Deprecated: JSON-RPC: `close`, `fundchannel`, `fundpsbt`, `multifundchannel`, `multiwithdraw`, `txprepare`, `upgradewallet`, `withdraw` `feerate` (`feerange` for `close`) expressed as, "delayed_to_us", "htlc_resolution", "max_acceptable" or "min_acceptable". Use explicit block counts or *slow*/*normal*/*urgent*/*minimum*.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2023-04-07 06:43:39 +02:00
|
|
|
penalty_feerate(cmd->ld->topology));
|
2019-10-28 04:33:42 +01:00
|
|
|
subd_send_msg(channel->owner, take(msg));
|
|
|
|
|
|
|
|
response = json_stream_success(cmd);
|
|
|
|
json_add_node_id(response, "id", id);
|
|
|
|
json_add_u32(response, "feerate", *feerate);
|
|
|
|
|
|
|
|
return command_success(cmd, response);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct json_command dev_feerate_command = {
|
|
|
|
"dev-feerate",
|
|
|
|
"developer",
|
|
|
|
json_dev_feerate,
|
2023-09-21 07:36:27 +02:00
|
|
|
"Set feerate for {id} to {feerate}",
|
|
|
|
.dev_only = true,
|
2019-10-28 04:33:42 +01:00
|
|
|
};
|
|
|
|
AUTODATA(json_command, &dev_feerate_command);
|
2021-05-31 05:08:04 +02:00
|
|
|
|
|
|
|
/* channeld replied that quiescence is reached: complete the RPC command
 * with an empty success object. */
static void quiesce_reply(struct subd *channeld UNUSED,
			  const u8 *reply,
			  const int *fds UNUSED,
			  struct command *cmd)
{
	was_pending(command_success(cmd, json_stream_success(cmd)));
}
|
|
|
|
|
|
|
|
static struct command_result *json_dev_quiesce(struct command *cmd,
|
|
|
|
const char *buffer,
|
|
|
|
const jsmntok_t *obj UNNEEDED,
|
|
|
|
const jsmntok_t *params)
|
|
|
|
{
|
|
|
|
struct node_id *id;
|
|
|
|
struct peer *peer;
|
|
|
|
struct channel *channel;
|
|
|
|
const u8 *msg;
|
2022-03-22 23:59:20 +01:00
|
|
|
bool more_than_one;
|
2021-05-31 05:08:04 +02:00
|
|
|
|
2023-10-24 23:09:04 +02:00
|
|
|
if (!param_check(cmd, buffer, params,
|
|
|
|
p_req("id", param_node_id, &id),
|
|
|
|
NULL))
|
2021-05-31 05:08:04 +02:00
|
|
|
return command_param_failed();
|
|
|
|
|
|
|
|
peer = peer_by_id(cmd->ld, id);
|
|
|
|
if (!peer)
|
|
|
|
return command_fail(cmd, LIGHTNINGD, "Peer not connected");
|
|
|
|
|
2023-05-22 02:51:44 +02:00
|
|
|
/* FIXME: If this becomes a real API, check for OPT_QUIESCE! */
|
2023-10-02 00:59:51 +02:00
|
|
|
channel = peer_any_channel(peer, channel_state_wants_peercomms, &more_than_one);
|
2023-10-02 00:59:49 +02:00
|
|
|
if (!channel || !channel->owner)
|
2021-05-31 05:08:04 +02:00
|
|
|
return command_fail(cmd, LIGHTNINGD, "Peer bad state");
|
2022-03-22 23:59:20 +01:00
|
|
|
/* This is a dev command: fix the api if you need this! */
|
|
|
|
if (more_than_one)
|
|
|
|
return command_fail(cmd, LIGHTNINGD, "More than one channel");
|
2021-05-31 05:08:04 +02:00
|
|
|
|
2023-10-24 23:09:04 +02:00
|
|
|
if (command_check_only(cmd))
|
|
|
|
return command_check_done(cmd);
|
|
|
|
|
2021-05-31 05:08:04 +02:00
|
|
|
msg = towire_channeld_dev_quiesce(NULL);
|
|
|
|
subd_req(channel->owner, channel->owner, take(msg), -1, 0,
|
|
|
|
quiesce_reply, cmd);
|
|
|
|
return command_still_pending(cmd);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct json_command dev_quiesce_command = {
|
|
|
|
"dev-quiesce",
|
|
|
|
"developer",
|
|
|
|
json_dev_quiesce,
|
2023-09-21 07:36:27 +02:00
|
|
|
"Initiate quiscence protocol with peer",
|
|
|
|
.dev_only = true,
|
2021-05-31 05:08:04 +02:00
|
|
|
};
|
|
|
|
AUTODATA(json_command, &dev_quiesce_command);
|