#include "config.h"
#include <arpa/inet.h>
#include <bitcoin/feerate.h>
#include <bitcoin/script.h>
#include <bitcoin/tx.h>
#include <ccan/array_size/array_size.h>
#include <ccan/asort/asort.h>
#include <ccan/cast/cast.h>
#include <ccan/io/io.h>
#include <ccan/mem/mem.h>
#include <ccan/noerr/noerr.h>
#include <ccan/str/str.h>
#include <ccan/take/take.h>
#include <ccan/tal/str/str.h>
#include <channeld/channeld_wiregen.h>
#include <common/addr.h>
#include <common/closing_fee.h>
#include <common/configdir.h>
#include <common/dev_disconnect.h>
#include <common/features.h>
#include <common/htlc_trim.h>
#include <common/initial_commit_tx.h>
#include <common/json_channel_type.h>
#include <common/json_command.h>
#include <common/json_param.h>
#include <common/jsonrpc_errors.h>
#include <common/key_derive.h>
#include <common/scb_wiregen.h>
#include <common/shutdown_scriptpubkey.h>
#include <common/status.h>
#include <common/timeout.h>
#include <common/utils.h>
#include <common/version.h>
#include <common/wire_error.h>
#include <connectd/connectd_wiregen.h>
#include <errno.h>
#include <fcntl.h>
#include <gossipd/gossipd_wiregen.h>
#include <hsmd/hsmd_wiregen.h>
#include <inttypes.h>
#include <lightningd/anchorspend.h>
#include <lightningd/bitcoind.h>
#include <lightningd/chaintopology.h>
#include <lightningd/channel.h>
#include <lightningd/channel_control.h>
#include <lightningd/channel_gossip.h>
#include <lightningd/closing_control.h>
#include <lightningd/connect_control.h>
#include <lightningd/dual_open_control.h>
#include <lightningd/gossip_control.h>
#include <lightningd/hsm_control.h>
#include <lightningd/jsonrpc.h>
#include <lightningd/lightningd.h>
#include <lightningd/log.h>
#include <lightningd/memdump.h>
#include <lightningd/notification.h>
#include <lightningd/onchain_control.h>
#include <lightningd/opening_common.h>
#include <lightningd/opening_control.h>
#include <lightningd/options.h>
#include <lightningd/peer_control.h>
#include <lightningd/peer_fd.h>
#include <lightningd/peer_htlcs.h>
#include <lightningd/plugin_hook.h>
#include <lightningd/subd.h>
#include <limits.h>
#include <onchaind/onchaind_wiregen.h>
#include <openingd/dualopend_wiregen.h>
#include <openingd/openingd_wiregen.h>
#include <stdlib.h>
#include <unistd.h>
#include <wally_bip32.h>
#include <wire/onion_wire.h>
#include <wire/wire_sync.h>

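/* Peer destructor: remove it from the lookup maps. */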
static void destroy_peer(struct peer *peer)
{
	peer_node_id_map_del(peer->ld->peers, peer);
	if (peer->dbid)
		peer_dbid_map_del(peer->ld->peers_by_dbid, peer);
}

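/* Give a previously-unsaved peer its database id (it must not have one yet). */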
void peer_set_dbid(struct peer *peer, u64 dbid)
{
	assert(!peer->dbid);
	assert(dbid);
	peer->dbid = dbid;
	peer_dbid_map_add(peer->ld->peers_by_dbid, peer);
}

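/* Allocate and initialize a fresh peer, and add it to the lookup maps. */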
struct peer *new_peer(struct lightningd *ld, u64 dbid,
		      const struct node_id *id,
		      const struct wireaddr_internal *addr,
		      const u8 *their_features,
		      bool connected_incoming)
{
	/* We are owned by our channels, and freed manually by destroy_channel */
	struct peer *peer = tal(NULL, struct peer);

	peer->ld = ld;
	peer->dbid = dbid;
	peer->id = *id;
	peer->uncommitted_channel = NULL;
	peer->addr = *addr;
	peer->connected_incoming = connected_incoming;
	peer->remote_addr = NULL;
	list_head_init(&peer->channels);
	peer->direction = node_id_idx(&peer->ld->our_nodeid, &peer->id);
	peer->connected = PEER_DISCONNECTED;
	peer->last_connect_attempt.ts.tv_sec
		= peer->last_connect_attempt.ts.tv_nsec = 0;
	if (their_features)
		peer->their_features = tal_dup_talarr(peer, u8, their_features);
	else
		peer->their_features = NULL;

	peer->dev_ignore_htlcs = false;

	peer_node_id_map_add(ld->peers, peer);
	if (dbid)
		peer_dbid_map_add(ld->peers_by_dbid, peer);
	tal_add_destructor(peer, destroy_peer);
	return peer;
}

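/* Free a peer with no channels left, removing its db entry if unused. */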
static void delete_peer(struct peer *peer)
{
	assert(list_empty(&peer->channels));
	assert(!peer->uncommitted_channel);
	/* If it only ever existed because of uncommitted channel, it won't
	 * be in the database */
	if (peer->dbid != 0)
		wallet_delete_peer_if_unused(peer->ld->wallet, peer->dbid);
	tal_free(peer);
}

/* Last one out deletes peer. */
void maybe_delete_peer(struct peer *peer)
{
	if (!list_empty(&peer->channels))
		return;
	if (peer->uncommitted_channel) {
		/* This isn't sufficient to keep it in db! */
		if (peer->dbid != 0) {
			wallet_delete_peer_if_unused(peer->ld->wallet, peer->dbid);
			peer_dbid_map_del(peer->ld->peers_by_dbid, peer);
			peer->dbid = 0;
		}
		return;
	}
	/* Maybe it's reconnected / reconnecting? */
	if (peer->connected != PEER_DISCONNECTED)
		return;
	delete_peer(peer);
}

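/* On disconnect, fail or forget this peer's channels as their state requires. */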
static void peer_channels_cleanup(struct lightningd *ld,
				  const struct node_id *id)
{
	struct peer *peer;
	struct channel *c, **channels;

	peer = peer_by_id(ld, id);
	if (!peer)
		return;

	/* Freeing channels can free peer, so gather first. */
	channels = tal_arr(tmpctx, struct channel *, 0);
	list_for_each(&peer->channels, c, list)
		tal_arr_expand(&channels, c);

	if (peer->uncommitted_channel) {
		/* Frees peer if no channels */
		kill_uncommitted_channel(peer->uncommitted_channel,
					 "Disconnected");
	} else if (tal_count(channels) == 0)
		/* Was completely idle. */
		tal_free(peer);

	for (size_t i = 0; i < tal_count(channels); i++) {
		c = channels[i];
		if (channel_state_wants_peercomms(c->state)) {
			channel_cleanup_commands(c, "Disconnected");
			channel_fail_transient(c, true, "Disconnected");
		} else if (channel_state_uncommitted(c->state)) {
			channel_unsaved_close_conn(c, "Disconnected");
		}
	}
}

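/* Look up a peer by its database id, or NULL if not found. */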
struct peer *find_peer_by_dbid(struct lightningd *ld, u64 dbid)
{
	return peer_dbid_map_get(ld->peers_by_dbid, dbid);
}

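/* Look up a peer by node id, or NULL if not found. */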
struct peer *peer_by_id(struct lightningd *ld, const struct node_id *id)
{
	return peer_node_id_map_get(ld->peers, id);
}

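/* Parse a node id from a JSON token and look up the corresponding peer. */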
struct peer *peer_from_json(struct lightningd *ld,
			    const char *buffer,
			    const jsmntok_t *peeridtok)
{
	struct node_id peerid;

	if (!json_to_node_id(buffer, peeridtok, &peerid))
		return NULL;

	return peer_by_id(ld, &peerid);
}

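/* Build a P2WPKH scriptpubkey from the given BIP32 key index. */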
u8 *p2wpkh_for_keyidx(const tal_t *ctx, struct lightningd *ld, u64 keyidx)
{
	struct pubkey shutdownkey;

	bip32_pubkey(ld, &shutdownkey, keyidx);
	return scriptpubkey_p2wpkh(ctx, &shutdownkey);
}

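/* Build a P2TR scriptpubkey from the given BIP32 key index. */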
u8 *p2tr_for_keyidx(const tal_t *ctx, struct lightningd *ld, u64 keyidx)
{
	struct pubkey shutdownkey;

	bip32_pubkey(ld, &shutdownkey, keyidx);

	return scriptpubkey_p2tr(ctx, &shutdownkey);
}

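/* Ask the HSM to sign our last commitment tx, then attach the 2-of-2 witness. */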
static struct bitcoin_tx *sign_last_tx(const tal_t *ctx,
				       const struct channel *channel,
				       const struct bitcoin_tx *last_tx,
				       const struct bitcoin_signature *last_sig)
{
	struct lightningd *ld = channel->peer->ld;
	struct bitcoin_signature sig;
	const u8 *msg;
	u8 **witness;
	u64 commit_index = channel->next_index[LOCAL] - 1;
	struct bitcoin_tx *tx = clone_bitcoin_tx(ctx, last_tx);

	assert(!tx->wtx->inputs[0].witness);
	msg = towire_hsmd_sign_commitment_tx(NULL,
					     &channel->peer->id,
					     channel->dbid,
					     tx,
					     &channel->channel_info
					     .remote_fundingkey,
					     commit_index);

	msg = hsm_sync_req(tmpctx, ld, take(msg));
	if (!fromwire_hsmd_sign_commitment_tx_reply(msg, &sig))
		fatal("HSM gave bad sign_commitment_tx_reply %s",
		      tal_hex(tmpctx, msg));

	witness =
		bitcoin_witness_2of2(tx, last_sig,
				     &sig, &channel->channel_info.remote_fundingkey,
				     &channel->local_funding_pubkey);

	bitcoin_tx_input_set_witness(tx, 0, take(witness));
	return tx;
}

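/* Detect (ancient, pre-0.7.1) commitment txs we must not try to broadcast. */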
bool invalid_last_tx(const struct bitcoin_tx *tx)
{
	/* This problem goes back further, but was discovered just before the
	 * 0.7.1 release. */
#ifdef COMPAT_V070
	/* Old bug had commitment txs with no outputs; bitcoin_txid asserts. */
	return !tx || !tx->wtx || tx->wtx->num_outputs == 0;
#else
	return false;
#endif
}

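/* Callback for broadcast_tx: returns true (stop) once the commitment tx is mined. */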
static bool commit_tx_send_finished(struct channel *channel,
				    const struct bitcoin_tx *tx,
				    bool success,
				    const char *err,
				    struct anchor_details *adet)
{
	struct bitcoin_txid txid;

	bitcoin_txid(tx, &txid);

	/* If it's already mined, stop retransmitting, stop boosting. */
	if (wallet_transaction_height(channel->peer->ld->wallet, &txid) != 0) {
		tal_free(adet);
		return true;
	}

	/* Boost (if possible), and keep trying! */
	commit_tx_boost(channel, adet, success);
	return false;
}

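/* Sign our last commitment tx, record it in the wallet, and keep broadcasting it. */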
static struct bitcoin_tx *sign_and_send_last(const tal_t *ctx,
					     struct lightningd *ld,
					     struct channel *channel,
					     const char *cmd_id,
					     const struct bitcoin_tx *last_tx,
					     const struct bitcoin_signature *last_sig)
{
	struct bitcoin_txid txid;
	struct anchor_details *adet;
	struct bitcoin_tx *tx;

	tx = sign_last_tx(ctx, channel, last_tx, last_sig);
	bitcoin_txid(tx, &txid);
	wallet_transaction_add(ld->wallet, tx->wtx, 0, 0);
	wallet_extract_owned_outputs(ld->wallet, tx->wtx, false, NULL, NULL);

	/* Remember anchor information for commit_tx_boost */
	adet = create_anchor_details(NULL, channel, tx);

	/* Keep broadcasting until we say stop (can fail due to dup,
	 * if they beat us to the broadcast). */
	broadcast_tx(channel, ld->topology, channel, tx, cmd_id, false, 0,
		     commit_tx_send_finished, NULL, take(adet));

	return tx;
}

/* FIXME: reorder! */
static enum watch_result funding_spent(struct channel *channel,
				       const struct bitcoin_tx *tx,
				       size_t inputnum UNUSED,
				       const struct block *block);

/* We coop-closed channel: if another inflight confirms, force close */
static enum watch_result closed_inflight_depth_cb(struct lightningd *ld,
						  const struct bitcoin_txid *txid,
						  const struct bitcoin_tx *tx,
						  unsigned int depth,
						  struct channel_inflight *inflight)
{
	if (depth == 0)
		return KEEP_WATCHING;

	/* This is now the main tx. */
	update_channel_from_inflight(ld, inflight->channel, inflight);
	channel_fail_permanent(inflight->channel,
			       REASON_UNKNOWN,
			       "Inflight tx %s confirmed after mutual close",
			       fmt_bitcoin_txid(tmpctx, txid));
	return DELETE_WATCH;
}

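/* Put the channel's latest signed tx(s) onchain: every inflight commitment
 * for a unilateral close, or just the last tx otherwise. */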
void drop_to_chain(struct lightningd *ld, struct channel *channel,
		   bool cooperative, bool rebroadcast)
{
	struct channel_inflight *inflight;
	const char *cmd_id;

	/* If we're not already (e.g. close before channel fully open),
	 * make sure we're watching for the funding spend */
	if (!channel->funding_spend_watch) {
		log_debug(channel->log, "Adding funding_spend_watch");
		channel->funding_spend_watch = watch_txo(channel,
							 ld->topology, channel,
							 &channel->funding,
							 funding_spent);
	}

	/* If this was triggered by a close command, get a copy of the cmd id */
	cmd_id = cmd_id_from_close_command(tmpctx, ld, channel);

	/* BOLT #2:
	 *
	 * - if `next_revocation_number` is greater than expected
	 *   above, AND `your_last_per_commitment_secret` is correct for that
	 *   `next_revocation_number` minus 1:
	 *      - MUST NOT broadcast its commitment transaction.
	 */
	if (channel->has_future_per_commitment_point && !cooperative) {
		log_broken(channel->log,
			   "Cannot broadcast our commitment tx:"
			   " they have a future one");
	} else if (channel_state_open_uncommitted(channel->state)) {
		/* There's no commitment transaction, we can
		 * safely forget this channel */
		log_info(channel->log,
			 "Initialized channel (v2) received error"
			 ", we're deleting the channel");
	} else if (invalid_last_tx(channel->last_tx)) {
		log_broken(channel->log,
			   "Cannot broadcast our commitment tx:"
			   " it's invalid! (ancient channel?)");
	} else if (!rebroadcast && !cooperative) {
		log_unusual(channel->log,
			    "Not dropping our unilateral close onchain since "
			    "we already saw theirs confirm.");
	} else {
		struct bitcoin_tx *tx COMPILER_WANTS_INIT("gcc 12.3.0");

		/* We need to drop *every* commitment transaction to chain */
		if (!cooperative && !list_empty(&channel->inflights)) {
			list_for_each(&channel->inflights, inflight, list) {
				if (!inflight->last_tx)
					continue;
				tx = sign_and_send_last(tmpctx, ld, channel, cmd_id,
							inflight->last_tx,
							&inflight->last_sig);
			}
		} else
			tx = sign_and_send_last(tmpctx, ld, channel, cmd_id, channel->last_tx,
						&channel->last_sig);

		resolve_close_command(ld, channel, cooperative, tx);
	}

	/* In cooperative mode, we're assuming that we closed the right one:
	 * this might not happen if we're splicing, or dual-funding still
	 * opening.  So, if we get any unexpected inflight confirming, we
	 * force close. */
	if (cooperative) {
		list_for_each(&channel->inflights, inflight, list) {
			if (bitcoin_outpoint_eq(&inflight->funding->outpoint,
						&channel->funding)) {
				continue;
			}
			watch_txid(inflight, ld->topology,
				   &inflight->funding->outpoint.txid,
				   closed_inflight_depth_cb, inflight);
		}
	}
}

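/* At startup, rebroadcast closing/commitment txs for channels still closing. */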
void resend_closing_transactions(struct lightningd *ld)
{
	struct peer *peer;
	struct channel *channel;
	struct peer_node_id_map_iter it;

	for (peer = peer_node_id_map_first(ld->peers, &it);
	     peer;
	     peer = peer_node_id_map_next(ld->peers, &it)) {
		list_for_each(&peer->channels, channel, list) {
			switch (channel->state) {
			case CHANNELD_AWAITING_LOCKIN:
			case CHANNELD_NORMAL:
			case DUALOPEND_OPEN_INIT:
			case DUALOPEND_OPEN_COMMIT_READY:
			case DUALOPEND_OPEN_COMMITTED:
			case DUALOPEND_AWAITING_LOCKIN:
			case CHANNELD_AWAITING_SPLICE:
			case CHANNELD_SHUTTING_DOWN:
			case CLOSINGD_SIGEXCHANGE:
			case FUNDING_SPEND_SEEN:
			case ONCHAIN:
			case CLOSED:
				continue;
			case CLOSINGD_COMPLETE:
				drop_to_chain(ld, channel, true, true);
				continue;
			case AWAITING_UNILATERAL:
				drop_to_chain(ld, channel, false, true);
				continue;
			}
			abort();
		}
	}
}

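/* Handle an error or warning on a channel (from the peer, or one we sent). */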
void channel_errmsg(struct channel *channel,
		    struct peer_fd *peer_fd,
		    const char *desc,
		    const u8 *err_for_them,
		    bool disconnect,
		    bool warning)
{
	/* Clean up any in-progress open attempts */
	channel_cleanup_commands(channel, desc);

	if (channel_state_uncommitted(channel->state)) {
		log_info(channel->log, "%s", "Unsaved peer failed."
			 " Deleting channel.");
		delete_channel(channel);
		return;
	}

	/* No peer_fd means a subd crash or disconnection. */
	if (!peer_fd) {
		/* If the channel is unsaved, we forget it */
		channel_fail_transient(channel, disconnect, "%s: %s",
				       channel->owner->name, desc);
		return;
	}

	/* Do we have an error to send? */
	if (err_for_them && !channel->error && !warning)
		channel->error = tal_dup_talarr(channel, u8, err_for_them);

	/* LND sends "internal error" and we close the channel. But
	 * prior to 0.11 we would turn this into a warning, and they
	 * would recover after a reconnect. So we downgrade, but snark
	 * about it in the logs. */
	if (!err_for_them && strends(desc, "internal error")) {
		channel_fail_transient(channel, disconnect, "%s: %s",
				       channel->owner->name,
				       "lnd sent 'internal error':"
				       " let's give it some space");
		return;
	}

	/* This is us, sending a warning. */
	if (warning) {
		channel_fail_transient(channel, disconnect, "%s sent %s",
				       channel->owner->name,
				       desc);
		return;
	}

	/* BOLT #1:
	 *
	 * A sending node:
	 *...
	 *  - when sending `error`:
	 *    - MUST fail the channel(s) referred to by the error message.
	 *    - MAY set `channel_id` to all zero to indicate all channels.
	 */
	/* FIXME: Close if it's an all-channels error sent or rcvd */

	/* BOLT #1:
	 *
	 * A sending node:
	 *...
	 *  - when sending `error`:
	 *    - MUST fail the channel(s) referred to by the error message.
	 *    - MAY set `channel_id` to all zero to indicate all channels.
	 *...
	 * The receiving node:
	 *  - upon receiving `error`:
	 *    - if `channel_id` is all zero:
	 *       - MUST fail all channels with the sending node.
	 *    - otherwise:
	 *      - MUST fail the channel referred to by `channel_id`, if that channel is with the
	 *        sending node.
	 */

	/* FIXME: We don't close all channels */
	/* We should immediately forget the channel if we receive error during
	 * CHANNELD_AWAITING_LOCKIN if we are fundee. */
	if (!err_for_them && channel->opener == REMOTE
	    && channel->state == CHANNELD_AWAITING_LOCKIN)
		channel_fail_forget(channel, "%s: %s %s",
				    channel->owner->name,
				    err_for_them ? "sent" : "received", desc);
	else
		channel_fail_permanent(channel,
				       err_for_them ? REASON_LOCAL : REASON_PROTOCOL,
				       "%s: %s %s",
				       channel->owner->name,
				       err_for_them ? "sent" : "received", desc);
}

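/* Append this channel's incoming and outgoing HTLCs to the JSON response. */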
static void json_add_htlcs(struct lightningd *ld,
			   struct json_stream *response,
			   const struct channel *channel)
{
	/* FIXME: make per-channel htlc maps! */
	const struct htlc_in *hin;
	struct htlc_in_map_iter ini;
	const struct htlc_out *hout;
	struct htlc_out_map_iter outi;
	u32 local_feerate = get_feerate(channel->fee_states,
					channel->opener, LOCAL);

	/* FIXME: Add more fields. */
	json_array_start(response, "htlcs");
	for (hin = htlc_in_map_first(ld->htlcs_in, &ini);
	     hin;
	     hin = htlc_in_map_next(ld->htlcs_in, &ini)) {
		if (hin->key.channel != channel)
			continue;

		json_object_start(response, NULL);
		json_add_string(response, "direction", "in");
		json_add_u64(response, "id", hin->key.id);
		json_add_amount_msat(response, "amount_msat", hin->msat);
		json_add_u32(response, "expiry", hin->cltv_expiry);
		json_add_sha256(response, "payment_hash", &hin->payment_hash);
		json_add_string(response, "state",
				htlc_state_name(hin->hstate));
		if (htlc_is_trimmed(REMOTE, hin->msat, local_feerate,
				    channel->our_config.dust_limit, LOCAL,
				    channel_has(channel, OPT_ANCHOR_OUTPUTS_DEPRECATED),
				    channel_has(channel, OPT_ANCHORS_ZERO_FEE_HTLC_TX)))
			json_add_bool(response, "local_trimmed", true);
		if (hin->status != NULL)
			json_add_string(response, "status", hin->status);
		json_object_end(response);
	}

	for (hout = htlc_out_map_first(ld->htlcs_out, &outi);
	     hout;
	     hout = htlc_out_map_next(ld->htlcs_out, &outi)) {
		if (hout->key.channel != channel)
			continue;

		json_object_start(response, NULL);
		json_add_string(response, "direction", "out");
		json_add_u64(response, "id", hout->key.id);
		json_add_amount_msat(response, "amount_msat", hout->msat);
		json_add_u64(response, "expiry", hout->cltv_expiry);
		json_add_sha256(response, "payment_hash", &hout->payment_hash);
		json_add_string(response, "state",
				htlc_state_name(hout->hstate));
		if (htlc_is_trimmed(LOCAL, hout->msat, local_feerate,
				    channel->our_config.dust_limit, LOCAL,
				    channel_has(channel, OPT_ANCHOR_OUTPUTS_DEPRECATED),
				    channel_has(channel, OPT_ANCHORS_ZERO_FEE_HTLC_TX)))
			json_add_bool(response, "local_trimmed", true);
		json_object_end(response);
	}
	json_array_end(response);
}

/* Fee a commitment transaction would currently cost */
static struct amount_sat commit_txfee(const struct channel *channel,
				      struct amount_msat amount,
				      enum side side)
{
	/* FIXME: make per-channel htlc maps! */
	const struct htlc_in *hin;
	struct htlc_in_map_iter ini;
	const struct htlc_out *hout;
	struct htlc_out_map_iter outi;
	struct lightningd *ld = channel->peer->ld;
	size_t num_untrimmed_htlcs = 0;
	u32 feerate = get_feerate(channel->fee_states,
				  channel->opener, side);
	struct amount_sat dust_limit;
	struct amount_sat fee;
	bool option_anchor_outputs = channel_has(channel, OPT_ANCHOR_OUTPUTS_DEPRECATED);
	bool option_anchors_zero_fee_htlc_tx = channel_has(channel, OPT_ANCHORS_ZERO_FEE_HTLC_TX);

	if (side == LOCAL)
		dust_limit = channel->our_config.dust_limit;
	if (side == REMOTE)
		dust_limit = channel->channel_info.their_config.dust_limit;

	/* Assume we tried to add "amount" */
	if (!htlc_is_trimmed(side, amount, feerate, dust_limit, side,
			     option_anchor_outputs, option_anchors_zero_fee_htlc_tx))
		num_untrimmed_htlcs++;

	for (hin = htlc_in_map_first(ld->htlcs_in, &ini);
	     hin;
	     hin = htlc_in_map_next(ld->htlcs_in, &ini)) {
		if (hin->key.channel != channel)
			continue;
		if (!htlc_is_trimmed(!side, hin->msat, feerate, dust_limit,
				     side, option_anchor_outputs, option_anchors_zero_fee_htlc_tx))
			num_untrimmed_htlcs++;
	}
	for (hout = htlc_out_map_first(ld->htlcs_out, &outi);
	     hout;
	     hout = htlc_out_map_next(ld->htlcs_out, &outi)) {
		if (hout->key.channel != channel)
			continue;
		if (!htlc_is_trimmed(side, hout->msat, feerate, dust_limit,
				     side, option_anchor_outputs, option_anchors_zero_fee_htlc_tx))
			num_untrimmed_htlcs++;
	}

	/*
	 * BOLT #2:
	 * A sending node:
	 *...
	 * - SHOULD NOT offer `amount_msat` if, after adding that HTLC to its
	 *   commitment transaction, its remaining balance doesn't allow it to
	 *   pay the commitment transaction fee when receiving or sending a
	 *   future additional non-dust HTLC while maintaining its channel
	 *   reserve. It is recommended that this "fee spike buffer" can
	 *   handle twice the current `feerate_per_kw` to ensure
	 *   predictability between implementations.
	 */
	fee = commit_tx_base_fee(marginal_feerate(feerate), num_untrimmed_htlcs + 1,
				 option_anchor_outputs, option_anchors_zero_fee_htlc_tx);

	if (option_anchor_outputs || option_anchors_zero_fee_htlc_tx) {
		/* BOLT #3:
		 * If `option_anchors` applies to the commitment
		 * transaction, also subtract two times the fixed anchor size
		 * of 330 sats from the funder (either `to_local` or
		 * `to_remote`).
		 */
		if (!amount_sat_add(&fee, fee, AMOUNT_SAT(660)))
			; /* fee is somehow astronomical already.... */
	}

	return fee;
}

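/* Reduce *amount by all HTLCs we have offered on this channel (floor at zero). */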
static void subtract_offered_htlcs(const struct channel *channel,
				   struct amount_msat *amount)
{
	const struct htlc_out *hout;
	struct htlc_out_map_iter outi;
	struct lightningd *ld = channel->peer->ld;

	for (hout = htlc_out_map_first(ld->htlcs_out, &outi);
	     hout;
	     hout = htlc_out_map_next(ld->htlcs_out, &outi)) {
		if (hout->key.channel != channel)
			continue;
		if (!amount_msat_sub(amount, *amount, hout->msat))
			*amount = AMOUNT_MSAT(0);
	}
}

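/* Reduce *amount by all HTLCs we have received on this channel (floor at zero). */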
static void subtract_received_htlcs(const struct channel *channel,
				    struct amount_msat *amount)
{
	const struct htlc_in *hin;
	struct htlc_in_map_iter ini;
	struct lightningd *ld = channel->peer->ld;

	for (hin = htlc_in_map_first(ld->htlcs_in, &ini);
	     hin;
	     hin = htlc_in_map_next(ld->htlcs_in, &ini)) {
		if (hin->key.channel != channel)
			continue;
		if (!amount_msat_sub(amount, *amount, hin->msat))
			*amount = AMOUNT_MSAT(0);
	}
}

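/* How much can we send through this channel in a single payment? */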
struct amount_msat channel_amount_spendable(const struct channel *channel)
{
	struct amount_msat spendable;
	bool wumbo;

	/* Compute how much we can send via this channel in one payment. */
	if (!amount_msat_sub_sat(&spendable,
				 channel->our_msat,
				 channel->channel_info.their_config.channel_reserve))
		return AMOUNT_MSAT(0);

	/* Take away any currently-offered HTLCs. */
	subtract_offered_htlcs(channel, &spendable);

	/* If we're opener, subtract txfees we'll need to spend this */
	if (channel->opener == LOCAL) {
		if (!amount_msat_sub_sat(&spendable, spendable,
					 commit_txfee(channel, spendable,
						      LOCAL)))
			return AMOUNT_MSAT(0);
	}

	/* We can't offer an HTLC less than the other side will accept. */
	if (amount_msat_less(spendable,
			     channel->channel_info.their_config.htlc_minimum))
		return AMOUNT_MSAT(0);

	wumbo = feature_negotiated(channel->peer->ld->our_features,
				   channel->peer->their_features,
				   OPT_LARGE_CHANNELS);

	/* We can't offer an HTLC over the max payment threshold either. */
	if (amount_msat_greater(spendable, chainparams->max_payment)
	    && !wumbo) {
		spendable = chainparams->max_payment;
	}

	return spendable;
}

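/* How much can we receive through this channel in a single payment? */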
struct amount_msat channel_amount_receivable(const struct channel *channel)
{
	struct amount_msat their_msat, receivable;
	bool wumbo;

	/* Compute how much we can receive via this channel in one payment */
	if (!amount_sat_sub_msat(&their_msat,
				 channel->funding_sats, channel->our_msat))
		their_msat = AMOUNT_MSAT(0);

	if (!amount_msat_sub_sat(&receivable,
				 their_msat,
				 channel->our_config.channel_reserve))
		return AMOUNT_MSAT(0);

	/* Take away any currently-offered HTLCs. */
	subtract_received_htlcs(channel, &receivable);

	/* If they're opener, subtract txfees they'll need to spend this */
	if (channel->opener == REMOTE) {
		if (!amount_msat_sub_sat(&receivable, receivable,
					 commit_txfee(channel,
						      receivable, REMOTE)))
			return AMOUNT_MSAT(0);
	}

	/* They can't offer an HTLC less than what we will accept. */
	if (amount_msat_less(receivable, channel->our_config.htlc_minimum))
		return AMOUNT_MSAT(0);

	wumbo = feature_negotiated(channel->peer->ld->our_features,
				   channel->peer->their_features,
				   OPT_LARGE_CHANNELS);

	/* They can't offer an HTLC over the max payment threshold either. */
	if (amount_msat_greater(receivable, chainparams->max_payment)
	    && !wumbo) {
		receivable = chainparams->max_payment;
	}

	return receivable;
}

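/* Append a full JSON description of one channel under `key`. */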
static void NON_NULL_ARGS(1, 2, 4, 5) json_add_channel(struct command *cmd,
|
2024-03-25 01:37:52 +01:00
|
|
|
struct json_stream *response,
|
|
|
|
const char *key,
|
|
|
|
const struct channel *channel,
|
|
|
|
const struct peer *peer)
|
2019-02-07 15:41:53 +01:00
|
|
|
{
|
2024-06-19 02:36:35 +02:00
|
|
|
struct lightningd *ld = cmd->ld;
|
2022-07-29 05:44:17 +02:00
|
|
|
struct amount_msat funding_msat;
|
2020-09-16 23:28:36 +02:00
|
|
|
struct amount_sat peer_funded_sats;
|
2024-01-31 04:16:17 +01:00
|
|
|
const struct peer_update *peer_update;
|
2021-03-03 04:16:57 +01:00
|
|
|
u32 feerate;
|
2019-02-07 15:41:53 +01:00
|
|
|
|
|
|
|
json_object_start(response, key);
|
2024-03-25 01:37:52 +01:00
|
|
|
json_add_node_id(response, "peer_id", &peer->id);
|
|
|
|
json_add_bool(response, "peer_connected", peer->connected == PEER_CONNECTED);
|
|
|
|
if (peer->connected == PEER_CONNECTED) {
|
|
|
|
json_add_bool(response, "reestablished", channel->reestablished);
|
|
|
|
}
|
|
|
|
json_add_channel_type(response, "channel_type", channel->type);
|
|
|
|
if (channel->ignore_fee_limits) {
|
|
|
|
json_add_bool(response, "ignore_fee_limits", channel->ignore_fee_limits);
|
|
|
|
}
|
|
|
|
/* This reflects current gossip */
|
|
|
|
json_object_start(response, "updates");
|
|
|
|
json_object_start(response, "local");
|
|
|
|
json_add_amount_msat(response,
|
|
|
|
"htlc_minimum_msat",
|
|
|
|
channel->htlc_minimum_msat);
|
|
|
|
json_add_amount_msat(response,
|
|
|
|
"htlc_maximum_msat",
|
|
|
|
channel->htlc_maximum_msat);
|
|
|
|
json_add_u32(response, "cltv_expiry_delta", ld->config.cltv_expiry_delta);
|
|
|
|
json_add_amount_msat(response, "fee_base_msat",
|
|
|
|
amount_msat(channel->feerate_base));
|
|
|
|
json_add_u32(response, "fee_proportional_millionths",
|
|
|
|
channel->feerate_ppm);
|
|
|
|
json_object_end(response);
|
|
|
|
|
|
|
|
peer_update = channel_gossip_get_remote_update(channel);
|
|
|
|
if (peer_update) {
|
|
|
|
json_object_start(response, "remote");
|
2023-12-06 21:14:05 +01:00
|
|
|
json_add_amount_msat(response,
|
|
|
|
"htlc_minimum_msat",
|
2024-03-25 01:37:52 +01:00
|
|
|
peer_update->htlc_minimum_msat);
|
2023-12-06 21:14:05 +01:00
|
|
|
json_add_amount_msat(response,
|
|
|
|
"htlc_maximum_msat",
|
2024-03-25 01:37:52 +01:00
|
|
|
peer_update->htlc_maximum_msat);
|
|
|
|
json_add_u32(response, "cltv_expiry_delta", peer_update->cltv_delta);
|
2023-12-06 21:14:05 +01:00
|
|
|
json_add_amount_msat(response, "fee_base_msat",
|
2024-03-25 01:37:52 +01:00
|
|
|
amount_msat(peer_update->fee_base));
|
2023-12-06 21:14:05 +01:00
|
|
|
json_add_u32(response, "fee_proportional_millionths",
|
2024-03-25 01:37:52 +01:00
|
|
|
peer_update->fee_ppm);
|
2023-12-06 21:14:05 +01:00
|
|
|
json_object_end(response);
|
2024-03-25 01:37:52 +01:00
|
|
|
}
|
|
|
|
json_object_end(response);
|
2024-01-31 04:16:20 +01:00
|
|
|
|
2024-03-25 01:37:52 +01:00
|
|
|
if (channel->last_stable_connection != 0) {
|
|
|
|
json_add_u64(response, "last_stable_connection",
|
|
|
|
channel->last_stable_connection);
|
2023-01-12 02:15:10 +01:00
|
|
|
}
|
2023-12-06 21:14:05 +01:00
|
|
|
|
2019-02-07 15:41:53 +01:00
|
|
|
json_add_string(response, "state", channel_state_name(channel));
|
2019-06-27 01:57:49 +02:00
|
|
|
if (channel->last_tx && !invalid_last_tx(channel->last_tx)) {
|
2019-02-07 15:41:53 +01:00
|
|
|
struct bitcoin_txid txid;
|
|
|
|
bitcoin_txid(channel->last_tx, &txid);
|
|
|
|
|
|
|
|
json_add_txid(response, "scratch_txid", &txid);
|
2022-06-19 09:16:11 +02:00
|
|
|
json_add_amount_sat_msat(response, "last_tx_fee_msat",
|
2021-03-03 04:16:57 +01:00
|
|
|
bitcoin_tx_compute_fee(channel->last_tx));
|
2019-02-07 15:41:53 +01:00
|
|
|
}
|
2021-03-03 04:16:57 +01:00
|
|
|
|
2024-06-19 02:00:01 +02:00
|
|
|
json_add_bool(response, "lost_state", channel->has_future_per_commitment_point);
|
2021-03-03 04:16:57 +01:00
|
|
|
json_object_start(response, "feerate");
|
|
|
|
feerate = get_feerate(channel->fee_states, channel->opener, LOCAL);
|
|
|
|
json_add_u32(response, feerate_style_name(FEERATE_PER_KSIPA), feerate);
|
|
|
|
json_add_u32(response, feerate_style_name(FEERATE_PER_KBYTE),
|
|
|
|
feerate_to_style(feerate, FEERATE_PER_KBYTE));
|
|
|
|
json_object_end(response);
|
|
|
|
|
2019-02-07 15:41:53 +01:00
|
|
|
if (channel->owner)
|
|
|
|
json_add_string(response, "owner", channel->owner->name);
|
|
|
|
|
2022-05-04 15:51:11 +02:00
|
|
|
if (channel->scid)
|
2019-02-07 15:41:53 +01:00
|
|
|
json_add_short_channel_id(response, "short_channel_id",
|
2024-03-20 02:59:51 +01:00
|
|
|
*channel->scid);
|
2022-05-04 15:51:11 +02:00
|
|
|
|
|
|
|
/* If there is any way we can use the channel we'd better have
|
|
|
|
* a direction attached. Technically we could always add it,
|
|
|
|
* as it's just the lexicographic order between node_ids, but
|
|
|
|
* why bother if we can't use it? */
|
|
|
|
if (channel->scid || channel->alias[LOCAL] || channel->alias[REMOTE])
|
2019-02-07 15:41:53 +01:00
|
|
|
json_add_num(response, "direction",
|
2024-07-18 03:25:55 +02:00
|
|
|
node_id_idx(&ld->our_nodeid, &channel->peer->id));
|
2019-02-07 15:41:53 +01:00
|
|
|
|
|
|
|
json_add_string(response, "channel_id",
|
2024-03-20 01:47:52 +01:00
|
|
|
fmt_channel_id(tmpctx, &channel->cid));
|
2021-10-13 05:45:36 +02:00
|
|
|
json_add_txid(response, "funding_txid", &channel->funding.txid);
|
2022-01-13 23:35:29 +01:00
|
|
|
json_add_num(response, "funding_outnum", channel->funding.n);
|
2019-10-29 18:20:34 +01:00
|
|
|
|
2021-05-20 02:13:18 +02:00
|
|
|
if (!list_empty(&channel->inflights)) {
|
2021-05-19 23:51:05 +02:00
|
|
|
struct channel_inflight *initial, *inflight;
|
2021-07-09 21:13:20 +02:00
|
|
|
u32 last_feerate, next_feerate;
|
2021-02-24 03:58:28 +01:00
|
|
|
|
|
|
|
initial = list_top(&channel->inflights,
|
|
|
|
struct channel_inflight, list);
|
|
|
|
json_add_string(response, "initial_feerate",
|
2021-07-09 21:13:20 +02:00
|
|
|
tal_fmt(tmpctx, "%d%s",
|
|
|
|
initial->funding->feerate,
|
2021-02-24 03:58:28 +01:00
|
|
|
feerate_style_name(FEERATE_PER_KSIPA)));
|
2021-07-09 21:13:20 +02:00
|
|
|
|
|
|
|
last_feerate = channel_last_funding_feerate(channel);
|
|
|
|
assert(last_feerate > 0);
|
2021-02-24 03:58:28 +01:00
|
|
|
json_add_string(response, "last_feerate",
|
|
|
|
tal_fmt(tmpctx, "%d%s", last_feerate,
|
|
|
|
feerate_style_name(FEERATE_PER_KSIPA)));
|
2021-07-09 21:13:20 +02:00
|
|
|
|
2024-03-18 05:07:05 +01:00
|
|
|
/* BOLT #2:
|
|
|
|
* - MUST set `feerate` greater than or equal to 25/24
|
|
|
|
* times the `feerate` of the previously constructed
|
|
|
|
* transaction, rounded down.
|
2021-07-09 21:13:20 +02:00
|
|
|
*/
|
2024-03-18 05:07:05 +01:00
|
|
|
next_feerate = last_feerate * 25 / 24;
|
2021-07-09 21:13:20 +02:00
|
|
|
assert(next_feerate > last_feerate);
|
2021-02-24 03:58:28 +01:00
|
|
|
json_add_string(response, "next_feerate",
|
|
|
|
tal_fmt(tmpctx, "%d%s", next_feerate,
|
|
|
|
feerate_style_name(FEERATE_PER_KSIPA)));
|
|
|
|
|
2021-05-19 23:51:05 +02:00
|
|
|
/* List the inflights */
|
|
|
|
json_array_start(response, "inflight");
|
|
|
|
list_for_each(&channel->inflights, inflight, list) {
|
2021-05-20 02:11:03 +02:00
|
|
|
struct bitcoin_txid txid;
|
|
|
|
|
2021-05-19 23:51:05 +02:00
|
|
|
json_object_start(response, NULL);
|
|
|
|
json_add_txid(response, "funding_txid",
|
2021-10-13 05:45:36 +02:00
|
|
|
&inflight->funding->outpoint.txid);
|
2021-05-19 23:51:05 +02:00
|
|
|
json_add_num(response, "funding_outnum",
|
2021-10-13 05:45:36 +02:00
|
|
|
inflight->funding->outpoint.n);
|
2021-05-19 23:51:05 +02:00
|
|
|
json_add_string(response, "feerate",
|
|
|
|
tal_fmt(tmpctx, "%d%s",
|
|
|
|
inflight->funding->feerate,
|
|
|
|
feerate_style_name(
|
|
|
|
FEERATE_PER_KSIPA)));
|
2022-06-19 09:16:11 +02:00
|
|
|
json_add_amount_sat_msat(response,
|
2021-05-19 23:51:05 +02:00
|
|
|
"total_funding_msat",
|
|
|
|
inflight->funding->total_funds);
|
2022-06-19 09:16:11 +02:00
|
|
|
json_add_amount_sat_msat(response,
|
2021-05-19 23:51:05 +02:00
|
|
|
"our_funding_msat",
|
|
|
|
inflight->funding->our_funds);
|
2023-07-27 23:37:52 +02:00
|
|
|
json_add_s64(response,
|
|
|
|
"splice_amount",
|
|
|
|
inflight->funding->splice_amnt);
|
2021-05-20 02:11:03 +02:00
|
|
|
/* Add the expected commitment tx id also */
|
2023-10-27 21:40:11 +02:00
|
|
|
if (inflight->last_tx) {
|
|
|
|
bitcoin_txid(inflight->last_tx, &txid);
|
|
|
|
json_add_txid(response, "scratch_txid", &txid);
|
|
|
|
}
|
2021-05-19 23:51:05 +02:00
|
|
|
json_object_end(response);
|
|
|
|
}
|
|
|
|
json_array_end(response);
|
2021-02-24 03:58:28 +01:00
|
|
|
}
|
|
|
|
|
2019-10-29 18:20:34 +01:00
|
|
|
if (channel->shutdown_scriptpubkey[LOCAL]) {
|
|
|
|
char *addr = encode_scriptpubkey_to_addr(tmpctx,
|
2019-10-15 12:58:30 +02:00
|
|
|
chainparams,
|
2019-10-29 18:20:34 +01:00
|
|
|
channel->shutdown_scriptpubkey[LOCAL]);
|
|
|
|
if (addr)
|
|
|
|
json_add_string(response, "close_to_addr", addr);
|
|
|
|
json_add_hex_talarr(response, "close_to",
|
|
|
|
channel->shutdown_scriptpubkey[LOCAL]);
|
|
|
|
}
|
|
|
|
|
2019-02-07 15:41:53 +01:00
|
|
|
json_add_bool(
|
|
|
|
response, "private",
|
|
|
|
!(channel->channel_flags & CHANNEL_FLAGS_ANNOUNCE_CHANNEL));
|
|
|
|
|
2020-10-28 11:46:18 +01:00
|
|
|
/* opener and closer */
|
|
|
|
assert(channel->opener != NUM_SIDES);
|
|
|
|
json_add_string(response, "opener", channel->opener == LOCAL ?
|
|
|
|
"local" : "remote");
|
|
|
|
if (channel->closer != NUM_SIDES)
|
|
|
|
json_add_string(response, "closer", channel->closer == LOCAL ?
|
|
|
|
"local" : "remote");
|
|
|
|
|
2022-04-27 14:08:21 +02:00
|
|
|
if (channel->alias[LOCAL] || channel->alias[REMOTE]) {
|
|
|
|
json_object_start(response, "alias");
|
|
|
|
if (channel->alias[LOCAL])
|
|
|
|
json_add_short_channel_id(response, "local",
|
2024-03-20 02:59:51 +01:00
|
|
|
*channel->alias[LOCAL]);
|
2022-04-27 14:08:21 +02:00
|
|
|
if (channel->alias[REMOTE])
|
|
|
|
json_add_short_channel_id(response, "remote",
|
2024-03-20 02:59:51 +01:00
|
|
|
*channel->alias[REMOTE]);
|
2022-04-27 14:08:21 +02:00
|
|
|
json_object_end(response);
|
|
|
|
}
|
|
|
|
|
2020-08-21 03:57:20 +02:00
|
|
|
json_array_start(response, "features");
|
2021-09-09 07:29:35 +02:00
|
|
|
if (channel_has(channel, OPT_STATIC_REMOTEKEY))
|
2020-08-21 03:57:20 +02:00
|
|
|
json_add_string(response, NULL, "option_static_remotekey");
|
2024-06-19 02:01:01 +02:00
|
|
|
if (channel_has(channel, OPT_ANCHOR_OUTPUTS_DEPRECATED))
|
2020-08-21 03:57:20 +02:00
|
|
|
json_add_string(response, NULL, "option_anchor_outputs");
|
2024-06-19 02:36:35 +02:00
|
|
|
if (channel_has(channel, OPT_ANCHORS_ZERO_FEE_HTLC_TX)) {
|
|
|
|
if (command_deprecated_out_ok(cmd, "features", "v24.08", "v25.08"))
|
|
|
|
json_add_string(response, NULL, "option_anchors_zero_fee_htlc_tx");
|
|
|
|
json_add_string(response, NULL, "option_anchors");
|
|
|
|
}
|
2022-04-25 12:56:10 +02:00
|
|
|
if (channel_has(channel, OPT_ZEROCONF))
|
|
|
|
json_add_string(response, NULL, "option_zeroconf");
|
2023-04-10 02:20:56 +02:00
|
|
|
if (channel_has(channel, OPT_SCID_ALIAS))
|
|
|
|
json_add_string(response, NULL, "option_scid_alias");
|
2020-08-21 03:57:20 +02:00
|
|
|
json_array_end(response);
|
|
|
|
|
2021-10-13 05:45:36 +02:00
|
|
|
if (!amount_sat_sub(&peer_funded_sats, channel->funding_sats,
|
2020-09-16 23:28:36 +02:00
|
|
|
channel->our_funds)) {
|
|
|
|
log_broken(channel->log,
|
|
|
|
"Overflow subtracing funding %s, our funds %s",
|
2024-03-20 01:47:52 +01:00
|
|
|
fmt_amount_sat(tmpctx, channel->funding_sats),
|
|
|
|
fmt_amount_sat(tmpctx, channel->our_funds));
|
2020-09-16 23:28:36 +02:00
|
|
|
peer_funded_sats = AMOUNT_SAT(0);
|
|
|
|
}
|
	json_object_start(response, "funding");

	if (channel->lease_commit_sig) {
		struct amount_sat funds, total;
		if (!amount_msat_to_sat(&funds, channel->push)) {
			log_broken(channel->log,
				   "Can't convert channel->push %s to sats"
				   " (lease fees?)",
				   fmt_amount_msat(tmpctx, channel->push));
			funds = AMOUNT_SAT(0);
		}

		if (channel->opener == LOCAL) {
			if (!amount_sat_add(&total, funds, channel->our_funds)) {
				log_broken(channel->log,
					   "Overflow adding our_funds to push");
				total = channel->our_funds;
			}
			json_add_amount_sat_msat(response, "local_funds_msat", total);

			if (!amount_sat_sub(&total, peer_funded_sats, funds)) {
				log_broken(channel->log,
					   "Underflow sub'ing push from"
					   " peer's funds");
				total = peer_funded_sats;
			}
			json_add_amount_sat_msat(response, "remote_funds_msat", total);

			json_add_amount_msat(response, "fee_paid_msat",
					     channel->push);
		} else {
			if (!amount_sat_add(&total, peer_funded_sats, funds)) {
				log_broken(channel->log,
					   "Overflow adding peer funds to push");
				total = peer_funded_sats;
			}
			json_add_amount_sat_msat(response, "remote_funds_msat", total);

			if (!amount_sat_sub(&total, channel->our_funds, funds)) {
				log_broken(channel->log,
					   "Underflow sub'ing push from"
					   " our_funds");
				total = channel->our_funds;
			}
			json_add_amount_sat_msat(response, "local_funds_msat", total);
			json_add_amount_msat(response, "fee_rcvd_msat",
					     channel->push);
		}
	} else {
		json_add_amount_sat_msat(response, "local_funds_msat",
					 channel->our_funds);
		json_add_amount_sat_msat(response, "remote_funds_msat",
					 peer_funded_sats);
		json_add_amount_msat(response, "pushed_msat",
				     channel->push);
	}

	json_object_end(response);
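	/* Worked example for the lease branch above (opener == LOCAL,
	 * illustrative numbers only): funding 100,000sat, our_funds
	 * 70,000sat, push (the lease fee) 1,000sat =>
	 * local_funds_msat = 71,000sat, remote_funds_msat = 29,000sat,
	 * fee_paid_msat = 1,000sat. */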
	if (!amount_sat_to_msat(&funding_msat, channel->funding_sats)) {
		log_broken(channel->log,
			   "Overflow converting funding %s",
			   fmt_amount_sat(tmpctx, channel->funding_sats));
		funding_msat = AMOUNT_MSAT(0);
	}
	json_add_amount_msat(response, "to_us_msat", channel->our_msat);
	json_add_amount_msat(response,
			     "min_to_us_msat", channel->msat_to_us_min);
	json_add_amount_msat(response,
			     "max_to_us_msat", channel->msat_to_us_max);
	json_add_amount_msat(response, "total_msat", funding_msat);

	/* routing fees */
	json_add_amount_msat(response, "fee_base_msat",
			     amount_msat(channel->feerate_base));
	json_add_u32(response, "fee_proportional_millionths",
		     channel->feerate_ppm);
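	/* Forwarding-fee arithmetic (BOLT #7): routing `amt` msat through
	 * this channel costs
	 *   fee_base_msat + amt * fee_proportional_millionths / 1000000
	 * e.g. base 1,000msat and 10ppm on a 1,000,000msat HTLC => 1,010msat. */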
	/* channel config */
	json_add_amount_sat_msat(response, "dust_limit_msat",
				 channel->our_config.dust_limit);
	json_add_amount_msat(response, "max_total_htlc_in_msat",
			     channel->our_config.max_htlc_value_in_flight);

	/* The `channel_reserve_satoshis` is imposed on the *other* side
	 * (see the `channel_reserve_msat` function: it uses `!side` to
	 * flip sides).  So our configuration `channel_reserve_satoshis`
	 * is imposed on their side, while their configuration
	 * `channel_reserve_satoshis` is imposed on ours. */
	json_add_amount_sat_msat(response,
				 "their_reserve_msat",
				 channel->our_config.channel_reserve);
	json_add_amount_sat_msat(response,
				 "our_reserve_msat",
				 channel->channel_info.their_config.channel_reserve);
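	/* Example (typical defaults, not guaranteed): a 1% reserve on a
	 * 100,000sat channel makes both reserves 1,000sat; neither side can
	 * spend its channel balance below its own reserve. */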
	/* append spendable to JSON output */
	json_add_amount_msat(response,
			     "spendable_msat",
			     channel_amount_spendable(channel));

	/* append receivable to JSON output */
	json_add_amount_msat(response,
			     "receivable_msat",
			     channel_amount_receivable(channel));

	json_add_amount_msat(response,
			     "minimum_htlc_in_msat",
			     channel->our_config.htlc_minimum);
	json_add_amount_msat(response,
			     "minimum_htlc_out_msat",
			     channel->htlc_minimum_msat);
	json_add_amount_msat(response,
			     "maximum_htlc_out_msat",
			     channel->htlc_maximum_msat);

	/* The `to_self_delay` is imposed on the *other*
	 * side, so our configuration `to_self_delay` is
	 * imposed on their side, while their configuration
	 * `to_self_delay` is imposed on ours. */
	json_add_num(response, "their_to_self_delay",
		     channel->our_config.to_self_delay);
	json_add_num(response, "our_to_self_delay",
		     channel->channel_info.their_config.to_self_delay);
	json_add_num(response, "max_accepted_htlcs",
		     channel->our_config.max_accepted_htlcs);
	json_array_start(response, "state_changes");
	for (size_t i = 0; i < tal_count(channel->state_changes); i++) {
		json_object_start(response, NULL);
		json_add_timeiso(response, "timestamp",
				 channel->state_changes[i].timestamp);
		json_add_string(response, "old_state",
				channel_state_str(channel->state_changes[i].old_state));
		json_add_string(response, "new_state",
				channel_state_str(channel->state_changes[i].new_state));
		json_add_string(response, "cause",
				channel_change_state_reason_str(channel->state_changes[i].cause));
		json_add_string(response, "message", channel->state_changes[i].message);
		json_object_end(response);
	}
	json_array_end(response);
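	/* Illustrative entry (values are examples, not a captured trace):
	 *   { "timestamp": "2024-01-01T00:00:00.000Z",
	 *     "old_state": "CHANNELD_AWAITING_LOCKIN",
	 *     "new_state": "CHANNELD_NORMAL",
	 *     "cause": "user", "message": "Lockin complete" } */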
	json_array_start(response, "status");
	for (size_t i = 0; i < ARRAY_SIZE(channel->billboard.permanent); i++) {
		if (!channel->billboard.permanent[i])
			continue;
		json_add_string(response, NULL,
				channel->billboard.permanent[i]);
	}
	if (channel->billboard.transient)
		json_add_string(response, NULL, channel->billboard.transient);
	json_array_end(response);

	/* Provide channel statistics */
	json_add_u64(response, "in_payments_offered",
		     channel->stats.in_payments_offered);
	json_add_amount_msat(response,
			     "in_offered_msat",
			     channel->stats.in_msatoshi_offered);
	json_add_u64(response, "in_payments_fulfilled",
		     channel->stats.in_payments_fulfilled);
	json_add_amount_msat(response,
			     "in_fulfilled_msat",
			     channel->stats.in_msatoshi_fulfilled);
	json_add_u64(response, "out_payments_offered",
		     channel->stats.out_payments_offered);
	json_add_amount_msat(response,
			     "out_offered_msat",
			     channel->stats.out_msatoshi_offered);
	json_add_u64(response, "out_payments_fulfilled",
		     channel->stats.out_payments_fulfilled);
	json_add_amount_msat(response,
			     "out_fulfilled_msat",
			     channel->stats.out_msatoshi_fulfilled);

	json_add_htlcs(ld, response, channel);
	json_object_end(response);
}
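/* Abbreviated shape of the channel object built above (illustrative only):
 *   { "features": [...], "funding": {...}, "to_us_msat": ...,
 *     "total_msat": ..., "state_changes": [...], "status": [...],
 *     "in_payments_offered": ..., "htlcs": [...] }
 */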
struct peer_connected_hook_payload {
	struct lightningd *ld;
	struct wireaddr_internal addr;
	struct wireaddr *remote_addr;
	bool incoming;
	/* We don't keep a pointer to peer: it might be freed! */
	struct node_id peer_id;
	u8 *error;
};
static void
peer_connected_serialize(struct peer_connected_hook_payload *payload,
			 struct json_stream *stream, struct plugin *plugin)
{
	json_object_start(stream, "peer");
	json_add_node_id(stream, "id", &payload->peer_id);
	json_add_string(stream, "direction", payload->incoming ? "in" : "out");
	json_add_string(
	    stream, "addr",
	    fmt_wireaddr_internal(stream, &payload->addr));
	if (payload->remote_addr)
		json_add_string(
		    stream, "remote_addr",
		    fmt_wireaddr(stream, payload->remote_addr));
	/* Since this is start of hook, peer is always in table! */
	json_add_hex_talarr(stream, "features",
			    peer_by_id(payload->ld, &payload->peer_id)
			    ->their_features);
	json_object_end(stream); /* .peer */
}
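/* The serialized hook payload looks like (illustrative values):
 *   "peer": { "id": "02abc...", "direction": "in",
 *             "addr": "127.0.0.1:9735", "features": "0800..." }
 * with "remote_addr" added when the peer reported one. */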
/* Talk to connectd about an active channel */
static void connect_activate_subd(struct lightningd *ld, struct channel *channel)
{
	const u8 *error;
	int fds[2];

	/* If we have a canned error for this channel, send it now */
	if (channel->error) {
		error = channel->error;
		goto send_error;
	}

	switch (channel->state) {
	case ONCHAIN:
	case FUNDING_SPEND_SEEN:
	case CLOSINGD_COMPLETE:
	case CLOSED:
	case DUALOPEND_OPEN_INIT:
		/* Caller checked channel_state_wants_peercomms, so we
		 * can't be here. */
		abort();
	case AWAITING_UNILATERAL:
		/* channel->error is not saved in db, so this can
		 * happen if we restart. */
		error = towire_errorfmt(tmpctx, &channel->cid,
					"Awaiting unilateral close");
		goto send_error;

	case DUALOPEND_OPEN_COMMIT_READY:
	case DUALOPEND_OPEN_COMMITTED:
	case DUALOPEND_AWAITING_LOCKIN:
		assert(!channel->owner);
		if (socketpair(AF_LOCAL, SOCK_STREAM, 0, fds) != 0) {
			log_broken(channel->log,
				   "Failed to create socketpair: %s",
				   strerror(errno));
			error = towire_warningfmt(tmpctx, &channel->cid,
						  "Trouble in paradise?");
			goto send_error;
		}
		if (peer_restart_dualopend(channel->peer,
					   new_peer_fd(tmpctx, fds[0]),
					   channel, false))
			goto tell_connectd;
		close(fds[1]);
		return;

	case CHANNELD_AWAITING_LOCKIN:
	case CHANNELD_NORMAL:
	case CHANNELD_AWAITING_SPLICE:
	case CHANNELD_SHUTTING_DOWN:
	case CLOSINGD_SIGEXCHANGE:
		assert(!channel->owner);
		if (socketpair(AF_LOCAL, SOCK_STREAM, 0, fds) != 0) {
			log_broken(channel->log,
				   "Failed to create socketpair: %s",
				   strerror(errno));
			error = towire_warningfmt(tmpctx, &channel->cid,
						  "Trouble in paradise?");
			goto send_error;
		}
		if (peer_start_channeld(channel,
					new_peer_fd(tmpctx, fds[0]),
					NULL, true,
					NULL)) {
			goto tell_connectd;
		}
		close(fds[1]);
		return;
	}
	abort();

tell_connectd:
	subd_send_msg(ld->connectd,
		      take(towire_connectd_peer_connect_subd(NULL,
							     &channel->peer->id,
							     channel->peer->connectd_counter,
							     &channel->cid)));
	subd_send_fd(ld->connectd, fds[1]);
	return;

send_error:
	log_debug(channel->log, "Telling connectd to send error %s",
		  tal_hex(tmpctx, error));
	/* Get connectd to send error and close. */
	subd_send_msg(ld->connectd,
		      take(towire_connectd_peer_send_msg(NULL, &channel->peer->id,
							 channel->peer->connectd_counter,
							 error)));
	subd_send_msg(ld->connectd,
		      take(towire_connectd_discard_peer(NULL,
							&channel->peer->id,
							channel->peer->connectd_counter)));
}
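/* Design note: each (re)activation above creates a socketpair; fds[0] goes
 * to the freshly started subdaemon (dualopend/channeld) as its peer
 * connection, and fds[1] is handed to connectd via subd_send_fd() so it can
 * shuttle peer messages into that socket.  If the subd fails to start, we
 * must close fds[1] ourselves, as the code above does. */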
static void peer_connected_hook_final(struct peer_connected_hook_payload *payload STEALS)
{
	struct lightningd *ld = payload->ld;
	struct channel *channel;
	struct wireaddr_internal addr = payload->addr;
	struct peer *peer;
	u8 *error;

	/* Whatever happens, we free payload (it's currently a child
	 * of the peer, which may be freed if we fail to start
	 * subd). */
	tal_steal(tmpctx, payload);

	/* Peer might have gone away while we were waiting for plugin! */
	peer = peer_by_id(ld, &payload->peer_id);
	if (!peer)
		return;

	/* If we disconnected in the meantime, forget about it.
	 * (disconnect will have failed any connect commands).
	 * And if it has reconnected, and we're the second time the
	 * hook has been called, it'll be PEER_CONNECTED. */
	if (peer->connected != PEER_CONNECTING)
		return;

	/* Check for specific errors of a hook */
	if (payload->error) {
		error = payload->error;
		goto send_error;
	}

	/* Now we finally consider ourselves connected! */
	assert(peer->connected == PEER_CONNECTING);
	peer->connected = PEER_CONNECTED;

	/* Succeed any connect() commands */
	connect_succeeded(ld, peer, payload->incoming, &payload->addr);

	/* Notify anyone who cares */
	notify_connect(ld, &peer->id, payload->incoming, &addr);

	/* Developer hack to fail all channels on permfail line. */
	if (dev_disconnect_permanent(ld)) {
		struct channel *c;
		list_for_each(&peer->channels, c, list) {
			subd_send_msg(ld->connectd,
				      take(towire_connectd_peer_send_msg(NULL, &peer->id,
									 peer->connectd_counter,
									 /* cppcheck-suppress uninitvar - false positive on c */
									 c->error)));
			subd_send_msg(ld->connectd,
				      take(towire_connectd_discard_peer(NULL,
									&peer->id,
									peer->connectd_counter)));
			channel_fail_permanent(c, REASON_LOCAL,
					       "dev_disconnect permfail");
		}
		return;
	}

	/* connect appropriate subds for all (active) channels! */
	list_for_each(&peer->channels, channel, list) {
		/* FIXME: It can race by opening a channel before this! */
		if (channel_state_wants_peercomms(channel->state) && !channel->owner) {
			log_debug(channel->log, "Peer has reconnected, state %s: connecting subd",
				  channel_state_name(channel));

			connect_activate_subd(ld, channel);
		}
	}

	return;

send_error:
	log_peer_debug(ld->log, &peer->id, "Telling connectd to send error %s",
		       tal_hex(tmpctx, error));
	/* Get connectd to send error and close. */
	subd_send_msg(ld->connectd,
		      take(towire_connectd_peer_send_msg(NULL, &peer->id,
							 peer->connectd_counter,
							 error)));
	subd_send_msg(ld->connectd,
		      take(towire_connectd_discard_peer(NULL,
							&peer->id,
							peer->connectd_counter)));
}
static bool
peer_connected_hook_deserialize(struct peer_connected_hook_payload *payload,
				const char *buffer,
				const jsmntok_t *toks)
{
	struct lightningd *ld = payload->ld;

	/* already rejected by prior plugin hook in the chain */
	if (payload->error != NULL)
		return true;

	if (!toks || !buffer)
		return true;

	/* If we had a hook, interpret result. */
	const jsmntok_t *t_res = json_get_member(buffer, toks, "result");
	const jsmntok_t *t_err = json_get_member(buffer, toks, "error_message");

	/* fail */
	if (!t_res)
		fatal("Plugin returned an invalid response to the "
		      "peer_connected hook: %s", buffer);

	/* reject */
	if (json_tok_streq(buffer, t_res, "disconnect")) {
		payload->error = (u8*)"";
		if (t_err) {
			payload->error = towire_warningfmt(tmpctx, NULL, "%.*s",
							   t_err->end - t_err->start,
							   buffer + t_err->start);
		}
		log_debug(ld->log, "peer_connected hook rejects and says '%s'",
			  payload->error);
		/* At this point we suppress other plugins in the chain and
		 * directly move to final */
		peer_connected_hook_final(payload);
		return false;
	} else if (!json_tok_streq(buffer, t_res, "continue"))
		fatal("Plugin returned an invalid response to the "
		      "peer_connected hook: %s", buffer);

	/* call next hook */
	return true;
}
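/* A plugin rejecting the connection would return (illustrative):
 *   {"result": "disconnect", "error_message": "not welcome here"}
 * while {"result": "continue"} lets the next hook in the chain run. */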
/* Have they/we committed funds to the channel? */
static bool channel_state_relationship(enum channel_state state)
{
	switch (state) {
	case DUALOPEND_OPEN_INIT:
	case DUALOPEND_OPEN_COMMIT_READY:
	case DUALOPEND_OPEN_COMMITTED:
	case DUALOPEND_AWAITING_LOCKIN:
	case CHANNELD_AWAITING_LOCKIN:
		return false;
	case CLOSINGD_COMPLETE:
	case AWAITING_UNILATERAL:
	case FUNDING_SPEND_SEEN:
	case ONCHAIN:
	case CLOSED:
	case CHANNELD_NORMAL:
	case CHANNELD_AWAITING_SPLICE:
	case CLOSINGD_SIGEXCHANGE:
	case CHANNELD_SHUTTING_DOWN:
		return true;
	}

	abort();
}

/* We choose the most popular address we've been given (if at least 2 give
 * it), preferring peers which have a channel with us */
struct discovered_addr {
	bool preferred;
	/* Port is uniformly set to our configured port in here. */
	struct wireaddr addr;
};

static int daddr_cmp(const struct discovered_addr *a,
		     const struct discovered_addr *b,
		     void *unused)
{
	return wireaddr_cmp_type(&a->addr, &b->addr, NULL);
}

static const struct wireaddr *best_remote_addr(const tal_t *ctx,
					       struct lightningd *ld,
					       enum wire_addr_type atype)
{
	struct peer *peer;
	struct peer_node_id_map_iter it;
	struct discovered_addr *daddrs;
	const struct wireaddr *best, *prev;
	size_t best_score, cur_score, preferred_bonus;

	daddrs = tal_arr(tmpctx, struct discovered_addr, 0);
	for (peer = peer_node_id_map_first(ld->peers, &it);
	     peer;
	     peer = peer_node_id_map_next(ld->peers, &it)) {
		struct discovered_addr daddr;
		if (!peer->remote_addr)
			continue;
		if (peer->remote_addr->type != atype)
			continue;
		daddr.preferred = peer_any_channel(peer,
						   channel_state_relationship,
						   NULL);
		daddr.addr = *peer->remote_addr;
		daddr.addr.port = ld->config.ip_discovery_port;
		log_debug(ld->log, "best_remote_addr: peer %s gave addr %s (%s)",
			  fmt_node_id(tmpctx, &peer->id),
			  fmt_wireaddr(tmpctx, &daddr.addr),
			  daddr.preferred ? "preferred" : "no chan");
		tal_arr_expand(&daddrs, daddr);
	}

	/* Sort into matching addresses */
	asort(daddrs, tal_count(daddrs), daddr_cmp, NULL);

	/* All the non-preferred peers cannot outvote 1 preferred peer */
	preferred_bonus = tal_count(daddrs);
	best_score = cur_score = 0;
	best = prev = NULL;
	for (size_t i = 0; i < tal_count(daddrs); i++) {
		if (prev && !wireaddr_eq(prev, &daddrs[i].addr)) {
			if (cur_score > best_score) {
				best_score = cur_score;
				best = prev;
			}
			cur_score = 0;
		}
		cur_score += daddrs[i].preferred ? preferred_bonus : 1;
		prev = &daddrs[i].addr;
	}
	if (cur_score > best_score) {
		best_score = cur_score;
		best = prev;
	}

	if (!best) {
		log_debug(ld->log,
			  "node_address: no peers gave remote addresses");
		return NULL;
	}

	/* Does it agree with what we already know? */
	if (wireaddr_arr_contains(ld->announceable, best)) {
		log_debug(ld->log,
			  "node_address: best address already being announced");
		return NULL;
	}

	/* This means we got it from one preferred peer and at least one other */
	if (best_score > preferred_bonus) {
		log_debug(ld->log,
			  "node_address: %zu peers gave remote addresses,"
			  " best score %zu (preferred)",
			  tal_count(daddrs), best_score);
		return tal_dup(ctx, struct wireaddr, best);
	}

	/* No preferred peers gave us addresses? Use > 1 untrusted */
	if (best_score < preferred_bonus && best_score > 1) {
		log_debug(ld->log,
			  "node_address: %zu peers gave remote addresses,"
			  " best score %zu (no preferred)",
			  tal_count(daddrs), best_score);
		return tal_dup(ctx, struct wireaddr, best);
	}

	log_debug(ld->log,
		  "node_address: %zu peers gave remote addresses,"
		  " best score %zu: not using",
		  tal_count(daddrs), best_score);
	return NULL;
}
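/* Worked example of the voting above: say 4 peers report addresses, so
 * preferred_bonus = 4.  A preferred peer and one stranger both report X,
 * two strangers report Y: score(X) = 4 + 1 = 5, score(Y) = 1 + 1 = 2.
 * X wins with best_score > preferred_bonus, i.e. "one preferred peer and
 * at least one other", so X is announced. */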
/* Consider `remote_addr` from peer: if it could change things, reconsider
 * what our discovered IP is.  Returns new address, or NULL. */
static const struct wireaddr *update_remote_addr(struct lightningd *ld,
						 const struct wireaddr *remote_addr)
{
	/* failsafe to prevent privacy leakage. */
	if (ld->always_use_proxy ||
	    ld->config.ip_discovery == OPT_AUTOBOOL_FALSE)
		return NULL;

	switch (remote_addr->type) {
	case ADDR_TYPE_IPV4:
		/* If it's telling us what we already know, don't reevaluate */
		if (!ld->discovered_ip_v4
		    || !wireaddr_eq(ld->discovered_ip_v4, remote_addr)) {
			ld->discovered_ip_v4 = tal_free(ld->discovered_ip_v4);
			ld->discovered_ip_v4 = best_remote_addr(ld, ld, ADDR_TYPE_IPV4);
			return ld->discovered_ip_v4;
		}
		return NULL;
	case ADDR_TYPE_IPV6:
		/* If it's telling us what we already know, don't reevaluate */
		if (!ld->discovered_ip_v6
		    || !wireaddr_eq(ld->discovered_ip_v6, remote_addr)) {
			ld->discovered_ip_v6 = tal_free(ld->discovered_ip_v6);
			ld->discovered_ip_v6 = best_remote_addr(ld, ld, ADDR_TYPE_IPV6);
			return ld->discovered_ip_v6;
		}
		return NULL;
	/* ignore all other cases */
	case ADDR_TYPE_TOR_V2_REMOVED:
	case ADDR_TYPE_TOR_V3:
	case ADDR_TYPE_DNS:
		return NULL;
	}
	abort();
}
REGISTER_PLUGIN_HOOK(peer_connected,
		     peer_connected_hook_deserialize,
		     peer_connected_hook_final,
		     peer_connected_serialize,
		     struct peer_connected_hook_payload *);
/* Connectd tells us a peer has connected: it never hands us duplicates, since
 * it holds them until we say peer_disconnected. */
void peer_connected(struct lightningd *ld, const u8 *msg)
{
	struct node_id id;
	u8 *their_features;
	struct peer *peer;
	struct peer_connected_hook_payload *hook_payload;
	u64 connectd_counter;
	const char *cmd_id;

	hook_payload = tal(NULL, struct peer_connected_hook_payload);
	hook_payload->ld = ld;
	hook_payload->error = NULL;
	if (!fromwire_connectd_peer_connected(hook_payload, msg,
					      &id, &connectd_counter,
					      &hook_payload->addr,
					      &hook_payload->remote_addr,
					      &hook_payload->incoming,
					      &their_features))
		fatal("Connectd gave bad CONNECT_PEER_CONNECTED message %s",
		      tal_hex(msg, msg));

	/* When a peer disconnects, we give subds time to clean themselves up
	 * (this lets connectd ensure they've seen the final messages).  But
	 * now it's reconnected, we've gotta force them out. */
	peer_channels_cleanup(ld, &id);

	/* If we're already dealing with this peer, hand off to correct
	 * subdaemon.  Otherwise, we'll hand to openingd to wait there. */
	peer = peer_by_id(ld, &id);
	if (!peer)
		peer = new_peer(ld, 0, &id, &hook_payload->addr,
				take(their_features), hook_payload->incoming);
	else {
		tal_free(peer->their_features);
		peer->their_features = tal_steal(peer, their_features);
	}

	/* We track this, because messages can race between connectd and us.
	 * For example, we could tell it to attach a subd, but it's actually
	 * already reconnected: we would tell it again when we read the
	 * "peer_connected" message, and it would get upset (plus, our first
	 * subd wouldn't die as expected).  So we echo this back to connectd
	 * on peer commands, and it knows to ignore if it's wrong. */
	peer->connectd_counter = connectd_counter;

	/* We mark peer in "connecting" state until hooks have passed. */
	assert(peer->connected == PEER_DISCONNECTED);
	peer->connected = PEER_CONNECTING;

	/* Update peer address and direction */
	peer->addr = hook_payload->addr;
	peer->connected_incoming = hook_payload->incoming;
	peer->remote_addr = tal_free(peer->remote_addr);
	hook_payload->peer_id = id;

	/* If there's a connect command, use its id as basis for hook id */
	cmd_id = connect_any_cmd_id(tmpctx, ld, peer);

	/* Log and update remote_addr for NAT/IP discovery. */
	if (hook_payload->remote_addr) {
		const struct wireaddr *best;
		log_peer_debug(ld->log, &id, "Peer says it sees our address as: %s",
			       fmt_wireaddr(tmpctx, hook_payload->remote_addr));
		peer->remote_addr = tal_dup(peer, struct wireaddr,
					    hook_payload->remote_addr);
		best = update_remote_addr(ld, peer->remote_addr);
		if (best) {
			log_debug(ld->log,
				  "Update our node_announcement for discovered address: %s",
				  fmt_wireaddr(tmpctx, best));
			channel_gossip_node_announce(ld);
		}
	}

	plugin_hook_call_peer_connected(ld, cmd_id, hook_payload);
}
static void send_reestablish(struct lightningd *ld, struct channel *channel)
{
	u8 *msg;
	struct secret last_remote_per_commit_secret;
	u64 num_revocations;

	/* BOLT #2:
	 * - if `next_revocation_number` equals 0:
	 *   - MUST set `your_last_per_commitment_secret` to all zeroes
	 * - otherwise:
	 *   - MUST set `your_last_per_commitment_secret` to the last
	 *     `per_commitment_secret` it received
	 */
	num_revocations = revocations_received(&channel->their_shachain.chain);
	if (num_revocations == 0)
		memset(&last_remote_per_commit_secret, 0,
		       sizeof(last_remote_per_commit_secret));
	else if (!shachain_get_secret(&channel->their_shachain.chain,
				      num_revocations-1,
				      &last_remote_per_commit_secret)) {
		channel_fail_permanent(channel,
				       REASON_LOCAL,
				       "Could not get revocation secret %"PRIu64,
				       num_revocations-1);
		return;
	}

	msg = towire_channel_reestablish(tmpctx, &channel->cid,
					 channel->next_index[LOCAL],
					 num_revocations,
					 &last_remote_per_commit_secret,
					 &channel->channel_info.remote_per_commit,
					 /* No upgrade for you, since we're closed! */
					 NULL);
	subd_send_msg(ld->connectd,
		      take(towire_connectd_peer_send_msg(NULL, &channel->peer->id,
							 channel->peer->connectd_counter,
							 msg)));
}
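/* Why answer reestablish for a closed channel at all?  A peer which lost
 * state can compare our `channel_reestablish` (in particular
 * `your_last_per_commitment_secret`) against its own records, detect that
 * it has fallen behind, and recover funds from our unilateral close; see
 * the message-retransmission requirements in BOLT #2. */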
/* connectd tells us a peer has a message and we've not already attached
 * a subd.  Normally this is a race, but it happens for real when opening
 * a new channel, or referring to a channel we no longer want to talk to
 * it about. */
void peer_spoke(struct lightningd *ld, const u8 *msg)
{
	struct node_id id;
	u16 msgtype;
	u64 connectd_counter;
	struct channel *channel;
	struct channel_id channel_id;
	struct peer *peer;
	bool dual_fund;
	u8 *error;
	int fds[2];
	char *errmsg;

	if (!fromwire_connectd_peer_spoke(msg, msg, &id, &connectd_counter, &msgtype, &channel_id, &errmsg))
		fatal("Connectd gave bad CONNECTD_PEER_SPOKE message %s",
		      tal_hex(msg, msg));

	/* We must know it, and it must be the right connectd_id */
	peer = peer_by_id(ld, &id);
	assert(peer->connectd_counter == connectd_counter);

	/* Do we know what channel they're talking about? */
	channel = find_channel_by_id(peer, &channel_id);
	if (channel) {
		/* In this case, we'll send an error below, but send reestablish reply first
		 * in case they lost their state and need it */
		if (msgtype == WIRE_CHANNEL_REESTABLISH && channel_state_closed(channel->state))
			send_reestablish(ld, channel);

		/* If we have a canned error for this channel, send it now */
		if (channel->error) {
			error = channel->error;
			goto send_error;
		}

		if (channel_state_wants_peercomms(channel->state)) {
			/* If they send an error, handle it immediately. */
			if (errmsg) {
				channel_fail_permanent(channel, REASON_REMOTE,
						       "They sent %s", errmsg);
				return;
			}

			/* If channel is active there are two possibilities:
			 * 1. We have started subd, but channeld hasn't processed
			 *    the connectd_peer_connect_subd message yet.
			 * 2. subd exited */
			if (channel->owner) {
				/* We raced... */
				return;
			}

			log_debug(channel->log, "channel already active");
			if (channel->state == DUALOPEND_AWAITING_LOCKIN) {
				if (socketpair(AF_LOCAL, SOCK_STREAM, 0, fds) != 0) {
					log_broken(ld->log,
						   "Failed to create socketpair: %s",
						   strerror(errno));
					error = towire_warningfmt(tmpctx, &channel_id,
								  "Trouble in paradise?");
					goto send_error;
				}
				if (peer_restart_dualopend(peer, new_peer_fd(tmpctx, fds[0]), channel, false))
					goto tell_connectd;
				/* FIXME: Send informative error? */
				close(fds[1]);
			}
			return;
		}

		/* Send generic error. */
		error = towire_errorfmt(tmpctx, &channel_id,
					"channel in state %s",
					channel_state_name(channel));
		goto send_error;
	}

	dual_fund = feature_negotiated(ld->our_features,
				       peer->their_features,
				       OPT_DUAL_FUND);

	/* OK, it's an unknown channel.  Create a new one if they're trying. */
	switch (msgtype) {
	case WIRE_OPEN_CHANNEL:
		if (dual_fund) {
			error = towire_errorfmt(tmpctx, &channel_id,
						"OPT_DUAL_FUND: cannot use open_channel");
			goto send_error;
		}
		if (peer->uncommitted_channel) {
			error = towire_errorfmt(tmpctx, &channel_id,
						"Multiple simultaneous opens not supported");
			goto send_error;
		}
		peer->uncommitted_channel = new_uncommitted_channel(peer);
		peer->uncommitted_channel->cid = channel_id;
		if (socketpair(AF_LOCAL, SOCK_STREAM, 0, fds) != 0) {
			log_broken(ld->log,
				   "Failed to create socketpair: %s",
				   strerror(errno));
			error = towire_warningfmt(tmpctx, &channel_id,
						  "Trouble in paradise?");
			goto send_error;
		}
		if (peer_start_openingd(peer, new_peer_fd(tmpctx, fds[0]))) {
			goto tell_connectd;
		}
		/* FIXME: Send informative error? */
		close(fds[1]);
		return;

	case WIRE_OPEN_CHANNEL2:
		if (!dual_fund) {
			error = towire_errorfmt(tmpctx, &channel_id,
						"Didn't negotiate OPT_DUAL_FUND: cannot use open_channel2");
			goto send_error;
		}
		channel = new_unsaved_channel(peer,
					      peer->ld->config.fee_base,
					      peer->ld->config.fee_per_satoshi);
		channel->cid = channel_id;
		if (socketpair(AF_LOCAL, SOCK_STREAM, 0, fds) != 0) {
			log_broken(ld->log,
				   "Failed to create socketpair: %s",
				   strerror(errno));
			error = towire_warningfmt(tmpctx, &channel_id,
						  "Trouble in paradise?");
			goto send_error;
		}
		if (peer_start_dualopend(peer, new_peer_fd(tmpctx, fds[0]), channel))
			goto tell_connectd;
		/* FIXME: Send informative error? */
		close(fds[1]);
		return;
	}

	/* Weird message? Log and reply with error. */
	log_peer_unusual(ld->log, &peer->id,
			 "Unknown channel %s for %s",
			 fmt_channel_id(tmpctx,
					&channel_id),
			 peer_wire_name(msgtype));
	error = towire_errorfmt(tmpctx, &channel_id,
				"Unknown channel for %s", peer_wire_name(msgtype));

send_error:
	log_peer_debug(ld->log, &peer->id, "Telling connectd to send error %s",
		       tal_hex(tmpctx, error));
	/* Get connectd to send error and close. */
	subd_send_msg(ld->connectd,
		      take(towire_connectd_peer_send_msg(NULL, &peer->id,
							 peer->connectd_counter,
							 error)));
	subd_send_msg(ld->connectd,
		      take(towire_connectd_discard_peer(NULL,
							&peer->id,
							peer->connectd_counter)));
	return;

tell_connectd:
	subd_send_msg(ld->connectd,
		      take(towire_connectd_peer_connect_subd(NULL, &id,
							     peer->connectd_counter,
							     &channel_id)));
	subd_send_fd(ld->connectd, fds[1]);
}
struct disconnect_command {
	struct list_node list;
	/* Command structure. This is the parent of the close command. */
	struct command *cmd;
	/* node being disconnected. */
	struct node_id id;
};

static void destroy_disconnect_command(struct disconnect_command *dc)
{
	list_del(&dc->list);
}

void peer_disconnect_done(struct lightningd *ld, const u8 *msg)
{
	struct node_id id;
	u64 connectd_counter;
	struct disconnect_command *i, *next;
	struct peer *p;

	if (!fromwire_connectd_peer_disconnect_done(msg, &id, &connectd_counter))
		fatal("Connectd gave bad PEER_DISCONNECT_DONE message %s",
		      tal_hex(msg, msg));

	/* If we still have peer, it's disconnected now */
	/* FIXME: We should keep peers until it tells us they're disconnected,
	 * and not free when no more channels. */
	p = peer_by_id(ld, &id);
	if (p) {
		struct channel *channel;
		assert(p->connectd_counter == connectd_counter);
		log_peer_debug(ld->log, &id, "peer_disconnect_done");
		p->connected = PEER_DISCONNECTED;

		list_for_each(&p->channels, channel, list)
			channel_gossip_channel_disconnect(channel);
	}

	/* If you were trying to connect, it failed. */
	connect_failed_disconnect(ld, &id,
				  p && !p->connected_incoming ? &p->addr : NULL);

	/* Fire off plugin notifications */
	notify_disconnect(ld, &id);

	/* Wake any disconnect commands (removes self from list) */
	list_for_each_safe(&ld->disconnect_commands, i, next, list) {
		if (!node_id_eq(&i->id, &id))
			continue;

		was_pending(command_success(i->cmd,
					    json_stream_success(i->cmd)));
	}

	/* If connection was only thing keeping it, this will delete it. */
	if (p)
		maybe_delete_peer(p);
}
void update_channel_from_inflight(struct lightningd *ld,
				  struct channel *channel,
				  const struct channel_inflight *inflight)
{
	struct wally_psbt *psbt_copy;

	channel->funding = inflight->funding->outpoint;
	channel->funding_sats = inflight->funding->total_funds;

	channel->our_funds = inflight->funding->our_funds;

	if (!amount_sat_add_sat_s64(&channel->our_funds, channel->our_funds,
				    inflight->funding->splice_amnt)) {
		channel_fail_permanent(channel,
				       REASON_LOCAL,
				       "Updating channel view for splice causes"
				       " an invalid satoshi amount wrapping,"
				       " channel: %s, initial funds: %s, splice"
				       " balance change: %"PRId64,
				       fmt_channel_id(tmpctx,
						      &channel->cid),
				       fmt_amount_sat(tmpctx, channel->our_funds),
				       inflight->funding->splice_amnt);
	}

	/* Lease infos! */
	channel->lease_expiry = inflight->lease_expiry;
	channel->push = inflight->lease_fee;
	tal_free(channel->lease_commit_sig);
	channel->lease_commit_sig
		= tal_dup_or_null(channel, secp256k1_ecdsa_signature, inflight->lease_commit_sig);
	channel->lease_chan_max_msat = inflight->lease_chan_max_msat;
	channel->lease_chan_max_ppt = inflight->lease_chan_max_ppt;

	tal_free(channel->blockheight_states);
	channel->blockheight_states = new_height_states(channel,
							channel->opener,
							&inflight->lease_blockheight_start);

	/* Make a 'clone' of this tx */
	psbt_copy = clone_psbt(channel, inflight->last_tx->psbt);
	channel_set_last_tx(channel,
			    bitcoin_tx_with_psbt(channel, psbt_copy),
			    &inflight->last_sig);

	/* Update the reserve */
	channel_update_reserve(channel,
			       &channel->channel_info.their_config,
			       inflight->funding->total_funds);

	wallet_channel_save(ld->wallet, channel);
}
2019-02-26 17:57:19 +01:00
|
|
|
static enum watch_result funding_depth_cb(struct lightningd *ld,
|
2023-10-02 00:59:51 +02:00
|
|
|
const struct bitcoin_txid *txid,
|
|
|
|
const struct bitcoin_tx *tx,
|
|
|
|
unsigned int depth,
|
|
|
|
struct channel *channel)
|
2017-03-07 02:03:55 +01:00
|
|
|
{
|
2023-10-02 00:59:49 +02:00
|
|
|
/* This is stub channel, we don't activate anything! */
|
2024-03-20 02:59:51 +01:00
|
|
|
if (channel->scid && is_stub_scid(*channel->scid))
|
2023-10-02 00:59:49 +02:00
|
|
|
return DELETE_WATCH;
|
2017-03-07 02:05:03 +01:00
|
|
|
|
2023-10-02 00:59:52 +02:00
|
|
|
/* We only use this to watch the current funding tx */
|
|
|
|
assert(bitcoin_txid_eq(txid, &channel->funding.txid));
|
2019-06-28 03:58:31 +02:00
|
|
|
|
2023-10-02 00:59:49 +02:00
|
|
|
channel->depth = depth;
|
|
|
|
|
2018-02-12 11:13:04 +01:00
|
|
|
log_debug(channel->log, "Funding tx %s depth %u of %u",
|
2024-03-20 01:47:52 +01:00
|
|
|
fmt_bitcoin_txid(tmpctx, txid),
|
2023-10-02 00:59:49 +02:00
|
|
|
depth, channel->minimum_depth);
|
|
|
|
|
|
|
|
/* Reorged out? */
|
|
|
|
if (depth == 0) {
|
|
|
|
/* That's not entirely unexpected in early states */
|
|
|
|
switch (channel->state) {
|
|
|
|
case DUALOPEND_AWAITING_LOCKIN:
|
|
|
|
case DUALOPEND_OPEN_INIT:
|
2023-10-31 01:38:11 +01:00
|
|
|
case DUALOPEND_OPEN_COMMIT_READY:
|
2023-10-02 00:59:51 +02:00
|
|
|
case DUALOPEND_OPEN_COMMITTED:
|
			/* Shouldn't be here! */
			channel_internal_error(channel,
					       "Bad %s state: %s",
					       __func__,
					       channel_state_name(channel));
			return DELETE_WATCH;

		case CHANNELD_AWAITING_LOCKIN:
			/* That's not entirely unexpected in early states */
			log_debug(channel->log, "Funding tx %s reorganized out!",
				  fmt_bitcoin_txid(tmpctx, txid));
			channel->scid = tal_free(channel->scid);
			return KEEP_WATCHING;

		/* But it's often Bad News in later states */
		case CHANNELD_AWAITING_SPLICE:
		case CHANNELD_NORMAL:
			/* If we opened, or it's zero-conf, we trust them anyway. */
			if (channel->opener == LOCAL
			    || channel->minimum_depth == 0) {
				const char *str;

				str = tal_fmt(tmpctx,
					      "Funding tx %s reorganized out, but %s...",
					      fmt_bitcoin_txid(tmpctx, txid),
					      channel->opener == LOCAL ? "we opened it" : "zeroconf anyway");

				/* Log even if not connected! */
				if (!channel->owner)
					log_info(channel->log, "%s", str);
				channel_fail_transient(channel, true, "%s", str);
				return KEEP_WATCHING;
			}
			/* fall thru */
		case AWAITING_UNILATERAL:
		case CHANNELD_SHUTTING_DOWN:
		case CLOSINGD_SIGEXCHANGE:
		case CLOSINGD_COMPLETE:
		case FUNDING_SPEND_SEEN:
		case ONCHAIN:
		case CLOSED:
			break;
		}

		channel_internal_error(channel,
				       "Funding transaction has been reorged out in state %s!",
				       channel_state_name(channel));
		return KEEP_WATCHING;
	}

	if (!depthcb_update_scid(channel, txid, &channel->funding))
		return DELETE_WATCH;

	switch (channel->state) {
	/* We should not be in the callback! */
	case DUALOPEND_AWAITING_LOCKIN:
	case DUALOPEND_OPEN_INIT:
	case DUALOPEND_OPEN_COMMIT_READY:
	case DUALOPEND_OPEN_COMMITTED:
		abort();

	case AWAITING_UNILATERAL:
	case CHANNELD_SHUTTING_DOWN:
	case CLOSINGD_SIGEXCHANGE:
	case CLOSINGD_COMPLETE:
	case FUNDING_SPEND_SEEN:
	case ONCHAIN:
	case CLOSED:
		/* If not awaiting lockin/announce, it doesn't care any more */
		log_debug(channel->log,
			  "Funding tx %s confirmed, but peer in state %s",
			  fmt_bitcoin_txid(tmpctx, txid),
			  channel_state_name(channel));
		return DELETE_WATCH;

	case CHANNELD_AWAITING_LOCKIN:
		/* This may be redundant, and may be public later, but
		 * make sure we tell gossipd at least once */
		if (depth >= channel->minimum_depth
		    && channel->remote_channel_ready) {
			lockin_complete(channel, CHANNELD_AWAITING_LOCKIN);
		}
		/* Fall thru */
	case CHANNELD_NORMAL:
	case CHANNELD_AWAITING_SPLICE:
		channeld_tell_depth(channel, txid, depth);

		if (depth < ANNOUNCE_MIN_DEPTH || depth < channel->minimum_depth)
			return KEEP_WATCHING;
		/* Normal state and past announce depth? Stop bothering us! */
		return DELETE_WATCH;
	}
	abort();
}

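/* Callback for the watch on the funding outpoint: called when a block
 * containing a spend of the funding output is processed. */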
static enum watch_result funding_spent(struct channel *channel,
				       const struct bitcoin_tx *tx,
				       size_t inputnum UNUSED,
				       const struct block *block)
{
	struct bitcoin_txid txid;
	struct channel_inflight *inflight;

	bitcoin_txid(tx, &txid);

	/* If we're doing a splice, we expect the funding transaction to be
	 * spent, so don't freak out and just keep watching in that case */
	list_for_each(&channel->inflights, inflight, list) {
		if (bitcoin_txid_eq(&txid,
				    &inflight->funding->outpoint.txid)) {
			/* splice_locked is a special flag that indicates this
			 * is a memory-only inflight acting as a race condition
			 * safeguard. When we see this, it is our responsibility
			 * to clean up this memory-only inflight. */
			if (inflight->splice_locked_memonly) {
				tal_free(inflight);
				return DELETE_WATCH;
			}
			return KEEP_WATCHING;
		}
	}

	wallet_channeltxs_add(channel->peer->ld->wallet, channel,
			      WIRE_ONCHAIND_INIT, &txid, 0, block->height);

	return onchaind_funding_spent(channel, tx, block->height);
}

void channel_watch_wrong_funding(struct lightningd *ld, struct channel *channel)
{
	/* Watch the "wrong" funding too, in case we spend it. */
	if (channel->shutdown_wrong_funding) {
		watch_txo(channel, ld->topology, channel,
			  channel->shutdown_wrong_funding,
			  funding_spent);
	}
}

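/* (Re)establish the watches a channel needs: the funding txid for
 * depth changes, and the funding outpoint for spends. */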
void channel_watch_funding(struct lightningd *ld, struct channel *channel)
{
	log_debug(channel->log, "Watching for funding txid: %s",
		  fmt_bitcoin_txid(tmpctx, &channel->funding.txid));
	watch_txid(channel, ld->topology,
		   &channel->funding.txid, funding_depth_cb, channel);

	tal_free(channel->funding_spend_watch);
	channel->funding_spend_watch = watch_txo(channel, ld->topology, channel,
						 &channel->funding,
						 funding_spent);
	channel_watch_wrong_funding(ld, channel);
}

static void json_add_peer(struct lightningd *ld,
			  struct json_stream *response,
			  struct peer *p,
			  const enum log_level *ll)
{
	struct channel *channel;
	u32 num_channels;

	json_object_start(response, NULL);
	json_add_node_id(response, "id", &p->id);

	json_add_bool(response, "connected", p->connected == PEER_CONNECTED);
	num_channels = 0;
	list_for_each(&p->channels, channel, list)
		num_channels++;
	json_add_num(response, "num_channels", num_channels);

	if (p->connected == PEER_CONNECTED) {
		json_array_start(response, "netaddr");
		json_add_string(response, NULL,
				fmt_wireaddr_internal(tmpctx, &p->addr));
		json_array_end(response);
		/* If peer reports our IP remote_addr, add that here */
		if (p->remote_addr)
			json_add_string(response, "remote_addr",
					fmt_wireaddr(response, p->remote_addr));
	}

	/* Note: If !PEER_CONNECTED, peer may use different features on reconnect */
	json_add_hex_talarr(response, "features", p->their_features);

	if (ll)
		json_add_log(response, ld->log_book, &p->id, *ll);
	json_object_end(response);
}

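/* Illustrative (not normative) invocation, assuming a standard
 * `lightning-cli` setup; the field names come from json_add_peer()
 * above:
 *   $ lightning-cli listpeers
 *   {"peers": [{"id": "02...", "connected": true,
 *               "num_channels": 1, "features": "..."}]}
 */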
static struct command_result *json_listpeers(struct command *cmd,
					     const char *buffer,
					     const jsmntok_t *obj UNNEEDED,
					     const jsmntok_t *params)
{
	enum log_level *ll;
	struct node_id *specific_id;
	struct peer *peer;
	struct json_stream *response;

	if (!param(cmd, buffer, params,
		   p_opt("id", param_node_id, &specific_id),
		   p_opt("level", param_loglevel, &ll),
		   NULL))
		return command_param_failed();

	response = json_stream_success(cmd);
	json_array_start(response, "peers");
	if (specific_id) {
		peer = peer_by_id(cmd->ld, specific_id);
		if (peer)
			json_add_peer(cmd->ld, response, peer, ll);
	} else {
		struct peer_node_id_map_iter it;

		for (peer = peer_node_id_map_first(cmd->ld->peers, &it);
		     peer;
		     peer = peer_node_id_map_next(cmd->ld->peers, &it)) {
			json_add_peer(cmd->ld, response, peer, ll);
		}
	}
	json_array_end(response);

	return command_success(cmd, response);
}

static const struct json_command listpeers_command = {
	"listpeers",
	json_listpeers,
};
/* Comment added to satisfice AUTODATA */
AUTODATA(json_command, &listpeers_command);

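/* Serialize one channel's static channel backup (SCB) entry and add it
 * to the response as a hex field. */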
static void json_add_scb(struct command *cmd,
			 const char *fieldname,
			 struct json_stream *response,
			 struct channel *c)
{
	u8 *scb = tal_arr(cmd, u8, 0);

	towire_scb_chan(&scb, c->scb);
	json_add_hex_talarr(response, fieldname,
			    scb);
}

/* This will return a SCB for all the channels currently loaded
 * in memory */
static struct command_result *json_staticbackup(struct command *cmd,
						const char *buffer,
						const jsmntok_t *obj UNNEEDED,
						const jsmntok_t *params)
{
	struct json_stream *response;
	struct peer *peer;
	struct peer_node_id_map_iter it;

	if (!param(cmd, buffer, params, NULL))
		return command_param_failed();

	response = json_stream_success(cmd);

	json_array_start(response, "scb");
	for (peer = peer_node_id_map_first(cmd->ld->peers, &it);
	     peer;
	     peer = peer_node_id_map_next(cmd->ld->peers, &it)) {
		struct channel *channel;
		list_for_each(&peer->channels, channel, list) {
			/* cppcheck-suppress uninitvar - false positive on channel */
			if (!channel->scb)
				continue;
			json_add_scb(cmd, NULL, response, channel);
		}
	}
	json_array_end(response);

	return command_success(cmd, response);
}

static const struct json_command staticbackup_command = {
	"staticbackup",
	json_staticbackup,
};
/* Comment added to satisfice AUTODATA */
AUTODATA(json_command, &staticbackup_command);

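/* Add every channel for one peer (uncommitted, unsaved and saved) to
 * the "channels" array of the response. */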
static void json_add_peerchannels(struct command *cmd,
				  struct json_stream *response,
				  const struct peer *peer)
{
	struct channel *channel;

	json_add_uncommitted_channel(cmd, response, peer->uncommitted_channel, peer);
	list_for_each(&peer->channels, channel, list) {
		if (channel_state_uncommitted(channel->state))
			json_add_unsaved_channel(cmd, response, channel, peer);
		else
			json_add_channel(cmd, response, NULL, channel, peer);
	}
}

static struct command_result *json_listpeerchannels(struct command *cmd,
						    const char *buffer,
						    const jsmntok_t *obj UNNEEDED,
						    const jsmntok_t *params)
{
	struct node_id *peer_id;
	struct peer *peer;
	struct json_stream *response;

	/* FIXME: filter by status */
	if (!param(cmd, buffer, params,
		   p_opt("id", param_node_id, &peer_id),
		   NULL))
		return command_param_failed();

	response = json_stream_success(cmd);
	json_array_start(response, "channels");

	if (peer_id) {
		peer = peer_by_id(cmd->ld, peer_id);
		if (peer)
			json_add_peerchannels(cmd, response, peer);
	} else {
		struct peer_node_id_map_iter it;

		for (peer = peer_node_id_map_first(cmd->ld->peers, &it);
		     peer;
		     peer = peer_node_id_map_next(cmd->ld->peers, &it)) {
			json_add_peerchannels(cmd, response, peer);
		}
	}

	json_array_end(response);

	return command_success(cmd, response);
}

static const struct json_command listpeerchannels_command = {
	"listpeerchannels",
	json_listpeerchannels,
};
AUTODATA(json_command, &listpeerchannels_command);

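/* Resolve a channel from a JSON token holding either a full channel id
 * or a short channel id; fails the command if no match is found. */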
struct command_result *
command_find_channel(struct command *cmd,
		     const char *name,
		     const char *buffer, const jsmntok_t *tok,
		     struct channel **channel)
{
	struct lightningd *ld = cmd->ld;
	struct channel_id cid;
	struct short_channel_id scid;
	struct peer *peer;

	if (json_tok_channel_id(buffer, tok, &cid)) {
		struct peer_node_id_map_iter it;

		for (peer = peer_node_id_map_first(ld->peers, &it);
		     peer;
		     peer = peer_node_id_map_next(ld->peers, &it)) {
			list_for_each(&peer->channels, (*channel), list) {
				if (!channel_state_wants_peercomms((*channel)->state))
					continue;
				if (channel_id_eq(&(*channel)->cid, &cid))
					return NULL;
			}
		}
		return command_fail_badparam(cmd, name, buffer, tok,
					     "Channel id not found");
	} else if (json_to_short_channel_id(buffer, tok, &scid)) {
		*channel = any_channel_by_scid(ld, scid, true);
		if (!*channel)
			return command_fail_badparam(cmd, name, buffer, tok,
						     "Short channel id not found");
		return NULL;
	} else {
		return command_fail_badparam(cmd, name, buffer, tok,
					     "should be a channel ID or short channel ID");
	}
}

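/* On startup, set up watches for each of this peer's channels
 * according to its state, and kick off a reconnect attempt (optionally
 * delayed) if any channel still wants to talk to the peer. */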
static void setup_peer(struct peer *peer, u32 delay)
{
	struct channel *channel;
	struct channel_inflight *inflight;
	struct lightningd *ld = peer->ld;
	bool connect = false;

	list_for_each(&peer->channels, channel, list) {
		switch (channel->state) {
		case DUALOPEND_OPEN_INIT:
		case DUALOPEND_OPEN_COMMIT_READY:
		case DUALOPEND_OPEN_COMMITTED:
			/* Nothing to watch */
			continue;

		/* Normal cases where we watch funding */
		case CHANNELD_AWAITING_LOCKIN:
		case CHANNELD_NORMAL:
		case CHANNELD_SHUTTING_DOWN:
		case CLOSINGD_SIGEXCHANGE:
		/* We still want to watch spend, to tell onchaind: */
		case CLOSINGD_COMPLETE:
		case AWAITING_UNILATERAL:
		case FUNDING_SPEND_SEEN:
		case ONCHAIN:
		case CLOSED:
			channel_watch_funding(ld, channel);
			break;

		/* We need to watch all inflights which may open channel */
		case DUALOPEND_AWAITING_LOCKIN:
			list_for_each(&channel->inflights, inflight, list)
				watch_opening_inflight(ld, inflight);
			break;

		/* We need to watch all inflights which may splice */
		case CHANNELD_AWAITING_SPLICE:
			list_for_each(&channel->inflights, inflight, list)
				watch_splice_inflight(ld, inflight);
			break;
		}

		if (channel_state_wants_peercomms(channel->state))
			connect = true;
	}

	/* Make sure connectd knows to try reconnecting. */
	if (connect) {
		ld->num_startup_connects++;

		/* To delay, make it seem like we just connected. */
		if (delay > 0) {
			peer->reconnect_delay = delay;
			peer->last_connect_attempt = time_now();
		}
		try_reconnect(peer, peer, &peer->addr);
	}
}

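/* Called once at startup: wire up every loaded peer, staggering
 * reconnect attempts so a node with many peers doesn't reconnect to
 * all of them at once. */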
void setup_peers(struct lightningd *ld)
{
	struct peer *p;
	/* Avoid thundering herd: after first five, delay by 1 second. */
	int delay = -5;
	struct peer_node_id_map_iter it;

	for (p = peer_node_id_map_first(ld->peers, &it);
	     p;
	     p = peer_node_id_map_next(ld->peers, &it)) {
		setup_peer(p, delay > 0 ? delay : 0);
		delay++;
	}

	/* In case there are no peers at all to connect to */
	if (ld->num_startup_connects == 0)
		channel_gossip_startup_done(ld);
}

/* Pull peers, channels and HTLCs from db, and wire them up. */
struct htlc_in_map *load_channels_from_wallet(struct lightningd *ld,
					      size_t *num_channels)
{
	struct peer *peer;
	struct htlc_in_map *unconnected_htlcs_in = tal(ld, struct htlc_in_map);
	struct peer_node_id_map_iter it;

	/* Load channels from database */
	if (!wallet_init_channels(ld->wallet))
		fatal("Could not load channels from the database");

	*num_channels = 0;
	/* First we load the incoming htlcs */
	for (peer = peer_node_id_map_first(ld->peers, &it);
	     peer;
	     peer = peer_node_id_map_next(ld->peers, &it)) {
		struct channel *channel;

		list_for_each(&peer->channels, channel, list) {
			if (!wallet_htlcs_load_in_for_channel(ld->wallet,
							      channel,
							      ld->htlcs_in)) {
				fatal("could not load htlcs for channel");
			}
			(*num_channels)++;
		}
	}

	/* Make a copy of the htlc_map: entries removed as they're matched */
	htlc_in_map_copy(unconnected_htlcs_in, ld->htlcs_in);

	/* Now we load the outgoing HTLCs, so we can connect them. */
	for (peer = peer_node_id_map_first(ld->peers, &it);
	     peer;
	     peer = peer_node_id_map_next(ld->peers, &it)) {
		struct channel *channel;

		list_for_each(&peer->channels, channel, list) {
			if (!wallet_htlcs_load_out_for_channel(ld->wallet,
							       channel,
							       ld->htlcs_out,
							       unconnected_htlcs_in)) {
				fatal("could not load outgoing htlcs for channel");
			}
		}
	}

#ifdef COMPAT_V061
	fixup_htlcs_out(ld);
#endif /* COMPAT_V061 */

	return unconnected_htlcs_in;
}

static struct command_result *param_peer(struct command *cmd,
					 const char *name,
					 const char *buffer,
					 const jsmntok_t *tok,
					 struct peer **peer)
{
	struct node_id peerid;

	if (!json_to_node_id(buffer, tok, &peerid))
		return command_fail_badparam(cmd, name, buffer, tok,
					     "invalid peer id");
	*peer = peer_by_id(cmd->ld, &peerid);
	if (!*peer)
		return command_fail(cmd, JSONRPC2_INVALID_PARAMS,
				    "Unknown peer '%.*s'",
				    tok->end - tok->start,
				    buffer + tok->start);
	return NULL;
}

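/* Illustrative (not normative) invocation; parameter names are taken
 * from the param_check() call below:
 *   $ lightning-cli disconnect id=02... force=true
 */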
static struct command_result *json_disconnect(struct command *cmd,
					      const char *buffer,
					      const jsmntok_t *obj UNNEEDED,
					      const jsmntok_t *params)
{
	struct disconnect_command *dc;
	struct peer *peer;
	struct channel *channel;
	bool *force;

	if (!param_check(cmd, buffer, params,
			 p_req("id", param_peer, &peer),
			 p_opt_def("force", param_bool, &force, false),
			 NULL))
		return command_param_failed();

	if (peer->connected == PEER_DISCONNECTED) {
		return command_fail(cmd, LIGHTNINGD, "Peer not connected");
	}

	channel = peer_any_channel(peer, channel_state_wants_peercomms, NULL);
	if (channel && !*force) {
		return command_fail(cmd, LIGHTNINGD,
				    "Peer has (at least one) channel in state %s",
				    channel_state_name(channel));
	}

	if (command_check_only(cmd))
		return command_check_done(cmd);

	force_peer_disconnect(cmd->ld, peer, "disconnect command");

	/* Connectd tells us when it's finally disconnected */
	dc = tal(cmd, struct disconnect_command);
	dc->cmd = cmd;
	dc->id = peer->id;
	list_add_tail(&cmd->ld->disconnect_commands, &dc->list);
	tal_add_destructor(dc, destroy_disconnect_command);

	return command_still_pending(cmd);
}

static const struct json_command disconnect_command = {
	"disconnect",
	json_disconnect,
};
AUTODATA(json_command, &disconnect_command);

static struct command_result *json_getinfo(struct command *cmd,
					   const char *buffer,
					   const jsmntok_t *obj UNNEEDED,
					   const jsmntok_t *params)
{
	struct json_stream *response;
	struct peer *peer;
	struct channel *channel;
	unsigned int pending_channels = 0, active_channels = 0,
		     inactive_channels = 0, num_peers = 0;
	size_t count_announceable;
	struct peer_node_id_map_iter it;

	if (!param(cmd, buffer, params, NULL))
		return command_param_failed();

	response = json_stream_success(cmd);
	json_add_node_id(response, "id", &cmd->ld->our_nodeid);
	json_add_string(response, "alias", (const char *)cmd->ld->alias);
	json_add_hex_talarr(response, "color", cmd->ld->rgb);

	/* Add some peer and channel stats */
	for (peer = peer_node_id_map_first(cmd->ld->peers, &it);
	     peer;
	     peer = peer_node_id_map_next(cmd->ld->peers, &it)) {
		num_peers++;

		list_for_each(&peer->channels, channel, list) {
			switch (channel->state) {
			case CHANNELD_AWAITING_LOCKIN:
			case DUALOPEND_OPEN_INIT:
			case DUALOPEND_OPEN_COMMIT_READY:
			case DUALOPEND_OPEN_COMMITTED:
			case DUALOPEND_AWAITING_LOCKIN:
				pending_channels++;
				continue;
			case CHANNELD_AWAITING_SPLICE:
			case CHANNELD_SHUTTING_DOWN:
			case CHANNELD_NORMAL:
			case CLOSINGD_SIGEXCHANGE:
				active_channels++;
				continue;
			case CLOSINGD_COMPLETE:
			case AWAITING_UNILATERAL:
			case FUNDING_SPEND_SEEN:
			case ONCHAIN:
			case CLOSED:
				inactive_channels++;
				continue;
			}
			abort();
		}
	}
	json_add_num(response, "num_peers", num_peers);
	json_add_num(response, "num_pending_channels", pending_channels);
	json_add_num(response, "num_active_channels", active_channels);
	json_add_num(response, "num_inactive_channels", inactive_channels);

	/* Add network info */
	json_array_start(response, "address");
	if (cmd->ld->listen) {
		/* These are the addresses we're announcing */
		count_announceable = tal_count(cmd->ld->announceable);
		for (size_t i = 0; i < count_announceable; i++)
			json_add_address(response, NULL, cmd->ld->announceable+i);

		/* Add discovered IPs if we announce them.
		 * Also see `create_node_announcement` in `gossip_generation.c`. */
		if ((cmd->ld->config.ip_discovery == OPT_AUTOBOOL_AUTO && count_announceable == 0) ||
		    cmd->ld->config.ip_discovery == OPT_AUTOBOOL_TRUE) {
			if (cmd->ld->discovered_ip_v4 != NULL &&
			    !wireaddr_arr_contains(
				    cmd->ld->announceable,
				    cmd->ld->discovered_ip_v4))
				json_add_address(response, NULL,
						 cmd->ld->discovered_ip_v4);
			if (cmd->ld->discovered_ip_v6 != NULL &&
			    !wireaddr_arr_contains(
				    cmd->ld->announceable,
				    cmd->ld->discovered_ip_v6))
				json_add_address(response, NULL,
						 cmd->ld->discovered_ip_v6);
		}
		json_array_end(response);

		/* This is what we're actually bound to. */
		json_array_start(response, "binding");
		for (size_t i = 0; i < tal_count(cmd->ld->binding); i++)
			json_add_address_internal(response, NULL,
						  cmd->ld->binding+i);
	}
	json_array_end(response);

	json_add_string(response, "version", version());
	/* If we're still syncing, put the height we're up to here, so
	 * they can see progress! Otherwise use the height gossipd knows
	 * about, so tests work properly. */
	if (!topology_synced(cmd->ld->topology)) {
		json_add_num(response, "blockheight",
			     get_block_height(cmd->ld->topology));
	} else {
		json_add_num(response, "blockheight",
			     cmd->ld->gossip_blockheight);
	}
	json_add_string(response, "network", chainparams->network_name);
	json_add_amount_msat(response,
			     "fees_collected_msat",
			     wallet_total_forward_fees(cmd->ld->wallet));
	json_add_string(response, "lightning-dir", cmd->ld->config_netdir);

	if (!cmd->ld->topology->bitcoind->synced)
		json_add_string(response, "warning_bitcoind_sync",
				"Bitcoind is not up-to-date with network.");
	else if (!topology_synced(cmd->ld->topology))
		json_add_string(response, "warning_lightningd_sync",
				"Still loading latest blocks from bitcoind.");

	u8 **bits = cmd->ld->our_features->bits;
	json_object_start(response, "our_features");
	json_add_hex_talarr(response, "init",
			    featurebits_or(cmd,
					   bits[INIT_FEATURE],
					   bits[GLOBAL_INIT_FEATURE]));
	json_add_hex_talarr(response, "node", bits[NODE_ANNOUNCE_FEATURE]);
	json_add_hex_talarr(response, "channel", bits[CHANNEL_FEATURE]);
	json_add_hex_talarr(response, "invoice", bits[BOLT11_FEATURE]);
	json_object_end(response);

	return command_success(cmd, response);
}

static const struct json_command getinfo_command = {
	"getinfo",
	json_getinfo,
};
AUTODATA(json_command, &getinfo_command);

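/* Illustrative (not normative) invocation; parameters are taken from
 * json_waitblockheight() below:
 *   $ lightning-cli waitblockheight 800000 120
 * returns {"blockheight": 800000} once that height is reached, or
 * fails with WAIT_TIMEOUT after 120 seconds.
 */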
/* Wait for at least a specific blockheight, then return, or time out. */
struct waitblockheight_waiter {
	/* struct lightningd::waitblockheight_commands. */
	struct list_node list;
	/* Command structure. This is the parent of the waitblockheight command. */
	struct command *cmd;
	/* The block height being waited for. */
	u32 block_height;
	/* Whether we have been removed from the list. */
	bool removed;
};
/* Completes a pending waitblockheight. */
static struct command_result *
waitblockheight_complete(struct command *cmd,
			 u32 block_height)
{
	struct json_stream *response;

	response = json_stream_success(cmd);
	json_add_num(response, "blockheight", block_height);
	return command_success(cmd, response);
}
/* Called when command is destroyed without being resolved. */
static void
destroy_waitblockheight_waiter(struct waitblockheight_waiter *w)
{
	if (!w->removed)
		list_del(&w->list);
}
/* Called on timeout. */
static void
timeout_waitblockheight_waiter(struct waitblockheight_waiter *w)
{
	list_del(&w->list);
	w->removed = true;
	tal_steal(tmpctx, w);
	was_pending(command_fail(w->cmd, WAIT_TIMEOUT,
				 "Timed out."));
}
/* Called by lightningd at each new block. */
void waitblockheight_notify_new_block(struct lightningd *ld,
				      u32 block_height)
{
	struct waitblockheight_waiter *w, *n;
	char *to_delete = tal(NULL, char);

	/* Use safe since we could resolve commands and thus
	 * trigger removal of list elements.
	 */
	list_for_each_safe(&ld->waitblockheight_commands, w, n, list) {
		/* Skip commands that have not been reached yet. */
		if (w->block_height > block_height)
			continue;

		list_del(&w->list);
		w->removed = true;
		tal_steal(to_delete, w);
		was_pending(waitblockheight_complete(w->cmd,
						     block_height));
	}
	tal_free(to_delete);
}
static struct command_result *json_waitblockheight(struct command *cmd,
						   const char *buffer,
						   const jsmntok_t *obj,
						   const jsmntok_t *params)
{
	unsigned int *target_block_height;
	u32 block_height;
	unsigned int *timeout;
	struct waitblockheight_waiter *w;

	if (!param(cmd, buffer, params,
		   p_req("blockheight", param_number, &target_block_height),
		   p_opt_def("timeout", param_number, &timeout, 60),
		   NULL))
		return command_param_failed();

	/* Check if already reached anyway. */
	block_height = get_block_height(cmd->ld->topology);
	if (*target_block_height <= block_height)
		return waitblockheight_complete(cmd, block_height);

	/* Create a new waitblockheight command. */
	w = tal(cmd, struct waitblockheight_waiter);
	tal_add_destructor(w, &destroy_waitblockheight_waiter);
	list_add(&cmd->ld->waitblockheight_commands, &w->list);
	w->cmd = cmd;
	w->block_height = *target_block_height;
	w->removed = false;
	/* Install the timeout. */
	(void) new_reltimer(cmd->ld->timers, w, time_from_sec(*timeout),
			    &timeout_waitblockheight_waiter, w);

	return command_still_pending(cmd);
}

static const struct json_command waitblockheight_command = {
	"waitblockheight",
	&json_waitblockheight,
};
AUTODATA(json_command, &waitblockheight_command);

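/* Which states allow `setchannel` to adjust fees and HTLC limits: any
 * state where the channel is open, or still on its way to being
 * usable. */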
static bool channel_state_can_setchannel(enum channel_state state)
{
	switch (state) {
	case CHANNELD_NORMAL:
	case CHANNELD_AWAITING_SPLICE:
	case CHANNELD_AWAITING_LOCKIN:
	case DUALOPEND_AWAITING_LOCKIN:
		return true;
	case DUALOPEND_OPEN_INIT:
	case DUALOPEND_OPEN_COMMIT_READY:
	case DUALOPEND_OPEN_COMMITTED:
	case CLOSINGD_SIGEXCHANGE:
	case CHANNELD_SHUTTING_DOWN:
	case CLOSINGD_COMPLETE:
	case AWAITING_UNILATERAL:
	case FUNDING_SPEND_SEEN:
	case ONCHAIN:
	case CLOSED:
		return false;
	}
	abort();
}

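/* Parse the `id` parameter of setchannel: "all" (returns a NULL
 * array), a peer id (returns all that peer's settable channels), or a
 * single channel id or short channel id. */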
static struct command_result *param_channel_or_all(struct command *cmd,
						   const char *name,
						   const char *buffer,
						   const jsmntok_t *tok,
						   struct channel ***channels)
{
	struct command_result *res;
	struct peer *peer;
	struct channel *channel;

	*channels = tal_arr(cmd, struct channel *, 0);

	/* early return the easy case */
	if (json_tok_streq(buffer, tok, "all")) {
		*channels = tal_free(*channels);
		return NULL;
	}

	/* Find channels by peer_id */
	peer = peer_from_json(cmd->ld, buffer, tok);
	if (peer) {
		list_for_each(&peer->channels, channel, list) {
			if (channel_state_can_setchannel(channel->state))
				tal_arr_expand(channels, channel);
		}
		if (tal_count(*channels) == 0)
			return command_fail(cmd, LIGHTNINGD,
					    "Could not find any active channels of peer with that id");
		return NULL;
	}

	/* Find channel by id or scid */
	res = command_find_channel(cmd, name, buffer, tok, &channel);
	if (res)
		return res;
	/* check channel is found and in valid state */
	if (!channel_state_can_setchannel(channel->state))
		return command_fail_badparam(cmd, name, buffer, tok,
					     tal_fmt(tmpctx, "Channel in state %s",
						     channel_state_name(channel)));
	tal_arr_expand(channels, channel);
	return NULL;
}

/* Fee base is a u32, but it's convenient to let them specify it using
 * msat etc. suffix. */
static struct command_result *param_msat_u32(struct command *cmd,
					     const char *name,
					     const char *buffer,
					     const jsmntok_t *tok,
					     u32 **num)
{
	struct amount_msat *msat;
	struct command_result *res;

	/* Parse just like an msat. */
	res = param_msat(cmd, name, buffer, tok, &msat);
	if (res)
		return res;

	*num = tal(cmd, u32);
	if (!amount_msat_to_u32(*msat, *num)) {
		return command_fail_badparam(cmd, name, buffer, tok,
					     "exceeds u32 max");
	}

	return NULL;
}

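/* Apply new fee/HTLC settings to one channel and append a per-channel
 * entry to the JSON response.  When a setting becomes stricter (higher
 * fee, higher htlc_min, lower htlc_max), the old values are remembered
 * for `delaysecs` seconds so payments already routed with the old
 * settings are still accepted. */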
static void set_channel_config(struct command *cmd, struct channel *channel,
			       u32 *base,
			       u32 *ppm,
			       struct amount_msat *htlc_min,
			       struct amount_msat *htlc_max,
			       u32 delaysecs,
			       bool *ignore_fee_limits,
			       struct json_stream *response)
{
	bool warn_cannot_set_min = false, warn_cannot_set_max = false;

	/* We only need to defer values if we *increase* fees (or drop
	 * max, increase min); we always allow users to overpay fees. */
	if ((base && *base > channel->feerate_base)
	    || (ppm && *ppm > channel->feerate_ppm)
	    || (htlc_min
		&& amount_msat_greater(*htlc_min, channel->htlc_minimum_msat))
	    || (htlc_max
		&& amount_msat_less(*htlc_max, channel->htlc_maximum_msat))) {
		channel->old_feerate_timeout
			= timeabs_add(time_now(), time_from_sec(delaysecs));
		channel->old_feerate_base = channel->feerate_base;
		channel->old_feerate_ppm = channel->feerate_ppm;
		channel->old_htlc_minimum_msat = channel->htlc_minimum_msat;
		channel->old_htlc_maximum_msat = channel->htlc_maximum_msat;
	}

	/* set new values */
	if (base)
		channel->feerate_base = *base;
	if (ppm)
		channel->feerate_ppm = *ppm;
	if (htlc_min) {
		struct amount_msat actual_min;

		/* We can't send something they'll refuse: check that here. */
		actual_min = channel->channel_info.their_config.htlc_minimum;
		if (amount_msat_less(*htlc_min, actual_min)) {
			warn_cannot_set_min = true;
			channel->htlc_minimum_msat = actual_min;
		} else
			channel->htlc_minimum_msat = *htlc_min;
	}
	if (htlc_max) {
		struct amount_msat actual_max;

		/* Can't set it greater than actual capacity. */
		actual_max = htlc_max_possible_send(channel);
		if (amount_msat_greater(*htlc_max, actual_max)) {
			warn_cannot_set_max = true;
			channel->htlc_maximum_msat = actual_max;
		} else
			channel->htlc_maximum_msat = *htlc_max;
	}
	if (ignore_fee_limits)
		channel->ignore_fee_limits = *ignore_fee_limits;

	/* Tell channeld about the new acceptable feerates */
	if (channel->owner
	    && streq(channel->owner->name, "channeld")
	    && ignore_fee_limits) {
		channel_update_feerates(cmd->ld, channel);
	}

	/* save values to database */
	wallet_channel_save(cmd->ld->wallet, channel);

	/* write JSON response entry */
	json_object_start(response, NULL);
	json_add_node_id(response, "peer_id", &channel->peer->id);
	json_add_string(response, "channel_id",
			fmt_channel_id(tmpctx, &channel->cid));
	if (channel->scid)
		json_add_short_channel_id(response, "short_channel_id",
					  *channel->scid);

	json_add_amount_msat(response, "fee_base_msat",
			     amount_msat(channel->feerate_base));
	json_add_u32(response, "fee_proportional_millionths",
		     channel->feerate_ppm);
	json_add_amount_msat(response,
			     "minimum_htlc_out_msat",
			     channel->htlc_minimum_msat);
	if (warn_cannot_set_min)
		json_add_string(response, "warning_htlcmin_too_low",
				"Set minimum_htlc_out_msat to minimum allowed by peer");
	json_add_amount_msat(response,
			     "maximum_htlc_out_msat",
			     channel->htlc_maximum_msat);
	json_add_bool(response, "ignore_fee_limits", channel->ignore_fee_limits);
	if (warn_cannot_set_max)
		json_add_string(response, "warning_htlcmax_too_high",
				"Set maximum_htlc_out_msat to maximum possible in channel");
	json_object_end(response);
}

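/* Illustrative (not normative) invocation; parameter names are taken
 * from the param_check() call below:
 *   $ lightning-cli setchannel id=all feebase=0 feeppm=10
 * returns one "channels" entry per channel updated.
 */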
static struct command_result *json_setchannel(struct command *cmd,
					      const char *buffer,
					      const jsmntok_t *obj UNNEEDED,
					      const jsmntok_t *params)
{
	struct json_stream *response;
	struct peer *peer;
	struct channel **channels;
	u32 *base, *ppm, *delaysecs;
	struct amount_msat *htlc_min, *htlc_max;
	bool *ignore_fee_limits;

	/* Parse the JSON command */
	if (!param_check(cmd, buffer, params,
			 p_req("id", param_channel_or_all, &channels),
			 p_opt("feebase", param_msat_u32, &base),
			 p_opt("feeppm", param_number, &ppm),
			 p_opt("htlcmin", param_msat, &htlc_min),
			 p_opt("htlcmax", param_msat, &htlc_max),
			 p_opt_def("enforcedelay", param_number, &delaysecs, 600),
			 p_opt("ignorefeelimits", param_bool, &ignore_fee_limits),
			 NULL))
		return command_param_failed();

	/* Prevent obviously incorrect things! */
	if (htlc_min && htlc_max
	    && amount_msat_less(*htlc_max, *htlc_min)) {
		return command_fail(cmd, LIGHTNINGD,
				    "htlcmax cannot be less than htlcmin");
	}

	if (command_check_only(cmd))
		return command_check_done(cmd);

	/* Open JSON response object for later iteration */
	response = json_stream_success(cmd);
	json_array_start(response, "channels");

	/* If the user requested 'all' channels we need to iterate */
	if (channels == NULL) {
		struct peer_node_id_map_iter it;

		for (peer = peer_node_id_map_first(cmd->ld->peers, &it);
		     peer;
		     peer = peer_node_id_map_next(cmd->ld->peers, &it)) {
			struct channel *channel;
			list_for_each(&peer->channels, channel, list) {
				/* cppcheck-suppress uninitvar - false positive on channel */
				if (!channel_state_can_setchannel(channel->state))
					continue;
				set_channel_config(cmd, channel, base, ppm,
						   htlc_min, htlc_max,
						   *delaysecs, ignore_fee_limits,
						   response);
			}
		}
	/* single peer should be updated */
	} else {
		for (size_t i = 0; i < tal_count(channels); i++) {
			set_channel_config(cmd, channels[i], base, ppm,
					   htlc_min, htlc_max,
					   *delaysecs, ignore_fee_limits,
					   response);
		}
	}

	/* Close and return response */
	json_array_end(response);
	return command_success(cmd, response);
}

static const struct json_command setchannel_command = {
	"setchannel",
	json_setchannel,
};
AUTODATA(json_command, &setchannel_command);

/* dev hack, don't use for real interfaces, which have to handle channel ids, or multiple channels */
static struct command_result *param_dev_channel(struct command *cmd,
						const char *name,
						const char *buffer,
						const jsmntok_t *tok,
						struct channel **channel)
{
	struct peer *peer;
	struct command_result *res;
	bool more_than_one;

	res = param_peer(cmd, name, buffer, tok, &peer);
	if (res)
		return res;

	*channel = peer_any_channel(peer, channel_state_wants_peercomms, &more_than_one);
	if (!*channel)
		return command_fail_badparam(cmd, name, buffer, tok,
					     "No channel with that peer");

	if (more_than_one)
		return command_fail_badparam(cmd, name, buffer, tok,
					     "More than one channel with that peer");

	return NULL;
}

static struct command_result *json_sign_last_tx(struct command *cmd,
						const char *buffer,
						const jsmntok_t *obj UNNEEDED,
						const jsmntok_t *params)
{
	struct json_stream *response;
	struct channel *channel;
	struct bitcoin_tx *tx;

	if (!param(cmd, buffer, params,
		   p_req("id", param_dev_channel, &channel),
		   NULL))
		return command_param_failed();

	response = json_stream_success(cmd);
	log_debug(channel->log, "dev-sign-last-tx: signing tx with %zu outputs",
		  channel->last_tx->wtx->num_outputs);

	tx = sign_last_tx(cmd, channel, channel->last_tx, &channel->last_sig);
	json_add_tx(response, "tx", tx);

	/* If we've got inflights, return them */
	if (!list_empty(&channel->inflights)) {
		struct channel_inflight *inflight;

		json_array_start(response, "inflights");
		list_for_each(&channel->inflights, inflight, list) {
			tx = sign_last_tx(cmd, channel, inflight->last_tx,
					  &inflight->last_sig);
			json_object_start(response, NULL);
			json_add_txid(response, "funding_txid",
				      &inflight->funding->outpoint.txid);
			json_add_tx(response, "tx", tx);
			json_object_end(response);
		}
		json_array_end(response);
	}

	return command_success(cmd, response);
}

static const struct json_command dev_sign_last_tx = {
	"dev-sign-last-tx",
	json_sign_last_tx,
	.dev_only = true,
};
AUTODATA(json_command, &dev_sign_last_tx);

static struct command_result *json_dev_fail(struct command *cmd,
					    const char *buffer,
					    const jsmntok_t *obj UNNEEDED,
					    const jsmntok_t *params)
{
	struct channel *channel;

	if (!param(cmd, buffer, params,
		   p_req("id", param_dev_channel, &channel),
		   NULL))
		return command_param_failed();

	channel_fail_permanent(channel,
			       REASON_USER,
			       "Failing due to dev-fail command");
	return command_success(cmd, json_stream_success(cmd));
}

static const struct json_command dev_fail_command = {
	"dev-fail",
	json_dev_fail,
	.dev_only = true,
};
AUTODATA(json_command, &dev_fail_command);

static void dev_reenable_commit_finished(struct subd *channeld UNUSED,
					 const u8 *resp UNUSED,
					 const int *fds UNUSED,
					 struct command *cmd)
{
	was_pending(command_success(cmd, json_stream_success(cmd)));
}

static struct command_result *json_dev_reenable_commit(struct command *cmd,
						       const char *buffer,
						       const jsmntok_t *obj UNNEEDED,
						       const jsmntok_t *params)
{
	u8 *msg;
	struct channel *channel;

	if (!param_check(cmd, buffer, params,
			 p_req("id", param_dev_channel, &channel),
			 NULL))
		return command_param_failed();

	if (!channel->owner) {
		return command_fail(cmd, LIGHTNINGD,
				    "Peer has no owner");
	}

	if (!streq(channel->owner->name, "channeld")) {
		return command_fail(cmd, LIGHTNINGD,
				    "Peer owned by %s", channel->owner->name);
	}

	if (command_check_only(cmd))
		return command_check_done(cmd);

	msg = towire_channeld_dev_reenable_commit(channel);
	subd_req(channel, channel->owner, take(msg), -1, 0,
		 dev_reenable_commit_finished, cmd);
	return command_still_pending(cmd);
}

static const struct json_command dev_reenable_commit = {
	"dev-reenable-commit",
	json_dev_reenable_commit,
	.dev_only = true,
};
AUTODATA(json_command, &dev_reenable_commit);

2018-02-06 15:46:34 +01:00
|
|
|
|
|
|
|
struct dev_forget_channel_cmd {
|
|
|
|
struct short_channel_id scid;
|
2018-02-12 11:13:04 +01:00
|
|
|
struct channel *channel;
|
2018-02-06 15:46:34 +01:00
|
|
|
bool force;
|
|
|
|
struct command *cmd;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void process_dev_forget_channel(struct bitcoind *bitcoind UNUSED,
|
|
|
|
const struct bitcoin_tx_output *txout,
|
|
|
|
void *arg)
|
|
|
|
{
|
2018-10-19 03:17:49 +02:00
|
|
|
struct json_stream *response;
|
2018-02-06 15:46:34 +01:00
|
|
|
struct dev_forget_channel_cmd *forget = arg;
|
|
|
|
if (txout != NULL && !forget->force) {
|
2018-12-16 05:53:06 +01:00
|
|
|
was_pending(command_fail(forget->cmd, LIGHTNINGD,
|
2018-02-06 15:46:34 +01:00
|
|
|
"Cowardly refusing to forget channel with an "
|
|
|
|
"unspent funding output, if you know what "
|
|
|
|
"you're doing you can override with "
|
|
|
|
"`force=true`, otherwise consider `close` or "
|
|
|
|
"`dev-fail`! If you force and the channel "
|
|
|
|
"confirms we will not track the funds in the "
|
2018-12-16 05:53:06 +01:00
|
|
|
"channel"));
|
2018-02-06 15:46:34 +01:00
|
|
|
return;
|
|
|
|
}
|
2018-10-19 03:17:48 +02:00
|
|
|
response = json_stream_success(forget->cmd);
|
2018-02-06 15:46:34 +01:00
|
|
|
json_add_bool(response, "forced", forget->force);
|
|
|
|
json_add_bool(response, "funding_unspent", txout != NULL);
|
2021-10-13 05:45:36 +02:00
|
|
|
json_add_txid(response, "funding_txid", &forget->channel->funding.txid);
|
2018-02-06 15:46:34 +01:00
|
|
|
|
2018-08-09 02:25:29 +02:00
|
|
|
/* Set error so we don't try to reconnect. */
|
2021-02-03 03:51:41 +01:00
|
|
|
forget->channel->error = towire_errorfmt(forget->channel,
|
|
|
|
&forget->channel->cid,
|
2018-08-09 02:25:29 +02:00
|
|
|
"dev_forget_channel");
|
2018-02-21 16:50:49 +01:00
|
|
|
delete_channel(forget->channel);
|
2018-02-06 15:46:34 +01:00
|
|
|
|
2018-12-16 05:53:06 +01:00
|
|
|
was_pending(command_success(forget->cmd, response));
|
2018-02-06 15:46:34 +01:00
|
|
|
}
|
|
|
|
|
2018-12-16 05:52:06 +01:00
|
|
|
static struct command_result *json_dev_forget_channel(struct command *cmd,
|
|
|
|
const char *buffer,
|
|
|
|
const jsmntok_t *obj UNNEEDED,
|
|
|
|
const jsmntok_t *params)
|
2018-02-06 15:46:34 +01:00
|
|
|
{
|
2018-02-12 11:13:04 +01:00
|
|
|
struct peer *peer;
|
|
|
|
struct channel *channel;
|
2018-07-20 03:14:02 +02:00
|
|
|
struct short_channel_id *scid;
|
2020-09-09 09:20:53 +02:00
|
|
|
struct channel_id *find_cid;
|
2023-10-24 23:09:04 +02:00
|
|
|
struct dev_forget_channel_cmd *forget;
|
2018-08-13 22:31:40 +02:00
|
|
|
bool *force;
|
2023-10-24 23:09:04 +02:00
|
|
|
|
|
|
|
if (!param_check(cmd, buffer, params,
|
|
|
|
p_req("id", param_peer, &peer),
|
|
|
|
p_opt("short_channel_id", param_short_channel_id, &scid),
|
|
|
|
p_opt("channel_id", param_channel_id, &find_cid),
|
|
|
|
p_opt_def("force", param_bool, &force, false),
|
|
|
|
NULL))
|
2018-12-16 05:52:06 +01:00
|
|
|
return command_param_failed();
|
2018-02-06 15:46:34 +01:00
|
|
|
|
2023-10-24 23:09:04 +02:00
|
|
|
forget = tal(cmd, struct dev_forget_channel_cmd);
|
|
|
|
forget->cmd = cmd;
|
2018-08-13 22:31:40 +02:00
|
|
|
forget->force = *force;
|
2018-02-12 11:13:04 +01:00
|
|
|
|
|
|
|
forget->channel = NULL;
|
|
|
|
list_for_each(&peer->channels, channel, list) {
|
2019-09-30 18:31:27 +02:00
|
|
|
/* Check for channel id first */
|
|
|
|
if (find_cid) {
|
2020-09-09 09:20:53 +02:00
|
|
|
if (!channel_id_eq(find_cid, &channel->cid))
|
2019-09-30 18:31:27 +02:00
|
|
|
continue;
|
|
|
|
}
|
2018-07-20 03:14:02 +02:00
|
|
|
if (scid) {
|
2018-02-12 11:13:04 +01:00
|
|
|
if (!channel->scid)
|
|
|
|
continue;
|
2024-03-20 02:59:51 +01:00
|
|
|
if (!short_channel_id_eq(*channel->scid, *scid))
|
2018-02-12 11:13:04 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (forget->channel) {
|
2018-12-16 05:52:06 +01:00
|
|
|
return command_fail(cmd, LIGHTNINGD,
|
|
|
|
"Multiple channels:"
|
|
|
|
" please specify short_channel_id");
|
2018-02-12 11:13:04 +01:00
|
|
|
}
|
|
|
|
forget->channel = channel;
|
|
|
|
}
|
|
|
|
if (!forget->channel) {
|
2018-12-16 05:52:06 +01:00
|
|
|
return command_fail(cmd, LIGHTNINGD,
|
2019-02-08 14:23:22 +01:00
|
|
|
"No channels matching that peer_id%s",
|
|
|
|
scid ? " and that short_channel_id" : "");
|
2018-02-12 11:13:04 +01:00
|
|
|
}
|
|
|
|
|
2018-02-28 23:24:58 +01:00
|
|
|
if (channel_has_htlc_out(forget->channel) ||
|
|
|
|
channel_has_htlc_in(forget->channel)) {
|
2018-12-16 05:52:06 +01:00
|
|
|
return command_fail(cmd, LIGHTNINGD,
|
|
|
|
"This channel has HTLCs attached and it is "
|
|
|
|
"not safe to forget it. Please use `close` "
|
|
|
|
"or `dev-fail` instead.");
|
2018-02-28 23:24:58 +01:00
|
|
|
}

	if (command_check_only(cmd))
		return command_check_done(cmd);
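
	/* For committed channels, ask bitcoind about the funding outpoint;
	 * process_dev_forget_channel() completes the command from that
	 * callback. */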
	if (!channel_state_uncommitted(forget->channel->state))
		bitcoind_getutxout(cmd, cmd->ld->topology->bitcoind,
				   &forget->channel->funding,
				   process_dev_forget_channel, forget);
	return command_still_pending(cmd);
}

static const struct json_command dev_forget_channel_command = {
	"dev-forget-channel",
	json_dev_forget_channel,
	.dev_only = true,
};
AUTODATA(json_command, &dev_forget_channel_command);
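
/* Dev memleak support: each subdaemon type answers a dev_memleak wire
 * request with a found-a-leak verdict; these callbacks collect those
 * verdicts for the leak detector. */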
static void channeld_memleak_req_done(struct subd *channeld,
				      const u8 *msg, const int *fds UNUSED,
				      struct leak_detect *leaks)
{
	bool found_leak;

	if (!fromwire_channeld_dev_memleak_reply(msg, &found_leak))
		fatal("Bad channel_dev_memleak");

	if (found_leak)
		report_subd_memleak(leaks, channeld);
}

static void onchaind_memleak_req_done(struct subd *onchaind,
				      const u8 *msg, const int *fds UNUSED,
				      struct leak_detect *leaks)
{
	bool found_leak;

	if (!fromwire_onchaind_dev_memleak_reply(msg, &found_leak))
		fatal("Bad onchaind_dev_memleak");

	if (found_leak)
		report_subd_memleak(leaks, onchaind);
}

static void openingd_memleak_req_done(struct subd *open_daemon,
				      const u8 *msg, const int *fds UNUSED,
				      struct leak_detect *leaks)
{
	bool found_leak;

	if (!fromwire_openingd_dev_memleak_reply(msg, &found_leak))
		fatal("Bad opening_dev_memleak");

	if (found_leak)
		report_subd_memleak(leaks, open_daemon);
}

static void dualopend_memleak_req_done(struct subd *dualopend,
				       const u8 *msg, const int *fds UNUSED,
				       struct leak_detect *leaks)
{
	bool found_leak;

	if (!fromwire_dualopend_dev_memleak_reply(msg, &found_leak))
		fatal("Bad dualopend_dev_memleak");

	if (found_leak)
		report_subd_memleak(leaks, dualopend);
}
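
/* Fan out a dev_memleak request to every live subdaemon attached to each
 * peer: openingd on uncommitted channels, then channeld, onchaind or
 * dualopend per established channel. */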
void peer_dev_memleak(struct lightningd *ld, struct leak_detect *leaks)
{
	struct peer *p;
	struct peer_node_id_map_iter it;

	for (p = peer_node_id_map_first(ld->peers, &it);
	     p;
	     p = peer_node_id_map_next(ld->peers, &it)) {
		struct channel *c;
		if (p->uncommitted_channel && p->uncommitted_channel->open_daemon) {
			struct subd *openingd = p->uncommitted_channel->open_daemon;
			start_leak_request(subd_req(openingd, openingd,
					   take(towire_openingd_dev_memleak(NULL)),
					   -1, 0, openingd_memleak_req_done, leaks),
					   leaks);
		}
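
		/* An established channel is owned by at most one subdaemon
		 * at a time; ask whichever currently owns this one. */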
		list_for_each(&p->channels, c, list) {
			if (!c->owner)
				continue;
			if (streq(c->owner->name, "channeld")) {
				start_leak_request(subd_req(c, c->owner,
					 take(towire_channeld_dev_memleak(NULL)),
					 -1, 0, channeld_memleak_req_done, leaks),
					 leaks);
			} else if (streq(c->owner->name, "onchaind")) {
				start_leak_request(subd_req(c, c->owner,
					 take(towire_onchaind_dev_memleak(NULL)),
					 -1, 0, onchaind_memleak_req_done, leaks),
					 leaks);
			} else if (streq(c->owner->name, "dualopend")) {
				start_leak_request(subd_req(c, c->owner,
					 take(towire_dualopend_dev_memleak(NULL)),
					 -1, 0, dualopend_memleak_req_done, leaks),
					 leaks);
			}
		}
	}
}