#include "config.h"
#include <arpa/inet.h>
#include <bitcoin/feerate.h>
#include <bitcoin/script.h>
#include <bitcoin/tx.h>
#include <ccan/array_size/array_size.h>
#include <ccan/cast/cast.h>
#include <ccan/io/io.h>
#include <ccan/mem/mem.h>
#include <ccan/noerr/noerr.h>
#include <ccan/str/str.h>
#include <ccan/take/take.h>
#include <ccan/tal/str/str.h>
#include <channeld/channeld_wiregen.h>
#include <common/addr.h>
#include <common/closing_fee.h>
#include <common/configdir.h>
#include <common/dev_disconnect.h>
#include <common/features.h>
#include <common/htlc_trim.h>
#include <common/initial_commit_tx.h>
#include <common/json_command.h>
#include <common/json_helpers.h>
#include <common/json_tok.h>
#include <common/jsonrpc_errors.h>
#include <common/key_derive.h>
#include <common/param.h>
#include <common/shutdown_scriptpubkey.h>
#include <common/status.h>
#include <common/timeout.h>
#include <common/type_to_string.h>
#include <common/utils.h>
#include <common/version.h>
#include <common/wire_error.h>
#include <connectd/connectd_wiregen.h>
#include <errno.h>
#include <fcntl.h>
#include <gossipd/gossipd_wiregen.h>
#include <hsmd/hsmd_wiregen.h>
#include <inttypes.h>
#include <lightningd/bitcoind.h>
#include <lightningd/chaintopology.h>
#include <lightningd/channel.h>
#include <lightningd/channel_control.h>
#include <lightningd/closing_control.h>
#include <lightningd/connect_control.h>
#include <lightningd/dual_open_control.h>
#include <lightningd/hsm_control.h>
#include <lightningd/json.h>
#include <lightningd/jsonrpc.h>
#include <lightningd/lightningd.h>
#include <lightningd/log.h>
#include <lightningd/memdump.h>
#include <lightningd/notification.h>
#include <lightningd/onchain_control.h>
#include <lightningd/opening_common.h>
#include <lightningd/opening_control.h>
#include <lightningd/options.h>
#include <lightningd/peer_control.h>
#include <lightningd/peer_fd.h>
#include <lightningd/peer_htlcs.h>
#include <lightningd/plugin_hook.h>
#include <lightningd/subd.h>
#include <limits.h>
#include <onchaind/onchaind_wiregen.h>
#include <openingd/dualopend_wiregen.h>
#include <openingd/openingd_wiregen.h>
#include <stdlib.h>
#include <unistd.h>
#include <wally_bip32.h>
#include <wire/onion_wire.h>
#include <wire/wire_sync.h>

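/* Peer lifecycle, in brief: new_peer() allocates a struct peer with no tal
 * parent; it is effectively owned by its channels (see the comment in
 * new_peer below) and is torn down via maybe_delete_peer()/delete_peer()
 * once the last channel and any uncommitted channel are gone. */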
static void destroy_peer(struct peer *peer)
{
	list_del_from(&peer->ld->peers, &peer->list);
}

static void peer_update_features(struct peer *peer,
				 const u8 *their_features TAKES)
{
	tal_free(peer->their_features);
	peer->their_features = tal_dup_talarr(peer, u8, their_features);
}

struct peer *new_peer(struct lightningd *ld, u64 dbid,
		      const struct node_id *id,
		      const struct wireaddr_internal *addr,
		      bool connected_incoming)
{
	/* We are owned by our channels, and freed manually by destroy_channel */
	struct peer *peer = tal(NULL, struct peer);

	peer->ld = ld;
	peer->dbid = dbid;
	peer->id = *id;
	peer->uncommitted_channel = NULL;
	peer->addr = *addr;
	peer->connected_incoming = connected_incoming;
	peer->their_features = NULL;
	list_head_init(&peer->channels);
	peer->direction = node_id_idx(&peer->ld->id, &peer->id);
	peer->is_connected = false;
#if DEVELOPER
	peer->ignore_htlcs = false;
#endif

	list_add_tail(&ld->peers, &peer->list);
	tal_add_destructor(peer, destroy_peer);
	return peer;
}

static void delete_peer(struct peer *peer)
{
	assert(list_empty(&peer->channels));
	assert(!peer->uncommitted_channel);
	/* If it only ever existed because of uncommitted channel, it won't
	 * be in the database */
	if (peer->dbid != 0)
		wallet_peer_delete(peer->ld->wallet, peer->dbid);
	tal_free(peer);
}

/* Last one out deletes peer. */
void maybe_delete_peer(struct peer *peer)
{
	if (!list_empty(&peer->channels))
		return;
	if (peer->uncommitted_channel) {
		/* This isn't sufficient to keep it in db! */
		if (peer->dbid != 0) {
			wallet_peer_delete(peer->ld->wallet, peer->dbid);
			peer->dbid = 0;
		}
		return;
	}
	delete_peer(peer);
}

struct peer *find_peer_by_dbid(struct lightningd *ld, u64 dbid)
{
	struct peer *p;

	list_for_each(&ld->peers, p, list)
		if (p->dbid == dbid)
			return p;
	return NULL;
}

struct peer *peer_by_id(struct lightningd *ld, const struct node_id *id)
{
	struct peer *p;

	list_for_each(&ld->peers, p, list)
		if (node_id_eq(&p->id, id))
			return p;
	return NULL;
}

struct peer *peer_from_json(struct lightningd *ld,
			    const char *buffer,
			    const jsmntok_t *peeridtok)
{
	struct node_id peerid;

	if (!json_to_node_id(buffer, peeridtok, &peerid))
		return NULL;

	return peer_by_id(ld, &peerid);
}

u8 *p2wpkh_for_keyidx(const tal_t *ctx, struct lightningd *ld, u64 keyidx)
{
	struct pubkey shutdownkey;

	if (!bip32_pubkey(ld->wallet->bip32_base, &shutdownkey, keyidx))
		return NULL;

	return scriptpubkey_p2wpkh(ctx, &shutdownkey);
}
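
/* Commitment signing goes through the HSM daemon: lightningd itself does not
 * hold the funding key.  sign_last_tx() below sends a
 * hsmd_sign_commitment_tx request over ld->hsm_fd, reads the reply
 * synchronously, and combines the returned signature with the stored remote
 * signature into the 2-of-2 funding witness. */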
static void sign_last_tx(struct channel *channel,
			 struct bitcoin_tx *last_tx,
			 struct bitcoin_signature *last_sig)
{
	struct lightningd *ld = channel->peer->ld;
	struct bitcoin_signature sig;
	u8 *msg, **witness;
	u64 commit_index = channel->next_index[LOCAL] - 1;

	assert(!last_tx->wtx->inputs[0].witness);
	msg = towire_hsmd_sign_commitment_tx(tmpctx,
					     &channel->peer->id,
					     channel->dbid,
					     last_tx,
					     &channel->channel_info
					     .remote_fundingkey,
					     commit_index);

	if (!wire_sync_write(ld->hsm_fd, take(msg)))
		fatal("Could not write to HSM: %s", strerror(errno));

	msg = wire_sync_read(tmpctx, ld->hsm_fd);
	if (!fromwire_hsmd_sign_commitment_tx_reply(msg, &sig))
		fatal("HSM gave bad sign_commitment_tx_reply %s",
		      tal_hex(tmpctx, msg));

	witness = bitcoin_witness_2of2(last_tx, last_sig,
				       &sig,
				       &channel->channel_info.remote_fundingkey,
				       &channel->local_funding_pubkey);

	bitcoin_tx_input_set_witness(last_tx, 0, take(witness));
}

static void remove_sig(struct bitcoin_tx *signed_tx)
{
	bitcoin_tx_input_set_witness(signed_tx, 0, NULL);
}

static bool invalid_last_tx(const struct bitcoin_tx *tx)
{
	/* This problem goes back further, but was discovered just before the
	 * 0.7.1 release. */
#ifdef COMPAT_V070
	/* Old bug had commitment txs with no outputs; bitcoin_txid asserts. */
	return tx->wtx->num_outputs == 0;
#else
	return false;
#endif
}

static void sign_and_send_last(struct lightningd *ld,
			       struct channel *channel,
			       struct bitcoin_tx *last_tx,
			       struct bitcoin_signature *last_sig)
{
	struct bitcoin_txid txid;

	sign_last_tx(channel, last_tx, last_sig);
	bitcoin_txid(last_tx, &txid);
	wallet_transaction_add(ld->wallet, last_tx->wtx, 0, 0);
	wallet_transaction_annotate(ld->wallet, &txid,
				    channel->last_tx_type,
				    channel->dbid);

	/* Keep broadcasting until we say stop (can fail due to dup,
	 * if they beat us to the broadcast). */
	broadcast_tx(ld->topology, channel, last_tx, NULL);
	remove_sig(last_tx);
}
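
/* drop_to_chain() broadcasts our latest commitment transaction(s).  For a
 * non-cooperative close of a channel that still has in-flight funding
 * candidates we do not know which funding transaction will confirm, so every
 * inflight's commitment tx is signed and broadcast; otherwise only the
 * channel's current last_tx is used. */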
void drop_to_chain(struct lightningd *ld, struct channel *channel,
		   bool cooperative)
{
	struct channel_inflight *inflight;

	/* BOLT #2:
	 *
	 * - if `next_revocation_number` is greater than expected
	 *   above, AND `your_last_per_commitment_secret` is correct for that
	 *   `next_revocation_number` minus 1:
	 *      - MUST NOT broadcast its commitment transaction.
	 */
	if (channel->future_per_commitment_point && !cooperative) {
		log_broken(channel->log,
			   "Cannot broadcast our commitment tx:"
			   " they have a future one");
	} else if (invalid_last_tx(channel->last_tx)) {
		log_broken(channel->log,
			   "Cannot broadcast our commitment tx:"
			   " it's invalid! (ancient channel?)");
	} else {
		/* We need to drop *every* commitment transaction to chain */
		if (!cooperative && !list_empty(&channel->inflights)) {
			list_for_each(&channel->inflights, inflight, list)
				sign_and_send_last(ld, channel,
						   inflight->last_tx,
						   &inflight->last_sig);
		} else
			sign_and_send_last(ld, channel, channel->last_tx,
					   &channel->last_sig);
	}

	resolve_close_command(ld, channel, cooperative);
}
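
/* channel_errmsg() is the common handler for errors and warnings reported by
 * a channel's owning subdaemon.  Roughly: unsaved channels are simply
 * deleted, a missing peer_fd (subdaemon crash or disconnection) triggers a
 * reconnect, warnings only schedule a delayed reconnect, and a real error
 * either forgets the channel (error received as fundee while still awaiting
 * lockin) or fails it permanently. */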
void channel_errmsg(struct channel *channel,
		    struct peer_fd *peer_fd,
		    const struct channel_id *channel_id UNUSED,
		    const char *desc,
		    bool warning,
		    const u8 *err_for_them)
{
	/* Clean up any in-progress open attempts */
	channel_cleanup_commands(channel, desc);

	if (channel_unsaved(channel)) {
		log_info(channel->log, "%s", "Unsaved peer failed."
			 " Disconnecting and deleting channel.");
		delete_channel(channel);
		return;
	}

	/* No peer_fd means a subd crash or disconnection. */
	if (!peer_fd) {
		/* If the channel is unsaved, we forget it */
		channel_fail_reconnect(channel, "%s: %s",
				       channel->owner->name, desc);
		return;
	}

	/* Do we have an error to send? */
	if (err_for_them && !channel->error && !warning)
		channel->error = tal_dup_talarr(channel, u8, err_for_them);

	/* Other implementations chose to ignore errors early on.  Not
	 * surprisingly, they now spew out spurious errors frequently,
	 * and we would close the channel on them.  We now support warnings
	 * for this case. */
	if (warning) {
		channel_fail_reconnect_later(channel, "%s WARNING: %s",
					     channel->owner->name, desc);
		return;
	}

	/* BOLT #1:
	 *
	 * A sending node:
	 *...
	 *  - when `channel_id` is 0:
	 *    - MUST fail all channels with the receiving node.
	 *    - MUST close the connection.
	 */
	/* FIXME: Close if it's an all-channels error sent or rcvd */

	/* BOLT #1:
	 *
	 * A sending node:
	 *  - when sending `error`:
	 *    - MUST fail the channel referred to by the error message.
	 *...
	 * The receiving node:
	 *  - upon receiving `error`:
	 *    - MUST fail the channel referred to by the error message,
	 *      if that channel is with the sending node.
	 */

	/* We should immediately forget the channel if we receive error during
	 * CHANNELD_AWAITING_LOCKIN if we are fundee. */
	if (!err_for_them && channel->opener == REMOTE
	    && channel->state == CHANNELD_AWAITING_LOCKIN)
		channel_fail_forget(channel, "%s: %s ERROR %s",
				    channel->owner->name,
				    err_for_them ? "sent" : "received", desc);
	else
		channel_fail_permanent(channel,
				       err_for_them ? REASON_LOCAL : REASON_PROTOCOL,
				       "%s: %s ERROR %s",
				       channel->owner->name,
				       err_for_them ? "sent" : "received", desc);
}
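
/* json_add_htlcs() walks the global HTLC maps and emits one object per HTLC
 * belonging to this channel.  An entry looks roughly like this (illustrative
 * values):
 *   { "direction": "in", "id": 7, "msatoshi": 100000,
 *     "amount_msat": "100000msat", "expiry": 700123,
 *     "payment_hash": "...", "state": "RCVD_ADD_ACK_REVOCATION",
 *     "local_trimmed": true }
 */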
static void json_add_htlcs(struct lightningd *ld,
			   struct json_stream *response,
			   const struct channel *channel)
{
	/* FIXME: make per-channel htlc maps! */
	const struct htlc_in *hin;
	struct htlc_in_map_iter ini;
	const struct htlc_out *hout;
	struct htlc_out_map_iter outi;
	u32 local_feerate = get_feerate(channel->fee_states,
					channel->opener, LOCAL);

	/* FIXME: Add more fields. */
	json_array_start(response, "htlcs");
	for (hin = htlc_in_map_first(&ld->htlcs_in, &ini);
	     hin;
	     hin = htlc_in_map_next(&ld->htlcs_in, &ini)) {
		if (hin->key.channel != channel)
			continue;
		json_object_start(response, NULL);
		json_add_string(response, "direction", "in");
		json_add_u64(response, "id", hin->key.id);
		json_add_amount_msat_compat(response, hin->msat,
					    "msatoshi", "amount_msat");
		json_add_u32(response, "expiry", hin->cltv_expiry);
		json_add_sha256(response, "payment_hash", &hin->payment_hash);
		json_add_string(response, "state",
				htlc_state_name(hin->hstate));
		if (htlc_is_trimmed(REMOTE, hin->msat, local_feerate,
				    channel->our_config.dust_limit, LOCAL,
				    channel_has(channel, OPT_ANCHOR_OUTPUTS)))
			json_add_bool(response, "local_trimmed", true);
		if (hin->status != NULL)
			json_add_string(response, "status", hin->status);
		json_object_end(response);
	}
	for (hout = htlc_out_map_first(&ld->htlcs_out, &outi);
	     hout;
	     hout = htlc_out_map_next(&ld->htlcs_out, &outi)) {
		if (hout->key.channel != channel)
			continue;
		json_object_start(response, NULL);
		json_add_string(response, "direction", "out");
		json_add_u64(response, "id", hout->key.id);
		json_add_amount_msat_compat(response, hout->msat,
					    "msatoshi", "amount_msat");
		json_add_u64(response, "expiry", hout->cltv_expiry);
		json_add_sha256(response, "payment_hash", &hout->payment_hash);
		json_add_string(response, "state",
				htlc_state_name(hout->hstate));
		if (htlc_is_trimmed(LOCAL, hout->msat, local_feerate,
				    channel->our_config.dust_limit, LOCAL,
				    channel_has(channel, OPT_ANCHOR_OUTPUTS)))
			json_add_bool(response, "local_trimmed", true);
		json_object_end(response);
	}
	json_array_end(response);
}

/* We do this replication manually because it's an array. */
static void json_add_sat_only(struct json_stream *result,
			      const char *fieldname,
			      struct amount_sat sat)
{
	struct amount_msat msat;

	if (amount_sat_to_msat(&msat, sat))
		json_add_string(result, fieldname,
				type_to_string(tmpctx, struct amount_msat,
					       &msat));
}
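
/* commit_txfee() estimates what the commitment transaction would cost if one
 * more non-trimmed HTLC were added, using twice the current feerate as the
 * BOLT #2 "fee spike buffer".  Rough worked example (assuming the usual
 * BOLT #3 weights of 724 base plus 172 per untrimmed HTLC, no anchors): at
 * 2500 perkw with one existing untrimmed HTLC plus the prospective one,
 * weight = 724 + 2*172 = 1068, so fee = 1068 * (2*2500) / 1000 = 5340 sat.
 * With anchors, the fixed 2 * 330 = 660 sat for the anchor outputs is added
 * on top, as the code below does. */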
/* Fee a commitment transaction would currently cost */
static struct amount_sat commit_txfee(const struct channel *channel,
				      struct amount_msat amount,
				      enum side side)
{
	/* FIXME: make per-channel htlc maps! */
	const struct htlc_in *hin;
	struct htlc_in_map_iter ini;
	const struct htlc_out *hout;
	struct htlc_out_map_iter outi;
	struct lightningd *ld = channel->peer->ld;
	size_t num_untrimmed_htlcs = 0;
	u32 feerate = get_feerate(channel->fee_states,
				  channel->opener, side);
	struct amount_sat dust_limit;
	struct amount_sat fee;
	bool option_anchor_outputs = channel_has(channel, OPT_ANCHOR_OUTPUTS);

	if (side == LOCAL)
		dust_limit = channel->our_config.dust_limit;
	if (side == REMOTE)
		dust_limit = channel->channel_info.their_config.dust_limit;

	/* Assume we tried to add "amount" */
	if (!htlc_is_trimmed(side, amount, feerate, dust_limit, side,
			     option_anchor_outputs))
		num_untrimmed_htlcs++;

	for (hin = htlc_in_map_first(&ld->htlcs_in, &ini);
	     hin;
	     hin = htlc_in_map_next(&ld->htlcs_in, &ini)) {
		if (hin->key.channel != channel)
			continue;
		if (!htlc_is_trimmed(!side, hin->msat, feerate, dust_limit,
				     side, option_anchor_outputs))
			num_untrimmed_htlcs++;
	}
	for (hout = htlc_out_map_first(&ld->htlcs_out, &outi);
	     hout;
	     hout = htlc_out_map_next(&ld->htlcs_out, &outi)) {
		if (hout->key.channel != channel)
			continue;
		if (!htlc_is_trimmed(side, hout->msat, feerate, dust_limit,
				     side, option_anchor_outputs))
			num_untrimmed_htlcs++;
	}

	/*
	 * BOLT #2:
	 * A sending node:
	 *...
	 * - SHOULD NOT offer `amount_msat` if, after adding that HTLC to its
	 *   commitment transaction, its remaining balance doesn't allow it to
	 *   pay the commitment transaction fee when receiving or sending a
	 *   future additional non-dust HTLC while maintaining its channel
	 *   reserve. It is recommended that this "fee spike buffer" can
	 *   handle twice the current `feerate_per_kw` to ensure
	 *   predictability between implementations.
	 */
	fee = commit_tx_base_fee(2 * feerate, num_untrimmed_htlcs + 1,
				 option_anchor_outputs);

	if (option_anchor_outputs) {
		/* BOLT #3:
		 * If `option_anchors` applies to the commitment
		 * transaction, also subtract two times the fixed anchor size
		 * of 330 sats from the funder (either `to_local` or
		 * `to_remote`).
		 */
		if (!amount_sat_add(&fee, fee, AMOUNT_SAT(660)))
			; /* fee is somehow astronomical already.... */
	}

	return fee;
}

static void subtract_offered_htlcs(const struct channel *channel,
				   struct amount_msat *amount)
{
	const struct htlc_out *hout;
	struct htlc_out_map_iter outi;
	struct lightningd *ld = channel->peer->ld;

	for (hout = htlc_out_map_first(&ld->htlcs_out, &outi);
	     hout;
	     hout = htlc_out_map_next(&ld->htlcs_out, &outi)) {
		if (hout->key.channel != channel)
			continue;
		if (!amount_msat_sub(amount, *amount, hout->msat))
			*amount = AMOUNT_MSAT(0);
	}
}

static void subtract_received_htlcs(const struct channel *channel,
				    struct amount_msat *amount)
{
	const struct htlc_in *hin;
	struct htlc_in_map_iter ini;
	struct lightningd *ld = channel->peer->ld;

	for (hin = htlc_in_map_first(&ld->htlcs_in, &ini);
	     hin;
	     hin = htlc_in_map_next(&ld->htlcs_in, &ini)) {
		if (hin->key.channel != channel)
			continue;
		if (!amount_msat_sub(amount, *amount, hin->msat))
			*amount = AMOUNT_MSAT(0);
	}
}
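
/* channel_amount_spendable(): how much we could send in a single new HTLC.
 * Start from our current balance, subtract the reserve the peer imposes on
 * us, subtract HTLCs we have already offered, and (if we are the opener)
 * subtract the estimated commitment fee; the result is then bounded below by
 * the peer's htlc_minimum and above by chainparams->max_payment.
 * channel_amount_receivable() is the mirror-image calculation for the peer's
 * side of the channel. */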
static struct amount_msat channel_amount_spendable(const struct channel *channel)
{
	struct amount_msat spendable;

	/* Compute how much we can send via this channel in one payment. */
	if (!amount_msat_sub_sat(&spendable,
				 channel->our_msat,
				 channel->channel_info.their_config.channel_reserve))
		return AMOUNT_MSAT(0);

	/* Take away any currently-offered HTLCs. */
	subtract_offered_htlcs(channel, &spendable);

	/* If we're opener, subtract txfees we'll need to spend this */
	if (channel->opener == LOCAL) {
		if (!amount_msat_sub_sat(&spendable, spendable,
					 commit_txfee(channel, spendable,
						      LOCAL)))
			return AMOUNT_MSAT(0);
	}

	/* We can't offer an HTLC less than the other side will accept. */
	if (amount_msat_less(spendable,
			     channel->channel_info.their_config.htlc_minimum))
		return AMOUNT_MSAT(0);

	/* We can't offer an HTLC over the max payment threshold either. */
	if (amount_msat_greater(spendable, chainparams->max_payment))
		spendable = chainparams->max_payment;

	return spendable;
}

struct amount_msat channel_amount_receivable(const struct channel *channel)
{
	struct amount_msat their_msat, receivable;

	/* Compute how much we can receive via this channel in one payment */
	if (!amount_sat_sub_msat(&their_msat,
				 channel->funding_sats, channel->our_msat))
		their_msat = AMOUNT_MSAT(0);

	if (!amount_msat_sub_sat(&receivable,
				 their_msat,
				 channel->our_config.channel_reserve))
		return AMOUNT_MSAT(0);

	/* Take away any currently-offered HTLCs. */
	subtract_received_htlcs(channel, &receivable);

	/* If they're opener, subtract txfees they'll need to spend this */
	if (channel->opener == REMOTE) {
		if (!amount_msat_sub_sat(&receivable, receivable,
					 commit_txfee(channel,
						      receivable, REMOTE)))
			return AMOUNT_MSAT(0);
	}

	/* They can't offer an HTLC less than what we will accept. */
	if (amount_msat_less(receivable, channel->our_config.htlc_minimum))
		return AMOUNT_MSAT(0);

	/* They can't offer an HTLC over the max payment threshold either. */
	if (amount_msat_greater(receivable, chainparams->max_payment))
		receivable = chainparams->max_payment;

	return receivable;
}
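
/* json_add_channel() renders one channel for listpeers.  Feerates are shown
 * in both styles: perkw (per-kilo-weight) and perkb, where perkb is four
 * times the perkw value since one vbyte is four weight units.  For channels
 * with in-flight funding candidates, next_feerate follows the dual-funding
 * rule of at least 65/64 of the last funding feerate (e.g. 7500 perkw
 * -> 7617 perkw, rounded down). */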
static void json_add_channel(struct lightningd *ld,
			     struct json_stream *response, const char *key,
			     const struct channel *channel)
{
	struct channel_stats channel_stats;
	struct amount_msat funding_msat, peer_msats, our_msats;
	struct amount_sat peer_funded_sats;
	struct peer *p = channel->peer;
	struct state_change_entry *state_changes;
	u32 feerate;

	json_object_start(response, key);
	json_add_string(response, "state", channel_state_name(channel));
	if (channel->last_tx && !invalid_last_tx(channel->last_tx)) {
		struct bitcoin_txid txid;
		bitcoin_txid(channel->last_tx, &txid);

		json_add_txid(response, "scratch_txid", &txid);
		if (deprecated_apis)
			json_add_amount_sat_only(response, "last_tx_fee",
						 bitcoin_tx_compute_fee(channel->last_tx));
		json_add_amount_sat_only(response, "last_tx_fee_msat",
					 bitcoin_tx_compute_fee(channel->last_tx));
	}

	json_object_start(response, "feerate");
	feerate = get_feerate(channel->fee_states, channel->opener, LOCAL);
	json_add_u32(response, feerate_style_name(FEERATE_PER_KSIPA), feerate);
	json_add_u32(response, feerate_style_name(FEERATE_PER_KBYTE),
		     feerate_to_style(feerate, FEERATE_PER_KBYTE));
	json_object_end(response);

	if (channel->owner)
		json_add_string(response, "owner", channel->owner->name);

	if (channel->scid) {
		json_add_short_channel_id(response, "short_channel_id",
					  channel->scid);
		json_add_num(response, "direction",
			     node_id_idx(&ld->id, &channel->peer->id));
	}

	json_add_string(response, "channel_id",
			type_to_string(tmpctx, struct channel_id,
				       &channel->cid));
	json_add_txid(response, "funding_txid", &channel->funding.txid);
	json_add_num(response, "funding_outnum", channel->funding.n);

	if (!list_empty(&channel->inflights)) {
		struct channel_inflight *initial, *inflight;
		u32 last_feerate, next_feerate;

		initial = list_top(&channel->inflights,
				   struct channel_inflight, list);
		json_add_string(response, "initial_feerate",
				tal_fmt(tmpctx, "%d%s",
					initial->funding->feerate,
					feerate_style_name(FEERATE_PER_KSIPA)));

		last_feerate = channel_last_funding_feerate(channel);
		assert(last_feerate > 0);
		json_add_string(response, "last_feerate",
				tal_fmt(tmpctx, "%d%s", last_feerate,
					feerate_style_name(FEERATE_PER_KSIPA)));

		/* BOLT-9e7723387c8859b511e178485605a0b9133b9869 #2:
		 * - MUST set `funding_feerate_perkw` greater than or equal to
		 *   65/64 times the last sent `funding_feerate_perkw`
		 *   rounded down.
		 */
		next_feerate = last_feerate * 65 / 64;
		assert(next_feerate > last_feerate);
		json_add_string(response, "next_feerate",
				tal_fmt(tmpctx, "%d%s", next_feerate,
					feerate_style_name(FEERATE_PER_KSIPA)));

		/* List the inflights */
		json_array_start(response, "inflight");
		list_for_each(&channel->inflights, inflight, list) {
			struct bitcoin_txid txid;

			json_object_start(response, NULL);
			json_add_txid(response, "funding_txid",
				      &inflight->funding->outpoint.txid);
			json_add_num(response, "funding_outnum",
				     inflight->funding->outpoint.n);
			json_add_string(response, "feerate",
					tal_fmt(tmpctx, "%d%s",
						inflight->funding->feerate,
						feerate_style_name(
							FEERATE_PER_KSIPA)));
			json_add_amount_sat_only(response,
						 "total_funding_msat",
						 inflight->funding->total_funds);
			json_add_amount_sat_only(response,
						 "our_funding_msat",
						 inflight->funding->our_funds);
			/* Add the expected commitment tx id also */
			bitcoin_txid(inflight->last_tx, &txid);
			json_add_txid(response, "scratch_txid", &txid);
			json_object_end(response);
		}
		json_array_end(response);
	}

	if (channel->shutdown_scriptpubkey[LOCAL]) {
		char *addr = encode_scriptpubkey_to_addr(tmpctx,
					chainparams,
					channel->shutdown_scriptpubkey[LOCAL]);
		if (addr)
			json_add_string(response, "close_to_addr", addr);
		json_add_hex_talarr(response, "close_to",
				    channel->shutdown_scriptpubkey[LOCAL]);
	}

	json_add_bool(
	    response, "private",
	    !(channel->channel_flags & CHANNEL_FLAGS_ANNOUNCE_CHANNEL));

	/* opener and closer */
	assert(channel->opener != NUM_SIDES);
	json_add_string(response, "opener", channel->opener == LOCAL ?
					    "local" : "remote");
	if (channel->closer != NUM_SIDES)
		json_add_string(response, "closer", channel->closer == LOCAL ?
						    "local" : "remote");
	else if (deprecated_apis)
		json_add_null(response, "closer");

	json_array_start(response, "features");
	if (channel_has(channel, OPT_STATIC_REMOTEKEY))
		json_add_string(response, NULL, "option_static_remotekey");
	if (channel_has(channel, OPT_ANCHOR_OUTPUTS))
		json_add_string(response, NULL, "option_anchor_outputs");
	json_array_end(response);

	if (!amount_sat_sub(&peer_funded_sats, channel->funding_sats,
			    channel->our_funds)) {
		log_broken(channel->log,
			   "Overflow subtracting funding %s, our funds %s",
			   type_to_string(tmpctx, struct amount_sat,
					  &channel->funding_sats),
			   type_to_string(tmpctx, struct amount_sat,
					  &channel->our_funds));
		peer_funded_sats = AMOUNT_SAT(0);
	}
	if (!amount_sat_to_msat(&peer_msats, peer_funded_sats)) {
		log_broken(channel->log,
			   "Overflow converting peer sats %s to msat",
			   type_to_string(tmpctx, struct amount_sat,
					  &peer_funded_sats));
		peer_msats = AMOUNT_MSAT(0);
	}
	if (!amount_sat_to_msat(&our_msats, channel->our_funds)) {
		log_broken(channel->log,
			   "Overflow converting peer sats %s to msat",
			   type_to_string(tmpctx, struct amount_sat,
					  &channel->our_funds));
		our_msats = AMOUNT_MSAT(0);
	}

	if (deprecated_apis) {
		json_object_start(response, "funding_allocation_msat");
		json_add_u64(response, node_id_to_hexstr(tmpctx, &p->id),
			     peer_msats.millisatoshis); /* Raw: JSON field */
		json_add_u64(response, node_id_to_hexstr(tmpctx, &ld->id),
			     our_msats.millisatoshis); /* Raw: JSON field */
		json_object_end(response);

		json_object_start(response, "funding_msat");
		json_add_sat_only(response,
				  node_id_to_hexstr(tmpctx, &p->id),
				  peer_funded_sats);
		json_add_sat_only(response,
				  node_id_to_hexstr(tmpctx, &ld->id),
				  channel->our_funds);
		json_object_end(response);
	}

	json_object_start(response, "funding");
	json_add_sat_only(response, "local_msat", channel->our_funds);
	json_add_sat_only(response, "remote_msat", peer_funded_sats);
	json_add_amount_msat_only(response, "pushed_msat", channel->push);
	json_object_end(response);

	if (!amount_sat_to_msat(&funding_msat, channel->funding_sats)) {
		log_broken(channel->log,
			   "Overflow converting funding %s",
			   type_to_string(tmpctx, struct amount_sat,
					  &channel->funding_sats));
		funding_msat = AMOUNT_MSAT(0);
	}
	json_add_amount_msat_compat(response, channel->our_msat,
				    "msatoshi_to_us", "to_us_msat");
	json_add_amount_msat_compat(response, channel->msat_to_us_min,
				    "msatoshi_to_us_min", "min_to_us_msat");
	json_add_amount_msat_compat(response, channel->msat_to_us_max,
				    "msatoshi_to_us_max", "max_to_us_msat");
	json_add_amount_msat_compat(response, funding_msat,
				    "msatoshi_total", "total_msat");

	/* routing fees */
	json_add_amount_msat_only(response, "fee_base_msat",
				  amount_msat(channel->feerate_base));
	json_add_u32(response, "fee_proportional_millionths",
		     channel->feerate_ppm);

	/* channel config */
	json_add_amount_sat_compat(response,
				   channel->our_config.dust_limit,
				   "dust_limit_satoshis", "dust_limit_msat");
	json_add_amount_msat_compat(response,
				    channel->our_config.max_htlc_value_in_flight,
				    "max_htlc_value_in_flight_msat",
				    "max_total_htlc_in_msat");

	/* The `channel_reserve_satoshis` is imposed on
	 * the *other* side (see the `channel_reserve_msat`
	 * function: it uses `!side` to flip sides).
	 * So our configured `channel_reserve_satoshis`
	 * is imposed on their side, while their
	 * configured `channel_reserve_satoshis` is
	 * imposed on ours. */
	json_add_amount_sat_compat(response,
				   channel->our_config.channel_reserve,
				   "their_channel_reserve_satoshis",
				   "their_reserve_msat");
	json_add_amount_sat_compat(response,
				   channel->channel_info.their_config.channel_reserve,
				   "our_channel_reserve_satoshis",
				   "our_reserve_msat");

	/* append spendable to JSON output */
	json_add_amount_msat_compat(response,
				    channel_amount_spendable(channel),
				    "spendable_msatoshi", "spendable_msat");

	/* append receivable to JSON output */
	json_add_amount_msat_compat(response,
				    channel_amount_receivable(channel),
				    "receivable_msatoshi", "receivable_msat");

	json_add_amount_msat_compat(response,
				    channel->our_config.htlc_minimum,
				    "htlc_minimum_msat",
				    "minimum_htlc_in_msat");
	json_add_amount_msat_only(response,
				  "minimum_htlc_out_msat",
				  channel->htlc_minimum_msat);
	json_add_amount_msat_only(response,
				  "maximum_htlc_out_msat",
				  channel->htlc_maximum_msat);

	/* The `to_self_delay` is imposed on the *other*
	 * side, so our configuration `to_self_delay` is
	 * imposed on their side, while their configuration
	 * `to_self_delay` is imposed on ours. */
	json_add_num(response, "their_to_self_delay",
		     channel->our_config.to_self_delay);
	json_add_num(response, "our_to_self_delay",
		     channel->channel_info.their_config.to_self_delay);
	json_add_num(response, "max_accepted_htlcs",
		     channel->our_config.max_accepted_htlcs);
	state_changes = wallet_state_change_get(ld->wallet, tmpctx,
						channel->dbid);
	json_array_start(response, "state_changes");
	for (size_t i = 0; i < tal_count(state_changes); i++) {
		json_object_start(response, NULL);
		json_add_timeiso(response, "timestamp",
				 &state_changes[i].timestamp);
		json_add_string(response, "old_state",
				channel_state_str(state_changes[i].old_state));
		json_add_string(response, "new_state",
				channel_state_str(state_changes[i].new_state));
		json_add_string(response, "cause",
				channel_change_state_reason_str(state_changes[i].cause));
		json_add_string(response, "message", state_changes[i].message);
		json_object_end(response);
	}
	json_array_end(response);

	json_array_start(response, "status");
	for (size_t i = 0; i < ARRAY_SIZE(channel->billboard.permanent); i++) {
		if (!channel->billboard.permanent[i])
			continue;
		json_add_string(response, NULL,
				channel->billboard.permanent[i]);
	}
	if (channel->billboard.transient)
		json_add_string(response, NULL, channel->billboard.transient);
	json_array_end(response);

	/* Provide channel statistics */
	wallet_channel_stats_load(ld->wallet, channel->dbid, &channel_stats);
	json_add_u64(response, "in_payments_offered",
		     channel_stats.in_payments_offered);
	json_add_amount_msat_compat(response,
				    channel_stats.in_msatoshi_offered,
				    "in_msatoshi_offered",
				    "in_offered_msat");
	json_add_u64(response, "in_payments_fulfilled",
		     channel_stats.in_payments_fulfilled);
	json_add_amount_msat_compat(response,
				    channel_stats.in_msatoshi_fulfilled,
				    "in_msatoshi_fulfilled",
				    "in_fulfilled_msat");
	json_add_u64(response, "out_payments_offered",
		     channel_stats.out_payments_offered);
	json_add_amount_msat_compat(response,
				    channel_stats.out_msatoshi_offered,
				    "out_msatoshi_offered",
				    "out_offered_msat");
	json_add_u64(response, "out_payments_fulfilled",
		     channel_stats.out_payments_fulfilled);
	json_add_amount_msat_compat(response,
				    channel_stats.out_msatoshi_fulfilled,
				    "out_msatoshi_fulfilled",
				    "out_fulfilled_msat");
	json_add_htlcs(ld, response, channel);
	json_object_end(response);
}
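
/* The peer_connected plugin hook: the payload below is serialized for
 * plugins roughly as (illustrative values, remote_addr omitted when not
 * known):
 *   { "peer": { "id": "02abc...", "direction": "in",
 *               "addr": "127.0.0.1:9735",
 *               "remote_addr": "203.0.113.1:9735",
 *               "features": "8808226aa2" } }
 */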
struct peer_connected_hook_payload {
	struct lightningd *ld;
	struct channel *channel;
	struct wireaddr_internal addr;
	struct wireaddr *remote_addr;
	bool incoming;
	struct peer *peer;
	u8 *error;
};

static void
peer_connected_serialize(struct peer_connected_hook_payload *payload,
			 struct json_stream *stream, struct plugin *plugin)
{
	const struct peer *p = payload->peer;
	json_object_start(stream, "peer");
	json_add_node_id(stream, "id", &p->id);
	json_add_string(stream, "direction", payload->incoming ? "in" : "out");
	json_add_string(
	    stream, "addr",
	    type_to_string(stream, struct wireaddr_internal, &payload->addr));
	if (payload->remote_addr)
		json_add_string(
		    stream, "remote_addr",
		    type_to_string(stream, struct wireaddr,
				   payload->remote_addr));
	json_add_hex_talarr(stream, "features", p->their_features);
	json_object_end(stream); /* .peer */
}
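
/* Called once every peer_connected hook subscriber has answered: either a
 * hook or an existing channel supplies a canned error to send straight back
 * to the peer, or the channel's state decides what happens next (abort for
 * states that should never see a reconnect, an explicit error for
 * AWAITING_UNILATERAL, or handing the peer to the owning subdaemon for the
 * active states). */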
static void peer_connected_hook_final(struct peer_connected_hook_payload *payload STEALS)
{
	struct lightningd *ld = payload->ld;
	struct channel *channel = payload->channel;
	struct wireaddr_internal addr = payload->addr;
	struct peer *peer = payload->peer;
	u8 *error;

	/* Whatever happens, we free payload (it's currently a child
	 * of the peer, which may be freed if we fail to start
	 * subd). */
	tal_steal(tmpctx, payload);

	/* Notify anyone who cares */
	notify_connect(ld, &peer->id, payload->incoming, &addr);

	/* Check for specific errors of a hook */
	if (payload->error) {
		error = payload->error;
		goto send_error;
	}

	if (channel) {
		log_debug(channel->log, "Peer has reconnected, state %s",
			  channel_state_name(channel));

		/* If we have a canned error, deliver it now. */
		if (channel->error) {
			error = channel->error;
			goto send_error;
		}

#if DEVELOPER
		if (dev_disconnect_permanent(ld)) {
			channel_fail_permanent(channel, REASON_LOCAL,
					       "dev_disconnect permfail");
			error = channel->error;
			goto send_error;
		}
#endif

		switch (channel->state) {
		case ONCHAIN:
		case FUNDING_SPEND_SEEN:
2018-02-12 11:13:04 +01:00
		case CLOSINGD_COMPLETE:
2019-06-22 14:49:30 +02:00
			/* Channel is supposed to be active! */
			abort();
		case CLOSED:
			/* Channel should not have been loaded */
2018-02-12 11:13:04 +01:00
			abort();
2017-10-11 12:09:49 +02:00
2018-08-23 03:08:48 +02:00
		/* We consider this "active" but we only send an error */
		case AWAITING_UNILATERAL: {
			/* channel->error is not saved in db, so this can
			 * happen if we restart. */
2020-09-09 09:20:53 +02:00
			error = towire_errorfmt(tmpctx, &channel->cid,
2018-08-23 03:08:48 +02:00
						"Awaiting unilateral close");
			goto send_error;
		}
2020-11-24 02:43:02 +01:00
		case DUALOPEND_OPEN_INIT:
		case DUALOPEND_AWAITING_LOCKIN:
2017-10-11 12:09:49 +02:00
		case CHANNELD_AWAITING_LOCKIN:
		case CHANNELD_NORMAL:
		case CHANNELD_SHUTTING_DOWN:
		case CLOSINGD_SIGEXCHANGE:
2018-04-26 06:51:01 +02:00
			assert(!channel->owner);
2018-02-12 11:13:04 +01:00
			channel->peer->addr = addr;
2021-03-25 05:13:12 +01:00
			channel->peer->connected_incoming = payload->incoming;
2022-03-22 21:27:29 +01:00
			goto make_active;
2017-10-11 12:09:49 +02:00
		}
		abort();
	}
2017-05-22 13:26:49 +02:00
2022-03-22 09:53:13 +01:00
	/* If we get here, it means we have no channel */
	assert(!channel);
2021-06-03 05:22:21 +02:00
	return;

send_error:
2022-03-22 21:27:29 +01:00
	log_peer_debug(ld->log, &peer->id, "Telling connectd to send error %s",
		       tal_hex(tmpctx, error));
2021-06-03 05:22:21 +02:00
	/* Get connectd to send error and close. */
	subd_send_msg(ld->connectd,
		      take(towire_connectd_peer_final_msg(NULL, &peer->id,
2022-01-08 14:29:29 +01:00
							  error)));
2022-03-22 21:27:29 +01:00
	return;

make_active:
	log_peer_debug(ld->log, &peer->id,
		       "Telling connectd to make active, state %s",
		       channel_state_name(channel));
	subd_send_msg(ld->connectd,
		      take(towire_connectd_peer_make_active(NULL, &peer->id,
							    &channel->cid)));
2019-01-19 15:56:05 +01:00
}
2021-01-25 14:47:47 +01:00
static bool
peer_connected_hook_deserialize(struct peer_connected_hook_payload *payload,
				const char *buffer,
				const jsmntok_t *toks)
{
	struct lightningd *ld = payload->ld;

	/* already rejected by prior plugin hook in the chain */
	if (payload->error != NULL)
		return true;

	if (!toks || !buffer)
		return true;

	/* If we had a hook, interpret result. */
	const jsmntok_t *t_res = json_get_member(buffer, toks, "result");
	const jsmntok_t *t_err = json_get_member(buffer, toks, "error_message");

	/* fail */
	if (!t_res)
		fatal("Plugin returned an invalid response to the "
		      "peer_connected hook: %s", buffer);

	/* reject */
	if (json_tok_streq(buffer, t_res, "disconnect")) {
		payload->error = (u8 *)"";
		if (t_err) {
2021-02-03 03:51:41 +01:00
			payload->error = towire_warningfmt(tmpctx, NULL, "%.*s",
							   t_err->end - t_err->start,
							   buffer + t_err->start);
2021-01-25 14:47:47 +01:00
		}
		log_debug(ld->log, "peer_connected hook rejects and says '%s'",
			  payload->error);
		/* At this point we suppress other plugins in the chain and
		 * directly move to final */
		peer_connected_hook_final(payload);
		return false;
	} else if (!json_tok_streq(buffer, t_res, "continue"))
		fatal("Plugin returned an invalid response to the "
		      "peer_connected hook: %s", buffer);

	/* call next hook */
	return true;
}
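For quick reference, a sketch (not from the source) of the two hook replies the parser above accepts; the `result` and `error_message` member names and the `continue`/`disconnect` values come directly from the json_get_member()/json_tok_streq() checks in peer_connected_hook_deserialize(), while the example error text is invented.

/* Illustrative plugin replies to the peer_connected hook:
 *   {"result": "continue"}
 *   {"result": "disconnect", "error_message": "not accepting connections"}
 * A missing "result" member, or any value other than those two, hits the
 * fatal() paths above. */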
2022-02-23 18:40:10 +01:00
/* Compare and store `remote_addr` and the `peer_id` that reported it.
 * If the new address was also reported by at least one other peer, do node_announcement */
static void update_remote_addr(struct lightningd *ld,
			       const struct wireaddr *remote_addr,
			       const struct node_id peer_id)
{
2022-03-09 14:28:20 +01:00
	/* failsafe to prevent privacy leakage. */
	if (ld->always_use_proxy)
		return;

2022-02-23 18:40:10 +01:00
	switch (remote_addr->type) {
	case ADDR_TYPE_IPV4:
		/* init pointers first time */
		if (ld->remote_addr_v4 == NULL) {
			ld->remote_addr_v4 = tal_dup(ld, struct wireaddr,
						     remote_addr);
			ld->remote_addr_v4_peer = peer_id;
		}
		/* if updated by the same peer just remember the latest addr */
		if (node_id_eq(&ld->remote_addr_v4_peer, &peer_id)) {
			*ld->remote_addr_v4 = *remote_addr;
			break;
		}
2022-02-23 19:11:03 +01:00
		/* tell gossip we have a valid update */
		if (wireaddr_eq_without_port(ld->remote_addr_v4, remote_addr))
			subd_send_msg(ld->gossip, towire_gossipd_remote_addr(
						      tmpctx,
						      ld->remote_addr_v4));
2022-02-23 18:40:10 +01:00
		/* store latest values */
		*ld->remote_addr_v4 = *remote_addr;
		ld->remote_addr_v4_peer = peer_id;
		break;
	case ADDR_TYPE_IPV6:
		/* same code :s/4/6/ without the comments ;) */
		if (ld->remote_addr_v6 == NULL) {
			ld->remote_addr_v6 = tal_dup(ld, struct wireaddr,
						     remote_addr);
			ld->remote_addr_v6_peer = peer_id;
		}
		if (node_id_eq(&ld->remote_addr_v6_peer, &peer_id)) {
			*ld->remote_addr_v6 = *remote_addr;
			break;
		}
2022-02-23 19:11:03 +01:00
		if (wireaddr_eq_without_port(ld->remote_addr_v6, remote_addr))
			subd_send_msg(ld->gossip, towire_gossipd_remote_addr(
						      tmpctx,
						      ld->remote_addr_v6));
2022-02-23 18:40:10 +01:00
		*ld->remote_addr_v6 = *remote_addr;
		ld->remote_addr_v6_peer = peer_id;
		break;
	/* ignore all other cases */
	case ADDR_TYPE_TOR_V2_REMOVED:
	case ADDR_TYPE_TOR_V3:
	case ADDR_TYPE_DNS:
	case ADDR_TYPE_WEBSOCKET:
		break;
	}
}
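To make the v4/v6 branches above easier to follow, here is a minimal standalone sketch of the same confirmation rule, with invented names and plain C types (nothing below is lightningd API): keep the latest report per address family, and only treat the address as confirmed once a second, different peer reports the same address ignoring the port.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical, simplified types just for this sketch. */
struct sketch_addr { uint8_t ip[16]; uint16_t port; };
struct sketch_state {
	bool have_candidate;
	struct sketch_addr candidate;
	uint64_t candidate_peer;
};

static bool sketch_addr_eq_without_port(const struct sketch_addr *a,
					const struct sketch_addr *b)
{
	return memcmp(a->ip, b->ip, sizeof(a->ip)) == 0;
}

/* Returns true once two different peers agree on the address. */
static bool sketch_update_remote_addr(struct sketch_state *s,
				      const struct sketch_addr *reported,
				      uint64_t peer)
{
	bool confirmed = false;

	if (s->have_candidate
	    && s->candidate_peer != peer
	    && sketch_addr_eq_without_port(&s->candidate, reported))
		confirmed = true;

	/* Always remember the latest report. */
	s->have_candidate = true;
	s->candidate = *reported;
	s->candidate_peer = peer;
	return confirmed;
}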
2021-01-25 14:47:47 +01:00
REGISTER_PLUGIN_HOOK(peer_connected,
		     peer_connected_hook_deserialize,
		     peer_connected_hook_final,
		     peer_connected_serialize,
		     struct peer_connected_hook_payload *);
2019-01-19 15:56:05 +01:00
/* Connectd tells us a peer has connected: it never hands us duplicates, since
2022-03-22 21:27:29 +01:00
 * it holds them until we say peer_disconnected. */
void peer_connected(struct lightningd *ld, const u8 *msg)
2019-01-19 15:56:05 +01:00
{
2019-04-08 11:58:32 +02:00
	struct node_id id;
2020-04-03 02:03:59 +02:00
	u8 *their_features;
2019-01-19 15:56:05 +01:00
	struct peer *peer;
	struct peer_connected_hook_payload *hook_payload;

	hook_payload = tal(NULL, struct peer_connected_hook_payload);
	hook_payload->ld = ld;
2021-01-25 14:47:47 +01:00
	hook_payload->error = NULL;
2020-08-25 04:16:22 +02:00
	if (!fromwire_connectd_peer_connected(hook_payload, msg,
2021-03-25 04:53:31 +01:00
					      &id, &hook_payload->addr,
2021-10-12 13:16:37 +02:00
					      &hook_payload->remote_addr,
2021-03-25 04:53:31 +01:00
					      &hook_payload->incoming,
					      &their_features))
2019-01-19 15:56:05 +01:00
		fatal("Connectd gave bad CONNECT_PEER_CONNECTED message %s",
		      tal_hex(msg, msg));

	/* If we're already dealing with this peer, hand off to correct
	 * subdaemon.  Otherwise, we'll hand to openingd to wait there. */
	peer = peer_by_id(ld, &id);
	if (!peer)
2021-03-25 05:13:12 +01:00
		peer = new_peer(ld, 0, &id, &hook_payload->addr,
				hook_payload->incoming);
2022-03-22 21:26:30 +01:00
	peer->is_connected = true;
2019-01-19 15:56:05 +01:00
	tal_steal(peer, hook_payload);
	hook_payload->peer = peer;
2020-04-03 02:03:59 +02:00
	peer_update_features(peer, their_features);
2019-01-19 15:56:05 +01:00
2020-04-02 06:07:47 +02:00
	/* Complete any outstanding connect commands. */
2021-03-25 04:53:31 +01:00
	connect_succeeded(ld, peer, hook_payload->incoming, &hook_payload->addr);
2020-04-02 06:07:47 +02:00
2019-01-19 15:56:05 +01:00
	/* Can't be opening, since we wouldn't have sent peer_disconnected. */
	assert(!peer->uncommitted_channel);
	hook_payload->channel = peer_active_channel(peer);
2021-01-22 01:55:23 +01:00
	/* It might be v2 opening, though, since we hang onto these */
	if (!hook_payload->channel)
		hook_payload->channel = peer_unsaved_channel(peer);
2022-02-23 18:40:10 +01:00
	/* Log and update remote_addr for Nat/IP discovery. */
	if (hook_payload->remote_addr) {
2021-10-12 13:16:37 +02:00
		log_info(ld->log, "Peer says it sees our address as: %s",
2022-02-26 01:50:33 +01:00
			 fmt_wireaddr(tmpctx, hook_payload->remote_addr));
2022-02-23 18:40:10 +01:00
		/* Currently only from peers we have a channel with, until we
		 * do stuff like probing for remote_addr to a random node. */
		if (hook_payload->channel)
			update_remote_addr(ld, hook_payload->remote_addr, id);
2021-10-12 13:16:37 +02:00
	}
2020-04-15 12:20:41 +02:00
	plugin_hook_call_peer_connected(ld, hook_payload);
2017-10-11 12:09:49 +02:00
}
2017-05-22 13:26:49 +02:00
2022-03-22 21:27:29 +01:00
/* connectd tells us a peer has an interesting message, and hands us an
 * fd to give to the correct subdaemon.  Unlike peer_connected, this is racy:
 * we might have just told it to disconnect peer. */
void peer_active(struct lightningd *ld, const u8 *msg, int fd)
{
	struct node_id id;
	u16 *msgtype;
	struct channel *channel;
2022-03-22 21:27:30 +01:00
	struct channel_id channel_id;
2022-03-22 21:27:29 +01:00
	struct peer *peer;
	bool dual_fund;
	u8 *error;
	struct peer_fd *peer_fd = new_peer_fd(tmpctx, fd);

	/* FIXME: Use msgtype to determine what to do! */
	if (!fromwire_connectd_peer_active(msg, msg, &id, &msgtype, &channel_id))
		fatal("Connectd gave bad CONNECTD_PEER_ACTIVE message %s",
		      tal_hex(msg, msg));

	peer = peer_by_id(ld, &id);
	if (!peer) {
		/* This race is possible, but I want to see it in CI. */
		log_broken(ld->log, "Unknown active peer %s",
			   type_to_string(tmpctx, struct node_id, &id));
		return;
	}
2022-03-22 21:27:30 +01:00
	/* Do we know what channel they're talking about? */
	channel = find_channel_by_id(peer, &channel_id);
2022-03-22 21:27:29 +01:00
	if (channel) {
		switch (channel->state) {
		case ONCHAIN:
		case FUNDING_SPEND_SEEN:
		case CLOSINGD_COMPLETE:
2022-03-22 21:27:30 +01:00
			goto channel_is_closed;
2022-03-22 21:27:29 +01:00
		case CLOSED:
			/* Channel should not have been loaded */
			abort();
		case AWAITING_UNILATERAL: {
			/* channel->error is not saved in db, so this can
			 * happen if we restart. */
			error = towire_errorfmt(tmpctx, &channel->cid,
						"Awaiting unilateral close");
			goto send_error;
		}
		case DUALOPEND_OPEN_INIT:
			/* We asked for this, to open? */
			if (!msgtype
			    && channel->open_attempt
			    && channel->open_attempt->open_msg) {
				if (peer_start_dualopend(peer, peer_fd, channel))
					subd_send_msg(channel->owner, channel->open_attempt->open_msg);
				return;
			}
			/* Fall through. */
		case DUALOPEND_AWAITING_LOCKIN:
			assert(!channel->owner);
			peer_restart_dualopend(peer, peer_fd, channel);
			return;
		case CHANNELD_AWAITING_LOCKIN:
		case CHANNELD_NORMAL:
		case CHANNELD_SHUTTING_DOWN:
		case CLOSINGD_SIGEXCHANGE:
			assert(!channel->owner);
			peer_start_channeld(channel,
					    peer_fd,
					    NULL, true,
					    NULL);
			return;
		}
		abort();
	}

	dual_fund = feature_negotiated(ld->our_features,
				       peer->their_features,
				       OPT_DUAL_FUND);

	/* Did we ask for this? */
	if (!msgtype) {
		/* If it was dual_fund, it will have peer_unsaved_channel above */
		if (dual_fund) {
			log_broken(ld->log, "Unsolicited active df peer %s?",
				   type_to_string(tmpctx, struct node_id,
						  &peer->id));
		} else {
			const struct uncommitted_channel *uc
				= peer->uncommitted_channel;
			if (!uc->open_daemon
			    && uc->fc
			    && uc->fc->open_msg) {
				if (peer_start_openingd(peer, peer_fd)) {
					subd_send_msg(uc->open_daemon,
						      uc->fc->open_msg);
				}
			} else {
				log_broken(ld->log, "Unsolicited active peer %s?",
					   type_to_string(tmpctx, struct node_id,
							  &peer->id));
			}
		}
2022-03-22 21:27:30 +01:00
		return;
	}

2022-03-22 21:27:30 +01:00
	/* OK, it's an unknown channel.  Create a new one if they're trying. */
	switch (*msgtype) {
	case WIRE_OPEN_CHANNEL:
		if (dual_fund) {
			error = towire_errorfmt(tmpctx, &channel_id,
						"OPT_DUAL_FUND: cannot use open_channel");
			goto send_error;
		}
		if (peer->uncommitted_channel) {
			error = towire_errorfmt(tmpctx, &channel_id,
						"Multiple simulteneous opens not supported");
			goto send_error;
		}
		peer->uncommitted_channel = new_uncommitted_channel(peer);
		peer_start_openingd(peer, peer_fd);
		break;
	case WIRE_OPEN_CHANNEL2:
		if (!dual_fund) {
			error = towire_errorfmt(tmpctx, &channel_id,
						"Didn't negotiate OPT_DUAL_FUND: cannot use open_channel2");
			goto send_error;
2022-03-22 21:27:29 +01:00
		}
2022-03-22 21:27:30 +01:00
		channel = new_unsaved_channel(peer,
					      peer->ld->config.fee_base,
					      peer->ld->config.fee_per_satoshi);
2022-03-22 21:27:30 +01:00
		channel->cid = channel_id;
2022-03-22 21:27:30 +01:00
		peer_start_dualopend(peer, peer_fd, channel);
2022-03-22 21:27:30 +01:00
		break;
	default:
		log_peer_unusual(ld->log, &peer->id,
				 "Unknown channel %s for %s",
				 type_to_string(tmpctx, struct channel_id,
						&channel_id),
				 peer_wire_name(*msgtype));
		error = towire_errorfmt(tmpctx, &channel_id,
					"Unknown channel for %s", peer_wire_name(*msgtype));
		goto send_error;
		break;
2022-03-22 21:27:30 +01:00
	}
2022-03-22 21:27:29 +01:00
	return;

2022-03-22 21:27:30 +01:00
channel_is_closed:
	if (msgtype && *msgtype == WIRE_CHANNEL_REESTABLISH) {
		log_debug(channel->log,
			  "Reestablish on %s channel: using channeld to reply",
			  channel_state_name(channel));
		peer_start_channeld(channel, peer_fd, NULL, true, true);
		return;
	}

	/* Retransmit error if we have one.  Otherwise generic error. */
	error = channel->error;
	if (!error)
		error = towire_errorfmt(tmpctx, &channel_id,
					"channel in state %s",
					channel_state_name(channel));
2022-03-22 21:27:29 +01:00
send_error:
	log_peer_debug(ld->log, &peer->id, "Telling connectd to send error %s",
		       tal_hex(tmpctx, error));
	/* Get connectd to send error and close. */
	subd_send_msg(ld->connectd,
		      take(towire_connectd_peer_final_msg(NULL, &peer->id,
							  error)));
}
2022-03-22 21:26:30 +01:00
struct disconnect_command {
	struct list_node list;
	/* Command structure. This is the parent of the close command. */
	struct command *cmd;
	/* node being disconnected. */
	struct node_id id;
};

static void destroy_disconnect_command(struct disconnect_command *dc)
{
	list_del(&dc->list);
}

void peer_disconnect_done(struct lightningd *ld, const u8 *msg)
{
	struct node_id id;
	struct disconnect_command *i, *next;
	struct peer *p;

	if (!fromwire_connectd_peer_disconnect_done(msg, &id))
		fatal("Connectd gave bad PEER_DISCONNECT_DONE message %s",
		      tal_hex(msg, msg));

	/* If we still have peer, it's disconnected now */
	p = peer_by_id(ld, &id);
2022-03-22 21:27:29 +01:00
	if (p) {
2022-03-22 21:26:30 +01:00
		p->is_connected = false;
2022-03-22 21:27:29 +01:00
		/* If we only cared about peer because of connectd, free it. */
		if (list_empty(&p->channels) && !p->uncommitted_channel) {
			tal_free(p);
		}
	}
2022-03-22 21:26:30 +01:00
2022-03-22 21:26:30 +01:00
	/* Fire off plugin notifications */
	notify_disconnect(ld, &id);
2022-03-22 21:26:30 +01:00
	/* Wake any disconnect commands (removes self from list) */
	list_for_each_safe(&ld->disconnect_commands, i, next, list) {
		if (!node_id_eq(&i->id, &id))
			continue;
		was_pending(command_success(i->cmd,
					    json_stream_success(i->cmd)));
	}
}
2021-01-08 19:27:19 +01:00
static bool check_funding_details(const struct bitcoin_tx *tx,
				  const u8 *wscript,
				  struct amount_sat funding,
				  u32 funding_outnum)
2019-06-28 03:58:31 +02:00
{
2019-09-26 21:07:20 +02:00
	struct amount_asset asset =
2021-01-08 19:27:19 +01:00
		bitcoin_tx_output_get_amount(tx, funding_outnum);
2019-09-26 21:07:20 +02:00
	if (!amount_asset_is_main(&asset))
		return false;
2019-06-28 03:58:31 +02:00
2021-01-08 19:27:19 +01:00
	if (funding_outnum >= tx->wtx->num_outputs)
2019-06-28 03:58:31 +02:00
		return false;
2021-01-08 19:27:19 +01:00
	if (!amount_sat_eq(amount_asset_to_sat(&asset), funding))
2019-06-28 03:58:31 +02:00
		return false;
2021-01-08 19:27:19 +01:00
	return scripteq(scriptpubkey_p2wsh(tmpctx, wscript),
			bitcoin_tx_output_get_script(tmpctx, tx,
						     funding_outnum));
}

/* FIXME: Unify our watch code so we get notified by txout, instead, like
 * the wallet code does. */
static bool check_funding_tx(const struct bitcoin_tx *tx,
			     const struct channel *channel)
{
	struct channel_inflight *inflight;
	const u8 *wscript;
2019-06-28 03:58:31 +02:00
	wscript = bitcoin_redeem_2of2(tmpctx,
				      &channel->local_funding_pubkey,
				      &channel->channel_info.remote_fundingkey);
2021-01-08 19:27:19 +01:00
	/* Since we've enabled "RBF" for funding transactions,
	 * it's possible that it's one of "inflights".
	 * Worth noting that this check was added to prevent
	 * a peer from sending us a 'bogus' transaction id (that didn't
	 * actually contain the funding output).  As of v2 (where
	 * RBF is introduced), this isn't a problem so much as
	 * both sides have full access to the funding transaction */
2021-10-13 05:45:36 +02:00
	if (check_funding_details(tx, wscript, channel->funding_sats,
				  channel->funding.n))
2021-01-08 19:27:19 +01:00
		return true;

	list_for_each(&channel->inflights, inflight, list) {
		if (check_funding_details(tx, wscript,
					  inflight->funding->total_funds,
2021-10-13 05:45:36 +02:00
					  inflight->funding->outpoint.n))
2021-01-08 19:27:19 +01:00
			return true;
	}

	return false;
2019-06-28 03:58:31 +02:00
}
2021-05-20 23:50:42 +02:00
static void update_channel_from_inflight(struct lightningd *ld,
					 struct channel *channel,
2022-01-30 04:37:30 +01:00
					 const struct channel_inflight *inflight)
2021-05-20 23:50:42 +02:00
{
	struct wally_psbt *psbt_copy;
2021-10-13 05:45:36 +02:00
	channel->funding = inflight->funding->outpoint;
	channel->funding_sats = inflight->funding->total_funds;
2021-05-20 23:50:42 +02:00
	channel->our_funds = inflight->funding->our_funds;
2021-06-17 03:28:18 +02:00
	/* Lease infos! */
	channel->lease_expiry = inflight->lease_expiry;
2021-12-08 18:42:07 +01:00
	channel->push = inflight->lease_fee;
2021-06-17 03:28:18 +02:00
	tal_free(channel->lease_commit_sig);
	channel->lease_commit_sig
2022-01-30 04:37:30 +01:00
		= tal_dup_or_null(channel, secp256k1_ecdsa_signature, inflight->lease_commit_sig);
2021-06-17 03:28:18 +02:00
	channel->lease_chan_max_msat = inflight->lease_chan_max_msat;
	channel->lease_chan_max_ppt = inflight->lease_chan_max_ppt;
2021-06-22 20:25:59 +02:00
	tal_free(channel->blockheight_states);
	channel->blockheight_states = new_height_states(channel,
							channel->opener,
							&inflight->lease_blockheight_start);
2021-05-20 23:50:42 +02:00
	/* Make a 'clone' of this tx */
	psbt_copy = clone_psbt(channel, inflight->last_tx->psbt);
	channel_set_last_tx(channel,
			    bitcoin_tx_with_psbt(channel, psbt_copy),
			    &inflight->last_sig,
			    TX_CHANNEL_UNILATERAL);

	/* Update the reserve */
	channel_update_reserve(channel,
			       &channel->channel_info.their_config,
			       inflight->funding->total_funds);

	wallet_channel_save(ld->wallet, channel);
}
2019-02-26 17:57:19 +01:00
static enum watch_result funding_depth_cb(struct lightningd *ld,
2018-08-13 05:05:33 +02:00
					  struct channel *channel,
2018-04-09 15:20:54 +02:00
					  const struct bitcoin_txid *txid,
2019-06-28 03:58:31 +02:00
					  const struct bitcoin_tx *tx,
2018-02-20 21:59:09 +01:00
					  unsigned int depth)
2017-03-07 02:03:55 +01:00
{
2017-08-18 06:43:53 +02:00
	const char *txidstr;
2019-04-30 18:10:15 +02:00
	struct short_channel_id scid;
2017-03-07 02:05:03 +01:00
2019-06-28 03:58:31 +02:00
	/* Sanity check */
2019-12-31 00:36:00 +01:00
	if (!check_funding_tx(tx, channel)) {
2019-06-28 03:58:31 +02:00
		channel_internal_error(channel, "Bad tx %s: %s",
				       type_to_string(tmpctx,
						      struct bitcoin_txid, txid),
				       type_to_string(tmpctx,
						      struct bitcoin_tx, tx));
		return DELETE_WATCH;
	}
2019-02-26 17:57:19 +01:00
	txidstr = type_to_string(tmpctx, struct bitcoin_txid, txid);
2018-02-12 11:13:04 +01:00
	log_debug(channel->log, "Funding tx %s depth %u of %u",
2018-02-12 11:12:55 +01:00
		  txidstr, depth, channel->minimum_depth);
2017-05-23 13:00:17 +02:00
	tal_free(txidstr);
2017-03-07 02:31:43 +01:00
2019-04-30 18:10:15 +02:00
	bool min_depth_reached = depth >= channel->minimum_depth;
2017-03-07 02:31:43 +01:00
2019-04-30 18:10:15 +02:00
	/* Reorg can change scid, so always update/save scid when possible (depth=0
	 * means the stale block with our funding tx was removed) */
	if ((min_depth_reached && !channel->scid) || (depth && channel->scid)) {
2018-04-23 12:08:01 +02:00
		struct txlocator *loc;
2021-05-20 23:50:42 +02:00
		struct channel_inflight *inf;

		/* Update the channel's info to the correct tx, if needed to
		 * It's possible an 'inflight' has reached depth */
		if (!list_empty(&channel->inflights)) {
			inf = channel_inflight_find(channel, txid);
			if (!inf) {
				channel_fail_permanent(channel, REASON_LOCAL,
						       "Txid %s for channel "
						       "not found in inflights. (peer %s)",
						       type_to_string(tmpctx,
								      struct bitcoin_txid,
								      txid),
						       type_to_string(tmpctx,
								      struct node_id,
								      &channel->peer->id));
				return DELETE_WATCH;
			}
			update_channel_from_inflight(ld, channel, inf);
		}
2018-04-23 12:08:01 +02:00
2021-10-13 05:45:36 +02:00
		wallet_annotate_txout(ld->wallet, &channel->funding,
2019-10-03 19:31:00 +02:00
				      TX_CHANNEL_FUNDING, channel->dbid);
2018-04-23 12:08:01 +02:00
		loc = wallet_transaction_locate(tmpctx, ld->wallet, txid);
2019-04-30 18:10:15 +02:00
		if (!mk_short_channel_id(&scid,
2019-01-21 01:57:43 +01:00
					 loc->blkheight, loc->index,
2021-10-13 05:45:36 +02:00
					 channel->funding.n)) {
feat: adds state change cause and message
This adds a `state_change` 'cause' to a channel.
A 'cause' is some initial 'reason' a channel was created or closed by:
/* Anything other than the reasons below. Should not happen. */
REASON_UNKNOWN,
/* Unconscious internal reasons, e.g. dev fail of a channel. */
REASON_LOCAL,
/* The operator or a plugin opened or closed a channel by intention. */
REASON_USER,
/* The remote closed or funded a channel with us by intention. */
REASON_REMOTE,
/* E.g. We need to close a channel because of bad signatures and such. */
REASON_PROTOCOL,
/* A channel was closed onchain, while we were offline. */
/* Note: This is very likely a conscious remote decision. */
REASON_ONCHAIN
If a 'cause' is known and a subsequent state change is made with
`REASON_UNKNOWN`, the preceding cause will be used as the reason, since a lot
(all `REASON_UNKNOWN`) of state changes are subsequent consequences of a prior
cause: local, user, remote, protocol or onchain.
Changelog-Added: Plugins: Channel closure reason/cause to channel_state_changed notification
2020-10-28 11:46:12 +01:00
			channel_fail_permanent(channel,
					       REASON_LOCAL,
					       "Invalid funding scid %u:%u:%u",
2019-01-21 01:57:43 +01:00
					       loc->blkheight, loc->index,
2021-10-13 05:45:36 +02:00
					       channel->funding.n);
2019-01-21 01:57:43 +01:00
			return DELETE_WATCH;
		}
2019-04-30 18:10:15 +02:00
		/* If we restart, we could already have peer->scid from database */
		if (!channel->scid) {
			channel->scid = tal(channel, struct short_channel_id);
			*channel->scid = scid;
			wallet_channel_save(ld->wallet, channel);
		} else if (!short_channel_id_eq(channel->scid, &scid)) {
			/* This normally restarts channeld, initialized with updated scid
			 * and also adds it (at least our halve_chan) to rtable. */
2019-07-26 04:11:18 +02:00
			channel_fail_reconnect(channel,
					       "short_channel_id changed to %s (was %s)",
					       short_channel_id_to_str(tmpctx, &scid),
					       short_channel_id_to_str(tmpctx, channel->scid));
2019-04-30 18:10:15 +02:00
			*channel->scid = scid;
			wallet_channel_save(ld->wallet, channel);
			return KEEP_WATCHING;
		}
2018-01-05 03:42:31 +01:00
	}
2017-05-02 07:26:31 +02:00
2018-05-17 07:08:11 +02:00
	/* Try to tell subdaemon */
2019-02-26 17:57:19 +01:00
	if (!channel_tell_depth(ld, channel, txid, depth))
2018-04-23 12:08:01 +02:00
		return KEEP_WATCHING;
2017-03-20 17:09:12 +01:00
2019-04-30 18:10:15 +02:00
	if (!min_depth_reached)
2019-02-26 17:57:19 +01:00
		return KEEP_WATCHING;
2017-06-27 04:55:01 +02:00
2019-04-30 18:10:15 +02:00
	/* We keep telling it depth/scid until we get to announce depth. */
2018-05-17 07:08:11 +02:00
	if (depth < ANNOUNCE_MIN_DEPTH)
		return KEEP_WATCHING;
2017-03-07 02:31:43 +01:00
	return DELETE_WATCH;
2017-03-07 02:03:55 +01:00
}
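The mk_short_channel_id() call above packs the funding transaction's block height, transaction index and output number into a single 64-bit id. As a hedged, standalone sketch of that BOLT #7 layout (3 bytes of block height, 3 bytes of tx index, 2 bytes of output index); the function name and types below are invented, not the project's helper:

#include <stdbool.h>
#include <stdint.h>

/* Pack block height, tx index and output index into a short_channel_id. */
static bool sketch_mk_short_channel_id(uint64_t *scid,
				       uint32_t blkheight, uint32_t txindex,
				       uint16_t outnum)
{
	/* Height and index must each fit in 24 bits. */
	if (blkheight >= (1U << 24) || txindex >= (1U << 24))
		return false;
	*scid = ((uint64_t)blkheight << 40)
		| ((uint64_t)txindex << 16)
		| outnum;
	return true;
}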
2018-04-16 13:20:45 +02:00
static enum watch_result funding_spent(struct channel *channel,
				       const struct bitcoin_tx *tx,
				       size_t inputnum UNUSED,
				       const struct block *block)
{
	struct bitcoin_txid txid;
	bitcoin_txid(tx, &txid);

	wallet_channeltxs_add(channel->peer->ld->wallet, channel,
2020-08-25 04:15:48 +02:00
			      WIRE_ONCHAIND_INIT, &txid, 0, block->height);
2021-12-01 17:24:31 +01:00
	return onchaind_funding_spent(channel, tx, block->height);
2018-04-16 13:20:45 +02:00
}
2021-03-15 21:26:13 +01:00
void channel_watch_wrong_funding(struct lightningd *ld, struct channel *channel)
{
	/* Watch the "wrong" funding too, in case we spend it. */
	if (channel->shutdown_wrong_funding) {
		/* FIXME: Remove arg from cb? */
		watch_txo(channel, ld->topology, channel,
2021-10-13 05:45:36 +02:00
			  channel->shutdown_wrong_funding,
2021-03-15 21:26:13 +01:00
			  funding_spent);
	}
}
2018-02-20 21:59:04 +01:00
void channel_watch_funding(struct lightningd *ld, struct channel *channel)
2017-02-24 06:52:56 +01:00
{
2017-08-23 03:55:16 +02:00
	/* FIXME: Remove arg from cb? */
2018-02-20 21:59:04 +01:00
	watch_txid(channel, ld->topology, channel,
2021-10-13 05:45:36 +02:00
		   &channel->funding.txid, funding_depth_cb);
2018-02-12 11:13:04 +01:00
	watch_txo(channel, ld->topology, channel,
2021-10-13 05:45:36 +02:00
		  &channel->funding,
2018-02-20 21:59:09 +01:00
		  funding_spent);
2021-03-15 21:26:13 +01:00
	channel_watch_wrong_funding(ld, channel);
2017-02-24 06:52:56 +01:00
}
2021-05-20 23:53:48 +02:00
static void channel_watch_inflight(struct lightningd *ld,
				   struct channel *channel,
				   struct channel_inflight *inflight)
{
	/* FIXME: Remove arg from cb? */
	watch_txid(channel, ld->topology, channel,
2021-10-13 05:45:36 +02:00
		   &inflight->funding->outpoint.txid, funding_depth_cb);
2021-05-20 23:53:48 +02:00
	watch_txo(channel, ld->topology, channel,
2021-10-13 05:45:36 +02:00
		  &inflight->funding->outpoint,
2021-05-20 23:53:48 +02:00
		  funding_spent);
}
2018-08-09 02:25:29 +02:00
static void json_add_peer(struct lightningd *ld,
2018-10-19 03:17:49 +02:00
			  struct json_stream *response,
2018-08-09 02:25:29 +02:00
			  struct peer *p,
			  const enum log_level *ll)
2018-02-20 21:59:09 +01:00
{
2018-08-09 02:25:29 +02:00
	struct channel *channel;
2018-02-20 21:59:09 +01:00
	json_object_start(response, NULL);
2019-04-08 11:58:32 +02:00
	json_add_node_id(response, "id", &p->id);
2018-02-20 21:59:09 +01:00
2022-03-22 21:26:30 +01:00
	json_add_bool(response, "connected", p->is_connected);
2018-06-22 01:52:57 +02:00
2018-08-09 02:25:29 +02:00
	/* If it's not connected, features are unreliable: we don't
	 * store them in the database, and they would only reflect
	 * their features *last* time they connected. */
2022-03-22 21:26:30 +01:00
	if (p->is_connected) {
2018-08-09 02:25:29 +02:00
		json_array_start(response, "netaddr");
2018-10-26 08:01:30 +02:00
		json_add_string(response, NULL,
lightningd: fix memleak false positive.
```
E - Node /tmp/ltests-uf2g_5gd/test_sendinvoice_obsolete_1/lightning-1/ has memory leaks: [
E {
E "backtrace": [
E "ccan/ccan/tal/tal.c:442 (tal_alloc_)",
E "ccan/ccan/tal/tal.c:471 (tal_alloc_arr_)",
E "ccan/ccan/tal/str/str.c:91 (tal_vfmt_)",
E "ccan/ccan/tal/str/str.c:44 (tal_fmt_)",
E "common/wireaddr.c:232 (fmt_wireaddr_without_port)",
E "common/wireaddr.c:251 (fmt_wireaddr)",
E "common/wireaddr.c:208 (fmt_wireaddr_internal)",
E "common/wireaddr.c:221 (fmt_wireaddr_internal_)",
E "common/type_to_string.c:32 (type_to_string_)",
E "lightningd/peer_control.c:1433 (json_add_peer)",
E "lightningd/peer_control.c:1481 (json_listpeers)",
E "lightningd/jsonrpc.c:627 (command_exec)",
E "lightningd/jsonrpc.c:762 (rpc_command_hook_final)",
E "lightningd/plugin_hook.c:274 (plugin_hook_call_)",
E "lightningd/jsonrpc.c:850 (plugin_hook_call_rpc_command)",
E "lightningd/jsonrpc.c:949 (parse_request)",
E "lightningd/jsonrpc.c:1040 (read_json)",
E "ccan/ccan/io/io.c:59 (next_plan)",
E "ccan/ccan/io/io.c:435 (io_do_always)",
E "ccan/ccan/io/poll.c:300 (handle_always)",
E "ccan/ccan/io/poll.c:377 (io_loop)",
E "lightningd/io_loop_with_timers.c:21 (io_loop_with_timers)",
E "lightningd/lightningd.c:1112 (main)"
E ],
E "label": "common/wireaddr.c:232:char[]",
E "parents": [
E "common/json_stream.c:22:struct json_stream",
E "ccan/ccan/io/io.c:91:struct io_conn",
E "lightningd/lightningd.c:103:struct lightningd"
E ],
E "value": "0x56041b322a48"
E }
E ]
```
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2021-09-21 06:05:00 +02:00
				type_to_string(tmpctx,
2018-10-26 08:01:30 +02:00
					       struct wireaddr_internal,
					       &p->addr));
2018-02-20 21:59:09 +01:00
		json_array_end(response);
2020-04-03 02:03:59 +02:00
		json_add_hex_talarr(response, "features", p->their_features);
2018-02-20 21:59:09 +01:00
	}
2018-08-09 02:25:29 +02:00
	json_array_start(response, "channels");
	json_add_uncommitted_channel(response, p->uncommitted_channel);
2018-02-20 21:59:09 +01:00
2021-02-24 03:29:38 +01:00
	list_for_each(&p->channels, channel, list) {
		if (channel_unsaved(channel))
			json_add_unsaved_channel(response, channel);
		else
			json_add_channel(ld, response, NULL, channel);
	}
2018-02-20 21:59:09 +01:00
	json_array_end(response);
2018-08-09 02:25:29 +02:00
	if (ll)
2019-11-18 01:27:15 +01:00
		json_add_log(response, ld->log_book, &p->id, *ll);
2018-02-20 21:59:09 +01:00
	json_object_end(response);
}
2018-12-16 05:52:06 +01:00
static struct command_result *json_listpeers(struct command *cmd,
					     const char *buffer,
					     const jsmntok_t *obj UNNEEDED,
					     const jsmntok_t *params)
2018-02-20 21:59:09 +01:00
{
2018-08-09 02:25:29 +02:00
	enum log_level *ll;
2019-04-08 11:58:32 +02:00
	struct node_id *specific_id;
2018-08-09 02:25:29 +02:00
	struct peer *peer;
2018-10-19 03:17:49 +02:00
	struct json_stream *response;
2018-02-20 21:59:09 +01:00
2018-07-20 03:14:02 +02:00
	if (!param(cmd, buffer, params,
2019-04-08 11:58:32 +02:00
		   p_opt("id", param_node_id, &specific_id),
2018-12-16 05:50:06 +01:00
		   p_opt("level", param_loglevel, &ll),
2018-07-20 03:14:02 +02:00
		   NULL))
2018-12-16 05:52:06 +01:00
		return command_param_failed();
2018-02-20 21:59:09 +01:00
2018-10-19 03:17:48 +02:00
	response = json_stream_success(cmd);
2018-08-09 02:25:29 +02:00
	json_array_start(response, "peers");
	if (specific_id) {
		peer = peer_by_id(cmd->ld, specific_id);
		if (peer)
			json_add_peer(cmd->ld, response, peer, ll);
	} else {
		list_for_each(&cmd->ld->peers, peer, list)
			json_add_peer(cmd->ld, response, peer, ll);
	}
	json_array_end(response);
2019-06-12 02:38:54 +02:00
2018-12-16 05:52:06 +01:00
	return command_success(cmd, response);
2018-02-20 21:59:09 +01:00
}

static const struct json_command listpeers_command = {
	"listpeers",
2019-05-22 16:08:16 +02:00
	"network",
2018-02-20 21:59:09 +01:00
	json_listpeers,
	"Show current peers, if {level} is set, include logs for {id}"
};
2021-06-15 07:07:10 +02:00
/* Comment added to satisfice AUTODATA */
2018-02-20 21:59:09 +01:00
AUTODATA(json_command, &listpeers_command);
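For orientation, a sketch of the per-peer JSON shape json_add_peer() builds for this command; the field names come from the json_add_* calls above, while the values are invented:

/* Illustrative listpeers entry (values invented):
 *   { "id": "02abc...", "connected": true,
 *     "netaddr": ["127.0.0.1:9735"], "features": "8808226aa2",
 *     "channels": [ ...per-channel objects... ] }
 * The "netaddr"/"features" fields only appear while connected, and a "log"
 * array is appended only when {level} is given. */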
2021-09-15 02:59:23 +02:00
struct command_result *
2018-04-30 14:54:39 +02:00
command_find_channel(struct command *cmd,
2018-12-16 05:53:06 +01:00
		     const char *buffer, const jsmntok_t *tok,
2019-03-09 21:29:39 +01:00
		     struct channel **channel)
2018-04-30 14:54:39 +02:00
{
	struct lightningd *ld = cmd->ld;
	struct channel_id cid;
	struct short_channel_id scid;
	struct peer *peer;

	if (json_tok_channel_id(buffer, tok, &cid)) {
		list_for_each(&ld->peers, peer, list) {
2018-12-16 05:53:06 +01:00
			*channel = peer_active_channel(peer);
			if (!*channel)
2018-04-30 14:54:39 +02:00
				continue;
2020-09-09 09:20:53 +02:00
			if (channel_id_eq(&(*channel)->cid, &cid))
2018-12-16 05:53:06 +01:00
				return NULL;
2018-04-30 14:54:39 +02:00
		}
2018-12-16 05:53:06 +01:00
		return command_fail(cmd, JSONRPC2_INVALID_PARAMS,
				    "Channel ID not found: '%.*s'",
				    tok->end - tok->start,
				    buffer + tok->start);
2019-09-06 08:41:41 +02:00
	} else if (json_to_short_channel_id(buffer, tok, &scid)) {
2020-02-27 05:26:36 +01:00
		*channel = active_channel_by_scid(ld, &scid);
		if (*channel)
			return NULL;
2018-12-16 05:53:06 +01:00
		return command_fail(cmd, JSONRPC2_INVALID_PARAMS,
				    "Short channel ID not found: '%.*s'",
				    tok->end - tok->start,
				    buffer + tok->start);
2018-04-30 14:54:39 +02:00
	} else {
2020-08-25 23:20:50 +02:00
		return command_fail_badparam(cmd, "id", buffer, tok,
					     "should be a channel ID or short channel ID");
2018-04-30 14:54:39 +02:00
	}
}
2019-08-01 03:10:11 +02:00
static void activate_peer(struct peer *peer, u32 delay)
2018-01-03 06:26:44 +01:00
{
2018-02-12 11:13:04 +01:00
	struct channel *channel;
2021-05-20 23:53:48 +02:00
	struct channel_inflight *inflight;
2018-02-12 11:13:04 +01:00
	struct lightningd *ld = peer->ld;
2018-01-11 07:19:53 +01:00
2018-07-24 08:18:58 +02:00
	/* We can only have one active channel: make sure connectd
2018-08-09 02:25:29 +02:00
	 * knows to try reconnecting. */
2018-02-12 11:13:04 +01:00
	channel = peer_active_channel(peer);
2022-01-11 02:15:48 +01:00
	if (channel)
		try_reconnect(channel, delay, &peer->addr);
2018-01-03 06:26:44 +01:00
2018-02-12 11:13:04 +01:00
	list_for_each(&peer->channels, channel, list) {
2021-01-22 01:55:23 +01:00
		if (channel_unsaved(channel))
			continue;
2018-02-20 21:59:04 +01:00
		/* Watching lockin may be unnecessary, but it's harmless. */
		channel_watch_funding(ld, channel);
2021-05-20 23:53:48 +02:00
		/* Also watch any inflight txs */
		list_for_each(&channel->inflights, inflight, list) {
			/* Don't double watch the txid that's also in
			 * channel->funding_txid */
2021-10-13 05:45:36 +02:00
			if (bitcoin_txid_eq(&channel->funding.txid,
					    &inflight->funding->outpoint.txid))
2021-05-20 23:53:48 +02:00
				continue;

			channel_watch_inflight(ld, channel, inflight);
		}
2018-01-03 06:26:44 +01:00
	}
}

void activate_peers(struct lightningd *ld)
{
	struct peer *p;
2019-08-01 03:10:11 +02:00
	/* Avoid thundering herd: after first five, delay by 1 second. */
	int delay = -5;
2018-01-03 06:26:44 +01:00
2019-08-01 03:10:11 +02:00
	list_for_each(&ld->peers, p, list) {
		activate_peer(p, delay > 0 ? delay : 0);
		delay++;
	}
2018-01-03 06:26:44 +01:00
}
2018-09-03 03:08:53 +02:00
/* Pull peers, channels and HTLCs from db, and wire them up. */
2019-08-10 07:24:57 +02:00
struct htlc_in_map *load_channels_from_wallet(struct lightningd *ld)
2018-09-03 03:08:53 +02:00
{
	struct peer *peer;
2019-12-12 00:39:10 +01:00
	struct htlc_in_map *unconnected_htlcs_in = tal(ld, struct htlc_in_map);
2018-09-03 03:08:53 +02:00
2019-08-09 18:01:31 +02:00
	/* Load channels from database */
	if (!wallet_init_channels(ld->wallet))
2018-09-03 03:08:53 +02:00
		fatal("Could not load channels from the database");
2019-12-12 00:39:10 +01:00
	/* First we load the incoming htlcs */
2018-09-03 03:08:53 +02:00
	list_for_each(&ld->peers, peer, list) {
		struct channel *channel;

		list_for_each(&peer->channels, channel, list) {
2019-12-12 00:39:10 +01:00
			if (!wallet_htlcs_load_in_for_channel(ld->wallet,
							      channel,
							      &ld->htlcs_in)) {
2018-09-03 03:08:53 +02:00
				fatal("could not load htlcs for channel");
			}
		}
	}
2019-12-12 00:39:10 +01:00
	/* Make a copy of the htlc_map: entries removed as they're matched */
	htlc_in_map_copy(unconnected_htlcs_in, &ld->htlcs_in);

	/* Now we load the outgoing HTLCs, so we can connect them. */
	list_for_each(&ld->peers, peer, list) {
		struct channel *channel;

		list_for_each(&peer->channels, channel, list) {
			if (!wallet_htlcs_load_out_for_channel(ld->wallet,
							       channel,
							       &ld->htlcs_out,
							       unconnected_htlcs_in)) {
				fatal("could not load outgoing htlcs for channel");
			}
		}
	}

#ifdef COMPAT_V061
	fixup_htlcs_out(ld);
#endif /* COMPAT_V061 */

	return unconnected_htlcs_in;
2018-09-03 03:08:53 +02:00
}
2018-12-16 05:52:06 +01:00
static struct command_result *json_disconnect(struct command *cmd,
					      const char *buffer,
					      const jsmntok_t *obj UNNEEDED,
					      const jsmntok_t *params)
2018-03-05 17:16:20 +01:00
{
2019-04-08 11:58:32 +02:00
	struct node_id *id;
2022-03-22 21:26:30 +01:00
	struct disconnect_command *dc;
2018-08-09 02:25:29 +02:00
	struct peer *peer;
	struct channel *channel;
2018-12-10 02:03:42 +01:00
	bool *force;
2018-03-05 17:16:20 +01:00
2018-07-20 03:14:02 +02:00
	if (!param(cmd, buffer, params,
2019-04-08 11:58:32 +02:00
		   p_req("id", param_node_id, &id),
2018-12-16 05:50:06 +01:00
		   p_opt_def("force", param_bool, &force, false),
2018-07-20 03:14:02 +02:00
		   NULL))
2018-12-16 05:52:06 +01:00
		return command_param_failed();
2018-03-05 17:16:20 +01:00
2018-08-14 23:19:31 +02:00
	peer = peer_by_id(cmd->ld, id);
2018-08-09 02:25:29 +02:00
	if (!peer) {
2022-03-22 21:27:29 +01:00
		return command_fail(cmd, LIGHTNINGD, "Unknown peer");
	}
	if (!peer->is_connected) {
2018-12-16 05:52:06 +01:00
		return command_fail(cmd, LIGHTNINGD, "Peer not connected");
2018-08-09 02:25:29 +02:00
	}

	channel = peer_active_channel(peer);
	if (channel) {
2018-12-10 02:03:42 +01:00
		if (*force) {
2019-07-26 04:11:18 +02:00
			channel_fail_reconnect(channel,
2018-12-10 02:03:42 +01:00
					       "disconnect command force=true");
2022-03-22 21:26:30 +01:00
			goto wait_for_connectd;
2018-12-10 02:03:42 +01:00
		}
2018-12-16 05:52:06 +01:00
		return command_fail(cmd, LIGHTNINGD, "Peer is in state %s",
				    channel_state_name(channel));
2018-08-09 02:25:29 +02:00
	}
2021-01-22 01:55:23 +01:00
	channel = peer_unsaved_channel(peer);
	if (channel) {
2021-05-11 18:58:00 +02:00
		channel_unsaved_close_conn(channel, "disconnect command");
2022-03-22 21:26:30 +01:00
		goto wait_for_connectd;
2021-01-22 01:55:23 +01:00
	}
2022-03-22 21:27:29 +01:00
	if (peer->uncommitted_channel) {
		kill_uncommitted_channel(peer->uncommitted_channel,
					 "disconnect command");
		goto wait_for_connectd;
2018-08-09 02:25:29 +02:00
	}
2022-03-22 21:27:29 +01:00
	/* It's just sitting in connectd. */
	subd_send_msg(cmd->ld->connectd,
		      take(towire_connectd_discard_peer(NULL, id)));
2022-03-22 21:26:30 +01:00
wait_for_connectd:
	/* Connectd tells us when it's finally disconnected */
	dc = tal(cmd, struct disconnect_command);
	dc->cmd = cmd;
	dc->id = *id;
	list_add_tail(&cmd->ld->disconnect_commands, &dc->list);
	tal_add_destructor(dc, destroy_disconnect_command);

	return command_still_pending(cmd);
2018-03-05 17:16:20 +01:00
}

static const struct json_command disconnect_command = {
	"disconnect",
2019-05-22 16:08:16 +02:00
	"network",
2018-03-05 17:16:20 +01:00
	json_disconnect,
2018-12-10 02:03:42 +01:00
	"Disconnect from {id} that has previously been connected to using connect; with {force} set, even if it has a current channel"
2018-03-05 17:16:20 +01:00
};
AUTODATA(json_command, &disconnect_command);
2018-12-16 05:52:06 +01:00
static struct command_result *json_getinfo(struct command *cmd,
					   const char *buffer,
					   const jsmntok_t *obj UNNEEDED,
					   const jsmntok_t *params)
2018-11-15 15:00:34 +01:00
{
	struct json_stream *response;
2018-11-15 15:02:40 +01:00
	struct peer *peer;
	struct channel *channel;
	unsigned int pending_channels = 0, active_channels = 0,
		     inactive_channels = 0, num_peers = 0;
2018-11-15 15:00:34 +01:00
	if (!param(cmd, buffer, params, NULL))
2018-12-16 05:52:06 +01:00
		return command_param_failed();
2018-11-15 15:00:34 +01:00
	response = json_stream_success(cmd);
2019-04-08 11:58:32 +02:00
	json_add_node_id(response, "id", &cmd->ld->id);
2018-11-15 15:00:34 +01:00
	json_add_string(response, "alias", (const char *)cmd->ld->alias);
	json_add_hex_talarr(response, "color", cmd->ld->rgb);
2018-11-15 15:02:40 +01:00
	/* Add some peer and channel stats */
	list_for_each(&cmd->ld->peers, peer, list) {
		num_peers++;

		list_for_each(&peer->channels, channel, list) {
2021-01-22 01:55:23 +01:00
			if (channel->state == CHANNELD_AWAITING_LOCKIN
			    || channel->state == DUALOPEND_AWAITING_LOCKIN
			    || channel->state == DUALOPEND_OPEN_INIT) {
2018-11-15 15:02:40 +01:00
				pending_channels++;
			} else if (channel_active(channel)) {
				active_channels++;
			} else {
				inactive_channels++;
			}
		}
	}
	json_add_num(response, "num_peers", num_peers);
	json_add_num(response, "num_pending_channels", pending_channels);
	json_add_num(response, "num_active_channels", active_channels);
	json_add_num(response, "num_inactive_channels", inactive_channels);
2018-11-15 15:00:34 +01:00
	/* Add network info */
	if (cmd->ld->listen) {
		/* These are the addresses we're announcing */
		json_array_start(response, "address");
2022-02-23 17:58:41 +01:00
		for (size_t i = 0; i < tal_count(cmd->ld->announceable); i++)
			json_add_address(response, NULL, cmd->ld->announceable + i);
2018-11-15 15:00:34 +01:00
		json_array_end(response);

		/* This is what we're actually bound to. */
		json_array_start(response, "binding");
		for (size_t i = 0; i < tal_count(cmd->ld->binding); i++)
			json_add_address_internal(response, NULL,
						  cmd->ld->binding + i);
		json_array_end(response);
	}
	json_add_string(response, "version", version());
2022-01-08 14:21:29 +01:00
	json_add_num(response, "blockheight", cmd->ld->blockheight);
2019-10-15 12:58:30 +02:00
	json_add_string(response, "network", chainparams->network_name);
2019-05-20 07:07:40 +02:00
	json_add_amount_msat_compat(response,
				    wallet_total_forward_fees(cmd->ld->wallet),
				    "msatoshi_fees_collected",
				    "fees_collected_msat");
2019-11-23 02:46:40 +01:00
	json_add_string(response, "lightning-dir", cmd->ld->config_netdir);
2019-08-09 04:38:59 +02:00
	if (!cmd->ld->topology->bitcoind->synced)
		json_add_string(response, "warning_bitcoind_sync",
				"Bitcoind is not up-to-date with network.");
2019-08-10 12:47:32 +02:00
	else if (!topology_synced(cmd->ld->topology))
		json_add_string(response, "warning_lightningd_sync",
				"Still loading latest blocks from bitcoind.");
2019-08-09 04:38:59 +02:00
2022-02-18 16:34:05 +01:00
	u8 **bits = cmd->ld->our_features->bits;
	json_object_start(response, "our_features");
	json_add_hex_talarr(response, "init",
			    featurebits_or(cmd, bits[INIT_FEATURE], bits[GLOBAL_INIT_FEATURE]));
	json_add_hex_talarr(response, "node", bits[NODE_ANNOUNCE_FEATURE]);
	json_add_hex_talarr(response, "channel", bits[CHANNEL_FEATURE]);
	json_add_hex_talarr(response, "invoice", bits[BOLT11_FEATURE]);
	json_object_end(response);
2018-12-16 05:52:06 +01:00
	return command_success(cmd, response);
2018-11-15 15:00:34 +01:00
}

static const struct json_command getinfo_command = {
	"getinfo",
2019-05-22 16:08:16 +02:00
	"utility",
2018-11-15 15:00:34 +01:00
	json_getinfo,
	"Show information about this node"
};
AUTODATA(json_command, &getinfo_command);
2019-12-26 11:19:09 +01:00
/* Wait for at least a specific blockheight, then return, or time out.  */
struct waitblockheight_waiter {
	/* struct lightningd::waitblockheight_commands. */
	struct list_node list;
	/* Command structure. This is the parent of the close command. */
	struct command *cmd;
	/* The block height being waited for. */
	u32 block_height;
	/* Whether we have been removed from the list. */
	bool removed;
};

/* Completes a pending waitblockheight. */
static struct command_result *
waitblockheight_complete(struct command *cmd,
			 u32 block_height)
{
	struct json_stream *response;

	response = json_stream_success(cmd);
	json_add_num(response, "blockheight", block_height);
	return command_success(cmd, response);
}

/* Called when command is destroyed without being resolved. */
static void
destroy_waitblockheight_waiter(struct waitblockheight_waiter *w)
{
	if (!w->removed)
		list_del(&w->list);
}

/* Called on timeout. */
static void
timeout_waitblockheight_waiter(struct waitblockheight_waiter *w)
{
	list_del(&w->list);
	w->removed = true;
	tal_steal(tmpctx, w);
2021-01-06 16:22:34 +01:00
	was_pending(command_fail(w->cmd, WAIT_TIMEOUT,
2019-12-26 11:19:09 +01:00
				 "Timed out."));
}

/* Called by lightningd at each new block. */
void waitblockheight_notify_new_block(struct lightningd *ld,
				      u32 block_height)
{
	struct waitblockheight_waiter *w, *n;
	char *to_delete = tal(NULL, char);

	/* Use safe since we could resolve commands and thus
	 * trigger removal of list elements.
	 */
	list_for_each_safe(&ld->waitblockheight_commands, w, n, list) {
		/* Skip commands that have not been reached yet. */
		if (w->block_height > block_height)
			continue;

		list_del(&w->list);
		w->removed = true;
		tal_steal(to_delete, w);
		was_pending(waitblockheight_complete(w->cmd,
						     block_height));
	}
	tal_free(to_delete);
}

static struct command_result *json_waitblockheight(struct command *cmd,
						   const char *buffer,
						   const jsmntok_t *obj,
						   const jsmntok_t *params)
{
	unsigned int *target_block_height;
	u32 block_height;
	unsigned int *timeout;
	struct waitblockheight_waiter *w;

	if (!param(cmd, buffer, params,
		   p_req("blockheight", param_number, &target_block_height),
		   p_opt_def("timeout", param_number, &timeout, 60),
		   NULL))
		return command_param_failed();

	/* Check if already reached anyway. */
	block_height = get_block_height(cmd->ld->topology);
	if (*target_block_height <= block_height)
		return waitblockheight_complete(cmd, block_height);

	/* Create a new waitblockheight command. */
	w = tal(cmd, struct waitblockheight_waiter);
	tal_add_destructor(w, &destroy_waitblockheight_waiter);
	list_add(&cmd->ld->waitblockheight_commands, &w->list);
	w->cmd = cmd;
	w->block_height = *target_block_height;
	w->removed = false;
	/* Install the timeout. */
	(void)new_reltimer(cmd->ld->timers, w, time_from_sec(*timeout),
			   &timeout_waitblockheight_waiter, w);
	return command_still_pending(cmd);
}

static const struct json_command waitblockheight_command = {
	"waitblockheight",
	"utility",
	&json_waitblockheight,
	"Wait for the blockchain to reach {blockheight}, up to "
	"{timeout} seconds."
};
AUTODATA(json_command, &waitblockheight_command);
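A usage sketch, assuming the usual lightning-cli positional-argument syntax (the command line itself is not shown in this file); the parameter names, the reply field and the WAIT_TIMEOUT failure come from the code above:

/* Hypothetical call:
 *   lightning-cli waitblockheight 750000 120
 * Succeeds with {"blockheight": <height>} once the chain reaches the target
 * (or immediately if it already has), and fails with WAIT_TIMEOUT
 * ("Timed out.") after the {timeout} seconds elapse. */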
2019-03-16 13:47:40 +01:00
static struct command_result *param_channel_or_all(struct command *cmd,
						   const char *name,
						   const char *buffer,
						   const jsmntok_t *tok,
						   struct channel **channel)
{
	struct command_result *res;
	struct peer *peer;

	/* early return the easy case */
	if (json_tok_streq(buffer, tok, "all")) {
		*channel = NULL;
		return NULL;
	}

	/* Find channel by peer_id */
	peer = peer_from_json(cmd->ld, buffer, tok);
	if (peer) {
		*channel = peer_active_channel(peer);
		if (!*channel)
			return command_fail(cmd, LIGHTNINGD,
					    "Could not find active channel of peer with that id");
		return NULL;

	/* Find channel by id or scid */
	} else {
		res = command_find_channel(cmd, buffer, tok, channel);
		if (res)
			return res;
		/* check channel is found and in valid state */
		if (!*channel)
			return command_fail(cmd, LIGHTNINGD,
					    "Could not find channel with that id");
		return NULL;
	}
}
2019-03-15 04:00:07 +01:00
/* Fee base is a u32, but it's convenient to let them specify it using
 * msat etc. suffix. */
static struct command_result *param_msat_u32(struct command *cmd,
					     const char *name,
					     const char *buffer,
					     const jsmntok_t *tok,
					     u32 **num)
{
	struct amount_msat *msat;
	struct command_result *res;

	/* Parse just like an msat. */
	res = param_msat(cmd, name, buffer, tok, &msat);
	if (res)
		return res;

	*num = tal(cmd, u32);
	if (!amount_msat_to_u32(*msat, *num)) {
2020-08-25 23:20:50 +02:00
		return command_fail_badparam(cmd, name, buffer, tok,
					     "exceeds u32 max");
2019-03-15 04:00:07 +01:00
	}

	return NULL;
}
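A minimal standalone illustration (invented name, plain stdint types) of the range check a conversion like the amount_msat_to_u32() call above has to perform before the value can land in the u32 fee field:

#include <stdbool.h>
#include <stdint.h>

/* Return false if the millisatoshi value does not fit in 32 bits. */
static bool sketch_msat_to_u32(uint64_t msat, uint32_t *out)
{
	if (msat > UINT32_MAX)
		return false;
	*out = (uint32_t)msat;
	return true;
}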
2019-03-09 21:29:39 +01:00
2022-03-21 01:58:28 +01:00

static void set_channel_config(struct command *cmd, struct channel *channel,
			       u32 *base,
			       u32 *ppm,
			       struct amount_msat *htlc_min,
			       struct amount_msat *htlc_max,
			       u32 delaysecs,
			       struct json_stream *response,
			       bool add_details)
{
	bool warn_cannot_set_min = false, warn_cannot_set_max = false;

	/* We only need to defer values if we *increase* fees (or drop
	 * max, increase min); we always allow users to overpay fees. */
	if ((base && *base > channel->feerate_base)
	    || (ppm && *ppm > channel->feerate_ppm)
	    || (htlc_min
		&& amount_msat_greater(*htlc_min, channel->htlc_minimum_msat))
	    || (htlc_max
		&& amount_msat_less(*htlc_max, channel->htlc_maximum_msat))) {
		channel->old_feerate_timeout
			= timeabs_add(time_now(), time_from_sec(delaysecs));
		channel->old_feerate_base = channel->feerate_base;
		channel->old_feerate_ppm = channel->feerate_ppm;
		channel->old_htlc_minimum_msat = channel->htlc_minimum_msat;
		channel->old_htlc_maximum_msat = channel->htlc_maximum_msat;
	}

	/* set new values */
	if (base)
		channel->feerate_base = *base;
	if (ppm)
		channel->feerate_ppm = *ppm;
	if (htlc_min) {
		struct amount_msat actual_min;

		/* We can't send something they'll refuse: check that here. */
		actual_min = channel->channel_info.their_config.htlc_minimum;
		if (amount_msat_less(*htlc_min, actual_min)) {
			warn_cannot_set_min = true;
			channel->htlc_minimum_msat = actual_min;
		} else
			channel->htlc_minimum_msat = *htlc_min;
	}
	if (htlc_max) {
		struct amount_msat actual_max;

		/* Can't set it greater than actual capacity. */
		actual_max = htlc_max_possible_send(channel);
		if (amount_msat_greater(*htlc_max, actual_max)) {
			warn_cannot_set_max = true;
			channel->htlc_maximum_msat = actual_max;
		} else
			channel->htlc_maximum_msat = *htlc_max;
	}

	/* tell channeld to make a send_channel_update */
	if (channel->owner && streq(channel->owner->name, "channeld"))
		subd_send_msg(channel->owner,
			      take(towire_channeld_config_channel(NULL, base, ppm,
								  htlc_min, htlc_max)));

	/* save values to database */
	wallet_channel_save(cmd->ld->wallet, channel);

	/* write JSON response entry */
	json_object_start(response, NULL);
	json_add_node_id(response, "peer_id", &channel->peer->id);
	json_add_string(response, "channel_id",
			type_to_string(tmpctx, struct channel_id, &channel->cid));
	if (channel->scid)
		json_add_short_channel_id(response, "short_channel_id", channel->scid);

	/* setchannel lists these explicitly */
	if (add_details) {
		json_add_amount_msat_only(response, "fee_base_msat",
					  amount_msat(channel->feerate_base));
		json_add_u32(response, "fee_proportional_millionths",
			     channel->feerate_ppm);
		json_add_amount_msat_only(response,
					  "minimum_htlc_out_msat",
					  channel->htlc_minimum_msat);
		if (warn_cannot_set_min)
			json_add_string(response, "warning_htlcmin_too_low",
					"Set minimum_htlc_out_msat to minimum allowed by peer");
		json_add_amount_msat_only(response,
					  "maximum_htlc_out_msat",
					  channel->htlc_maximum_msat);
		if (warn_cannot_set_max)
			json_add_string(response, "warning_htlcmax_too_high",
					"Set maximum_htlc_out_msat to maximum possible in channel");
	}
	json_object_end(response);
}
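
/* Illustrative note (behaviour as implemented above; numbers invented): with
 * enforcedelay=600, raising feerate_ppm from 100 to 200 stashes the old value
 * in old_feerate_ppm until old_feerate_timeout (now + 600s), so HTLCs built
 * against the previous channel_update can still be accepted during that
 * window.  Lowering fees, or widening the htlc min/max range, takes effect
 * without any grace period. */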

static struct command_result *json_setchannelfee(struct command *cmd,
						 const char *buffer,
						 const jsmntok_t *obj UNNEEDED,
						 const jsmntok_t *params)
{
	struct json_stream *response;
	struct peer *peer;
	struct channel *channel;
	u32 *base, *ppm, *delaysecs;

	/* Parse the JSON command */
	if (!param(cmd, buffer, params,
		   p_req("id", param_channel_or_all, &channel),
		   p_opt_def("base", param_msat_u32,
			     &base, cmd->ld->config.fee_base),
		   p_opt_def("ppm", param_number, &ppm,
			     cmd->ld->config.fee_per_satoshi),
		   p_opt_def("enforcedelay", param_number, &delaysecs, 600),
		   NULL))
		return command_param_failed();

	if (channel
	    && channel->state != CHANNELD_NORMAL
	    && channel->state != CHANNELD_AWAITING_LOCKIN
	    && channel->state != DUALOPEND_AWAITING_LOCKIN)
		return command_fail(cmd, LIGHTNINGD,
				    "Channel is in state %s",
				    channel_state_name(channel));

	/* Open JSON response object for later iteration */
	response = json_stream_success(cmd);
	json_add_num(response, "base", *base);
	json_add_num(response, "ppm", *ppm);
	json_array_start(response, "channels");

	/* If the user requested 'all' channels we need to iterate */
	if (channel == NULL) {
		list_for_each(&cmd->ld->peers, peer, list) {
			channel = peer_active_channel(peer);
			if (!channel)
				continue;
			if (channel->state != CHANNELD_NORMAL &&
			    channel->state != CHANNELD_AWAITING_LOCKIN &&
			    channel->state != DUALOPEND_AWAITING_LOCKIN)
				continue;
			set_channel_config(cmd, channel, base, ppm, NULL, NULL,
					   *delaysecs, response, false);
		}

	/* single channel should be updated */
	} else {
		set_channel_config(cmd, channel, base, ppm, NULL, NULL,
				   *delaysecs, response, false);
	}

	/* Close and return response */
	json_array_end(response);
	return command_success(cmd, response);
}

static const struct json_command setchannelfee_command = {
	"setchannelfee",
	"channels",
	json_setchannelfee,
	"Sets specific routing fees for channel with {id} "
	"(either peer ID, channel ID, short channel ID or 'all'). "
	"Routing fees are defined by a fixed {base} (msat) "
	"and a {ppm} (proportional per millionth) value. "
	"If values for {base} or {ppm} are left out, defaults will be used. "
	"{base} can also be defined in other units, for example '1sat'. "
	"If {id} is 'all', the fees will be applied for all channels. ",
	true /* deprecated */
};
AUTODATA(json_command, &setchannelfee_command);
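
/* Illustrative usage of the (deprecated) command above, e.g.:
 *   lightning-cli setchannelfee all 1000 10
 * sets base=1000 (msat) and ppm=10 on every active channel; `setchannel`
 * below is the non-deprecated replacement. */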

static struct command_result *json_setchannel(struct command *cmd,
					      const char *buffer,
					      const jsmntok_t *obj UNNEEDED,
					      const jsmntok_t *params)
{
	struct json_stream *response;
	struct peer *peer;
	struct channel *channel;
	u32 *base, *ppm, *delaysecs;
	struct amount_msat *htlc_min, *htlc_max;

	/* Parse the JSON command */
	if (!param(cmd, buffer, params,
		   p_req("id", param_channel_or_all, &channel),
		   p_opt("feebase", param_msat_u32, &base),
		   p_opt("feeppm", param_number, &ppm),
		   p_opt("htlcmin", param_msat, &htlc_min),
		   p_opt("htlcmax", param_msat, &htlc_max),
		   p_opt_def("enforcedelay", param_number, &delaysecs, 600),
		   NULL))
		return command_param_failed();

	/* Prevent obviously incorrect things! */
	if (htlc_min && htlc_max
	    && amount_msat_less(*htlc_max, *htlc_min)) {
		return command_fail(cmd, LIGHTNINGD,
				    "htlcmax cannot be less than htlcmin");
	}

	if (channel
	    && channel->state != CHANNELD_NORMAL
	    && channel->state != CHANNELD_AWAITING_LOCKIN
	    && channel->state != DUALOPEND_AWAITING_LOCKIN)
		return command_fail(cmd, LIGHTNINGD,
				    "Channel is in state %s",
				    channel_state_name(channel));

	/* Open JSON response object for later iteration */
	response = json_stream_success(cmd);
	json_array_start(response, "channels");

	/* If the user requested 'all' channels we need to iterate */
	if (channel == NULL) {
		list_for_each(&cmd->ld->peers, peer, list) {
			channel = peer_active_channel(peer);
			if (!channel)
				continue;
			if (channel->state != CHANNELD_NORMAL &&
			    channel->state != CHANNELD_AWAITING_LOCKIN &&
			    channel->state != DUALOPEND_AWAITING_LOCKIN)
				continue;
			set_channel_config(cmd, channel, base, ppm,
					   htlc_min, htlc_max,
					   *delaysecs, response, true);
		}

	/* single channel should be updated */
	} else {
		set_channel_config(cmd, channel, base, ppm,
				   htlc_min, htlc_max,
				   *delaysecs, response, true);
	}

	/* Close and return response */
	json_array_end(response);
	return command_success(cmd, response);
}

static const struct json_command setchannel_command = {
	"setchannel",
	"channels",
	json_setchannel,
	"Sets fees and/or htlc limits for channel with {id} "
	"(either peer ID, channel ID, short channel ID or 'all'). "
	"If {feebase}, {feeppm}, {htlcmin} or {htlcmax} is missing, it is unchanged. "
	"{feebase} can also be defined in other units, for example '1sat'. "
	"If {id} is 'all', the values will be applied for all channels. "
};
AUTODATA(json_command, &setchannel_command);
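
/* Illustrative usage of setchannel (values invented):
 *   lightning-cli setchannel id=all feeppm=200 htlcmax=100000sat
 * returns one "channels" entry per affected channel; if htlcmin/htlcmax had
 * to be clamped, the warning_htlcmin_too_low / warning_htlcmax_too_high
 * fields set in set_channel_config() appear in that entry. */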

#if DEVELOPER
static struct command_result *json_sign_last_tx(struct command *cmd,
						const char *buffer,
						const jsmntok_t *obj UNNEEDED,
						const jsmntok_t *params)
{
	struct node_id *peerid;
	struct peer *peer;
	struct json_stream *response;
	struct channel *channel;

	if (!param(cmd, buffer, params,
		   p_req("id", param_node_id, &peerid),
		   NULL))
		return command_param_failed();

	peer = peer_by_id(cmd->ld, peerid);
	if (!peer) {
		return command_fail(cmd, LIGHTNINGD,
				    "Could not find peer with that id");
	}
	channel = peer_active_channel(peer);
	if (!channel) {
		return command_fail(cmd, LIGHTNINGD,
				    "Could not find active channel");
	}

	response = json_stream_success(cmd);
	log_debug(channel->log, "dev-sign-last-tx: signing tx with %zu outputs",
		  channel->last_tx->wtx->num_outputs);

	sign_last_tx(channel, channel->last_tx, &channel->last_sig);
	json_add_tx(response, "tx", channel->last_tx);
	remove_sig(channel->last_tx);

	/* If we've got inflights, return them */
	if (!list_empty(&channel->inflights)) {
		struct channel_inflight *inflight;

		json_array_start(response, "inflights");
		list_for_each(&channel->inflights, inflight, list) {
			sign_last_tx(channel, inflight->last_tx,
				     &inflight->last_sig);
			json_object_start(response, NULL);
			json_add_txid(response, "funding_txid",
				      &inflight->funding->outpoint.txid);
			/* Print the signed inflight tx (not channel->last_tx),
			 * mirroring the non-inflight path above. */
			json_add_tx(response, "tx", inflight->last_tx);
			remove_sig(inflight->last_tx);
			json_object_end(response);
		}
		json_array_end(response);
	}

	return command_success(cmd, response);
}

static const struct json_command dev_sign_last_tx = {
	"dev-sign-last-tx",
	"developer",
	json_sign_last_tx,
	"Sign and show the last commitment transaction with peer {id}"
};
AUTODATA(json_command, &dev_sign_last_tx);
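
/* Illustrative usage (DEVELOPER builds only):
 *   lightning-cli dev-sign-last-tx <peer-id>
 * prints the signed last commitment tx as "tx", plus an "inflights" array
 * when the channel has pending inflight funding attempts (e.g. RBF
 * candidates). */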

static struct command_result *json_dev_fail(struct command *cmd,
					    const char *buffer,
					    const jsmntok_t *obj UNNEEDED,
					    const jsmntok_t *params)
{
	struct node_id *peerid;
	struct peer *peer;
	struct channel *channel;

	if (!param(cmd, buffer, params,
		   p_req("id", param_node_id, &peerid),
		   NULL))
		return command_param_failed();

	peer = peer_by_id(cmd->ld, peerid);
	if (!peer) {
		return command_fail(cmd, LIGHTNINGD,
				    "Could not find peer with that id");
	}

	channel = peer_active_channel(peer);
	if (!channel) {
		return command_fail(cmd, LIGHTNINGD,
				    "Could not find active channel with peer");
	}

	channel_fail_permanent(channel,
			       REASON_USER,
			       "Failing due to dev-fail command");
	return command_success(cmd, json_stream_success(cmd));
}

static const struct json_command dev_fail_command = {
	"dev-fail",
	"developer",
	json_dev_fail,
	"Fail with peer {id}"
};
AUTODATA(json_command, &dev_fail_command);

static void dev_reenable_commit_finished(struct subd *channeld UNUSED,
					 const u8 *resp UNUSED,
					 const int *fds UNUSED,
					 struct command *cmd)
{
	was_pending(command_success(cmd, json_stream_success(cmd)));
}

static struct command_result *json_dev_reenable_commit(struct command *cmd,
							const char *buffer,
							const jsmntok_t *obj UNNEEDED,
							const jsmntok_t *params)
{
	struct node_id *peerid;
	struct peer *peer;
	u8 *msg;
	struct channel *channel;

	if (!param(cmd, buffer, params,
		   p_req("id", param_node_id, &peerid),
		   NULL))
		return command_param_failed();

	peer = peer_by_id(cmd->ld, peerid);
	if (!peer) {
		return command_fail(cmd, LIGHTNINGD,
				    "Could not find peer with that id");
	}

	channel = peer_active_channel(peer);
	if (!channel) {
		return command_fail(cmd, LIGHTNINGD,
				    "Peer has no active channel");
	}
	if (!channel->owner) {
		return command_fail(cmd, LIGHTNINGD,
				    "Peer has no owner");
	}

	if (!streq(channel->owner->name, "channeld")) {
		return command_fail(cmd, LIGHTNINGD,
				    "Peer owned by %s", channel->owner->name);
	}

	msg = towire_channeld_dev_reenable_commit(channel);
	subd_req(peer, channel->owner, take(msg), -1, 0,
		 dev_reenable_commit_finished, cmd);
	return command_still_pending(cmd);
}

static const struct json_command dev_reenable_commit = {
	"dev-reenable-commit",
	"developer",
	json_dev_reenable_commit,
	"Re-enable the commit timer on peer {id}"
};
AUTODATA(json_command, &dev_reenable_commit);

struct dev_forget_channel_cmd {
	struct short_channel_id scid;
	struct channel *channel;
	bool force;
	struct command *cmd;
};

static void process_dev_forget_channel(struct bitcoind *bitcoind UNUSED,
				       const struct bitcoin_tx_output *txout,
				       void *arg)
{
	struct json_stream *response;
	struct dev_forget_channel_cmd *forget = arg;
	if (txout != NULL && !forget->force) {
		was_pending(command_fail(forget->cmd, LIGHTNINGD,
			    "Cowardly refusing to forget channel with an "
			    "unspent funding output, if you know what "
			    "you're doing you can override with "
			    "`force=true`, otherwise consider `close` or "
			    "`dev-fail`! If you force and the channel "
			    "confirms we will not track the funds in the "
			    "channel"));
		return;
	}
	response = json_stream_success(forget->cmd);
	json_add_bool(response, "forced", forget->force);
	json_add_bool(response, "funding_unspent", txout != NULL);
	json_add_txid(response, "funding_txid", &forget->channel->funding.txid);

	/* Set error so we don't try to reconnect. */
	forget->channel->error = towire_errorfmt(forget->channel,
						 &forget->channel->cid,
						 "dev_forget_channel");
	delete_channel(forget->channel);

	was_pending(command_success(forget->cmd, response));
}

static struct command_result *json_dev_forget_channel(struct command *cmd,
						      const char *buffer,
						      const jsmntok_t *obj UNNEEDED,
						      const jsmntok_t *params)
{
	struct node_id *peerid;
	struct peer *peer;
	struct channel *channel;
	struct short_channel_id *scid;
	struct channel_id *find_cid;
	struct dev_forget_channel_cmd *forget = tal(cmd, struct dev_forget_channel_cmd);
	forget->cmd = cmd;

	bool *force;
	if (!param(cmd, buffer, params,
		   p_req("id", param_node_id, &peerid),
		   p_opt("short_channel_id", param_short_channel_id, &scid),
		   p_opt("channel_id", param_channel_id, &find_cid),
		   p_opt_def("force", param_bool, &force, false),
		   NULL))
		return command_param_failed();

	forget->force = *force;
	peer = peer_by_id(cmd->ld, peerid);
	if (!peer) {
		return command_fail(cmd, LIGHTNINGD,
				    "Could not find channel with that peer");
	}

	forget->channel = NULL;
	list_for_each(&peer->channels, channel, list) {
		/* Check for channel id first */
		if (find_cid) {
			if (!channel_id_eq(find_cid, &channel->cid))
				continue;
		}
		if (scid) {
			if (!channel->scid)
				continue;
			if (!short_channel_id_eq(channel->scid, scid))
				continue;
		}
		if (forget->channel) {
			return command_fail(cmd, LIGHTNINGD,
					    "Multiple channels: "
					    "please specify short_channel_id");
		}
		forget->channel = channel;
	}
	if (!forget->channel) {
		return command_fail(cmd, LIGHTNINGD,
				    "No channels matching that peer_id%s",
				    scid ? " and that short_channel_id" : "");
	}

	if (channel_has_htlc_out(forget->channel) ||
	    channel_has_htlc_in(forget->channel)) {
		return command_fail(cmd, LIGHTNINGD,
				    "This channel has HTLCs attached and it is "
				    "not safe to forget it. Please use `close` "
				    "or `dev-fail` instead.");
	}

	if (!channel_unsaved(forget->channel))
		bitcoind_getutxout(cmd->ld->topology->bitcoind,
				   &forget->channel->funding,
				   process_dev_forget_channel, forget);
	return command_still_pending(cmd);
}

static const struct json_command dev_forget_channel_command = {
	"dev-forget-channel",
	"developer",
	json_dev_forget_channel,
	"Forget the channel with peer {id}, ignore UTXO check with {force}='true'.", false,
	"Forget the channel with peer {id}. Checks if the channel is still active by checking its funding transaction. Check can be ignored by setting {force} to 'true'"
};
AUTODATA(json_command, &dev_forget_channel_command);
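
/* Illustrative usage (DEVELOPER builds only, placeholders invented):
 *   lightning-cli dev-forget-channel id=<peer-id> short_channel_id=<scid>
 * refuses while the funding output is still unspent unless force=true is
 * passed, and never forgets a channel that still has HTLCs attached. */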

static void channeld_memleak_req_done(struct subd *channeld,
				      const u8 *msg, const int *fds UNUSED,
				      struct leak_detect *leaks)
{
	bool found_leak;

	if (!fromwire_channeld_dev_memleak_reply(msg, &found_leak))
		fatal("Bad channel_dev_memleak");
	if (found_leak)
		report_subd_memleak(leaks, channeld);
}

static void onchaind_memleak_req_done(struct subd *onchaind,
				      const u8 *msg, const int *fds UNUSED,
				      struct leak_detect *leaks)
{
	bool found_leak;

	if (!fromwire_onchaind_dev_memleak_reply(msg, &found_leak))
		fatal("Bad onchaind_dev_memleak");
	if (found_leak)
		report_subd_memleak(leaks, onchaind);
}

static void openingd_memleak_req_done(struct subd *open_daemon,
				      const u8 *msg, const int *fds UNUSED,
				      struct leak_detect *leaks)
{
	bool found_leak;

	if (!fromwire_openingd_dev_memleak_reply(msg, &found_leak))
		fatal("Bad opening_dev_memleak");
	if (found_leak)
		report_subd_memleak(leaks, open_daemon);
}

static void dualopend_memleak_req_done(struct subd *dualopend,
				       const u8 *msg, const int *fds UNUSED,
				       struct leak_detect *leaks)
{
	bool found_leak;

	if (!fromwire_dualopend_dev_memleak_reply(msg, &found_leak))
		fatal("Bad dualopend_dev_memleak");
	if (found_leak)
		report_subd_memleak(leaks, dualopend);
}

void peer_dev_memleak(struct lightningd *ld, struct leak_detect *leaks)
{
	struct peer *p;

	list_for_each(&ld->peers, p, list) {
		struct channel *c;

		if (p->uncommitted_channel) {
			struct subd *openingd = p->uncommitted_channel->open_daemon;
			start_leak_request(subd_req(openingd, openingd,
					   take(towire_openingd_dev_memleak(NULL)),
					   -1, 0, openingd_memleak_req_done, leaks),
					   leaks);
		}

		list_for_each(&p->channels, c, list) {
			if (!c->owner)
				continue;
			if (streq(c->owner->name, "channeld")) {
				start_leak_request(subd_req(c, c->owner,
					   take(towire_channeld_dev_memleak(NULL)),
					   -1, 0, channeld_memleak_req_done, leaks),
					   leaks);
			} else if (streq(c->owner->name, "onchaind")) {
				start_leak_request(subd_req(c, c->owner,
					   take(towire_onchaind_dev_memleak(NULL)),
					   -1, 0, onchaind_memleak_req_done, leaks),
					   leaks);
			} else if (streq(c->owner->name, "dualopend")) {
				start_leak_request(subd_req(c, c->owner,
					   take(towire_dualopend_dev_memleak(NULL)),
					   -1, 0, dualopend_memleak_req_done, leaks),
					   leaks);
			}
		}
	}
}

#endif /* DEVELOPER */