mirror of
https://github.com/ElementsProject/lightning.git
synced 2024-11-19 09:54:16 +01:00
lightningd/subd: add a context to requests.
If a peer dies, and then we get a reply, that can cause access after free. The usual way to handle this is to make the request a child of the peer, but in fact we still want to catch (and discard) it, so it's a little more complex internally. Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This commit is contained in:
parent
a815500653
commit
fed25cc540
@ -115,7 +115,7 @@ static void json_dev_newhtlc(struct command *cmd,
|
||||
|
||||
/* FIXME: If subdaemon dies? */
|
||||
msg = towire_channel_offer_htlc(cmd, msatoshi, expiry, &rhash, onion);
|
||||
subd_req(peer->owner, take(msg), -1, 0, offer_htlc_reply, cmd);
|
||||
subd_req(peer, peer->owner, take(msg), -1, 0, offer_htlc_reply, cmd);
|
||||
}
|
||||
|
||||
static const struct json_command dev_newhtlc_command = {
|
||||
|
@ -216,7 +216,7 @@ static void json_getnodes(struct command *cmd, const char *buffer,
|
||||
{
|
||||
struct lightningd *ld = ld_from_dstate(cmd->dstate);
|
||||
u8 *req = towire_gossip_getnodes_request(cmd);
|
||||
subd_req(ld->gossip, req, -1, 0, json_getnodes_reply, cmd);
|
||||
subd_req(cmd, ld->gossip, req, -1, 0, json_getnodes_reply, cmd);
|
||||
}
|
||||
|
||||
static const struct json_command getnodes_command = {
|
||||
@ -290,7 +290,7 @@ static void json_getroute(struct command *cmd, const char *buffer, const jsmntok
|
||||
return;
|
||||
}
|
||||
u8 *req = towire_gossip_getroute_request(cmd, &cmd->dstate->id, &id, msatoshi, riskfactor*1000);
|
||||
subd_req(ld->gossip, req, -1, 0, json_getroute_reply, cmd);
|
||||
subd_req(ld->gossip, ld->gossip, req, -1, 0, json_getroute_reply, cmd);
|
||||
}
|
||||
|
||||
static const struct json_command getroute_command = {
|
||||
@ -345,7 +345,7 @@ static void json_getchannels(struct command *cmd, const char *buffer,
|
||||
{
|
||||
struct lightningd *ld = ld_from_dstate(cmd->dstate);
|
||||
u8 *req = towire_gossip_getchannels_request(cmd);
|
||||
subd_req(ld->gossip, req, -1, 0, json_getchannels_reply, cmd);
|
||||
subd_req(ld->gossip, ld->gossip, req, -1, 0, json_getchannels_reply, cmd);
|
||||
}
|
||||
|
||||
static const struct json_command getchannels_command = {
|
||||
|
@ -97,7 +97,7 @@ void hsm_init(struct lightningd *ld, bool newdir)
|
||||
else
|
||||
create = (access("hsm_secret", F_OK) != 0);
|
||||
|
||||
subd_req(ld->hsm, take(towire_hsmctl_init(ld->hsm, create)),
|
||||
subd_req(ld->hsm, ld->hsm, take(towire_hsmctl_init(ld->hsm, create)),
|
||||
-1, 0, hsm_init_done, ld);
|
||||
|
||||
if (io_loop(NULL, NULL) != ld->hsm)
|
||||
|
@ -195,7 +195,7 @@ static bool peer_got_handshake_hsmfd(struct subd *hsm, const u8 *msg,
|
||||
|
||||
/* Now hand peer request to the handshake daemon: hands it
|
||||
* back on success */
|
||||
subd_req(peer->owner, take(req), -1, 1, handshake_succeeded, peer);
|
||||
subd_req(peer, peer->owner, take(req), -1, 1, handshake_succeeded, peer);
|
||||
return true;
|
||||
|
||||
error:
|
||||
@ -213,7 +213,7 @@ static struct io_plan *peer_in(struct io_conn *conn, struct lightningd *ld)
|
||||
return io_close(conn);
|
||||
|
||||
/* Get HSM fd for this peer. */
|
||||
subd_req(ld->hsm,
|
||||
subd_req(peer, ld->hsm,
|
||||
take(towire_hsmctl_hsmfd_ecdh(ld, peer->unique_id)),
|
||||
-1, 1, peer_got_handshake_hsmfd, peer);
|
||||
|
||||
@ -344,7 +344,7 @@ static struct io_plan *peer_out(struct io_conn *conn,
|
||||
peer->id = tal_dup(peer, struct pubkey, &jc->id);
|
||||
|
||||
/* Get HSM fd for this peer. */
|
||||
subd_req(ld->hsm,
|
||||
subd_req(peer, ld->hsm,
|
||||
take(towire_hsmctl_hsmfd_ecdh(ld, peer->unique_id)),
|
||||
-1, 1, peer_got_handshake_hsmfd, peer);
|
||||
|
||||
@ -732,7 +732,7 @@ static void peer_start_channeld(struct peer *peer, bool am_funder,
|
||||
.commit_time));
|
||||
|
||||
/* Get fd from hsm. */
|
||||
subd_req(peer->ld->hsm,
|
||||
subd_req(peer, peer->ld->hsm,
|
||||
take(towire_hsmctl_hsmfd_ecdh(peer, peer->unique_id)), -1, 1,
|
||||
peer_start_channeld_hsmfd, cds);
|
||||
}
|
||||
@ -779,7 +779,7 @@ static bool opening_release_tx(struct subd *opening, const u8 *resp,
|
||||
&fc->remote_fundingkey,
|
||||
utxos);
|
||||
tal_free(utxos);
|
||||
subd_req(fc->peer->ld->hsm, take(msg), -1, 0,
|
||||
subd_req(fc, fc->peer->ld->hsm, take(msg), -1, 0,
|
||||
opening_got_hsm_funding_sig, fc);
|
||||
|
||||
/* Start normal channel daemon. */
|
||||
@ -824,7 +824,7 @@ static bool opening_gen_funding(struct subd *opening, const u8 *reply,
|
||||
|
||||
msg = towire_opening_open_funding(fc, fc->peer->funding_txid,
|
||||
fc->peer->funding_outnum);
|
||||
subd_req(fc->peer->owner, take(msg), -1, 1, opening_release_tx, fc);
|
||||
subd_req(fc, fc->peer->owner, take(msg), -1, 1, opening_release_tx, fc);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -887,7 +887,7 @@ static bool opening_accept_reply(struct subd *opening, const u8 *reply,
|
||||
funding_depth_cb, NULL);
|
||||
|
||||
/* Tell it we're watching. */
|
||||
subd_req(opening, towire_opening_accept_finish(reply),
|
||||
subd_req(peer, opening, towire_opening_accept_finish(reply),
|
||||
-1, 1,
|
||||
opening_accept_finish_response, peer);
|
||||
return true;
|
||||
@ -997,7 +997,7 @@ void peer_accept_open(struct peer *peer,
|
||||
tal_free(peer);
|
||||
return;
|
||||
}
|
||||
subd_req(peer->owner, take(msg), -1, 0, opening_accept_reply, peer);
|
||||
subd_req(peer, peer->owner, take(msg), -1, 0, opening_accept_reply, peer);
|
||||
}
|
||||
|
||||
/* Peer has been released from gossip. Start opening. */
|
||||
@ -1064,7 +1064,7 @@ static bool gossip_peer_released(struct subd *gossip,
|
||||
msg = towire_opening_open(fc, fc->peer->funding_satoshi,
|
||||
fc->peer->push_msat,
|
||||
15000, max_minimum_depth);
|
||||
subd_req(opening, take(msg), -1, 0, opening_gen_funding, fc);
|
||||
subd_req(fc, opening, take(msg), -1, 0, opening_gen_funding, fc);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1114,7 +1114,7 @@ static void json_fund_channel(struct command *cmd,
|
||||
/* Tie this fc lifetime (and hence utxo release) to the peer */
|
||||
tal_steal(fc->peer, fc);
|
||||
tal_add_destructor(fc, fail_fundchannel_command);
|
||||
subd_req(ld->gossip, msg, -1, 2, gossip_peer_released, fc);
|
||||
subd_req(fc, ld->gossip, msg, -1, 2, gossip_peer_released, fc);
|
||||
}
|
||||
|
||||
static const struct json_command fund_channel_command = {
|
||||
|
@ -48,14 +48,38 @@ struct subd_req {
|
||||
void *replycb_data;
|
||||
|
||||
size_t num_reply_fds;
|
||||
/* If non-NULL, this is here to disable replycb */
|
||||
void *disabler;
|
||||
};
|
||||
|
||||
static void free_subd_req(struct subd_req *sr)
|
||||
{
|
||||
list_del(&sr->list);
|
||||
/* Don't disable once we're freed! */
|
||||
if (sr->disabler)
|
||||
tal_free(sr->disabler);
|
||||
}
|
||||
|
||||
static void add_req(struct subd *sd, int type, size_t num_fds_in,
|
||||
/* Called when the callback is disabled because caller was freed. */
|
||||
static bool ignore_reply(struct subd *sd, const u8 *msg, const int *fds,
|
||||
void *arg)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
log_debug(sd->log, "IGNORING REPLY");
|
||||
for (i = 0; i < tal_count(fds); i++)
|
||||
close(fds[i]);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void disable_cb(void *disabler, struct subd_req *sr)
|
||||
{
|
||||
sr->replycb = ignore_reply;
|
||||
sr->disabler = NULL;
|
||||
}
|
||||
|
||||
static void add_req(const tal_t *ctx,
|
||||
struct subd *sd, int type, size_t num_fds_in,
|
||||
bool (*replycb)(struct subd *, const u8 *, const int *,
|
||||
void *),
|
||||
void *replycb_data)
|
||||
@ -66,6 +90,15 @@ static void add_req(struct subd *sd, int type, size_t num_fds_in,
|
||||
sr->replycb = replycb;
|
||||
sr->replycb_data = replycb_data;
|
||||
sr->num_reply_fds = num_fds_in;
|
||||
|
||||
/* We don't allocate sr off ctx, because we still have to handle the
|
||||
* case where ctx is freed between request and reply. Hence this
|
||||
* trick. */
|
||||
if (ctx) {
|
||||
sr->disabler = tal(ctx, char);
|
||||
tal_add_destructor2(sr->disabler, disable_cb, sr);
|
||||
} else
|
||||
sr->disabler = NULL;
|
||||
assert(strends(sd->msgname(sr->reply_type), "_REPLY"));
|
||||
|
||||
/* Keep in FIFO order: we sent in order, so replies will be too. */
|
||||
@ -381,7 +414,8 @@ void subd_send_fd(struct subd *sd, int fd)
|
||||
msg_enqueue_fd(&sd->outq, fd);
|
||||
}
|
||||
|
||||
void subd_req_(struct subd *sd,
|
||||
void subd_req_(const tal_t *ctx,
|
||||
struct subd *sd,
|
||||
const u8 *msg_out,
|
||||
int fd_out, size_t num_fds_in,
|
||||
bool (*replycb)(struct subd *, const u8 *, const int *, void *),
|
||||
@ -394,7 +428,7 @@ void subd_req_(struct subd *sd,
|
||||
if (fd_out >= 0)
|
||||
subd_send_fd(sd, fd_out);
|
||||
|
||||
add_req(sd, type, num_fds_in, replycb, replycb_data);
|
||||
add_req(ctx, sd, type, num_fds_in, replycb, replycb_data);
|
||||
}
|
||||
|
||||
char *opt_subd_debug(const char *optarg, struct lightningd *ld)
|
||||
|
@ -89,6 +89,7 @@ void subd_send_fd(struct subd *sd, int fd);
|
||||
|
||||
/**
|
||||
* subd_req - queue a request to the subdaemon.
|
||||
* @ctx: lifetime for the callback: if this is freed, don't call replycb.
|
||||
* @sd: subdaemon to request
|
||||
* @msg_out: request message (can be take)
|
||||
* @fd_out: if >=0 fd to pass at the end of the message (closed after)
|
||||
@ -98,14 +99,15 @@ void subd_send_fd(struct subd *sd, int fd);
|
||||
*
|
||||
* @replycb cannot free @sd, so it returns false to remove it.
|
||||
*/
|
||||
#define subd_req(sd, msg_out, fd_out, num_fds_in, replycb, replycb_data) \
|
||||
subd_req_((sd), (msg_out), (fd_out), (num_fds_in), \
|
||||
#define subd_req(ctx, sd, msg_out, fd_out, num_fds_in, replycb, replycb_data) \
|
||||
subd_req_((ctx), (sd), (msg_out), (fd_out), (num_fds_in), \
|
||||
typesafe_cb_preargs(bool, void *, \
|
||||
(replycb), (replycb_data), \
|
||||
struct subd *, \
|
||||
const u8 *, const int *), \
|
||||
(replycb_data))
|
||||
void subd_req_(struct subd *sd,
|
||||
void subd_req_(const tal_t *ctx,
|
||||
struct subd *sd,
|
||||
const u8 *msg_out,
|
||||
int fd_out, size_t num_fds_in,
|
||||
bool (*replycb)(struct subd *, const u8 *, const int *, void *),
|
||||
|
Loading…
Reference in New Issue
Block a user