2021-02-01 13:28:50 +10:30
|
|
|
#include "config.h"
|
2017-01-10 15:24:20 +10:30
|
|
|
#include <assert.h>
|
|
|
|
#include <ccan/breakpoint/breakpoint.h>
|
|
|
|
#include <ccan/fdpass/fdpass.h>
|
|
|
|
#include <ccan/tal/str/str.h>
|
2019-04-08 09:21:30 +09:30
|
|
|
#include <common/daemon.h>
|
2017-08-29 01:35:01 +09:30
|
|
|
#include <common/daemon_conn.h>
|
|
|
|
#include <common/status.h>
|
2020-08-25 11:49:38 +09:30
|
|
|
#include <common/status_wiregen.h>
|
2021-04-08 15:46:49 +09:30
|
|
|
#include <common/version.h>
|
2017-09-12 14:25:52 +09:30
|
|
|
#include <errno.h>
|
2018-02-05 14:39:28 +10:30
|
|
|
#include <wire/peer_wire.h>
|
2017-08-29 01:32:01 +09:30
|
|
|
#include <wire/wire_sync.h>
|
2017-01-10 15:24:20 +10:30
|
|
|
|
|
|
|
/* FD for synchronous status writes to lightningd; -1 until
 * status_setup_sync() is called (i.e. when running in async mode). */
static int status_fd = -1;

/* Async connection to the master daemon; NULL until status_setup_async()
 * is called (i.e. when running in sync mode).  Exactly one of status_fd /
 * status_conn is active at a time — see the asserts in the setup fns. */
static struct daemon_conn *status_conn;

/* Whether to log raw peer IO.  Toggled asynchronously by SIGUSR1
 * (see got_sigusr1), hence volatile.  May also be set by --log-io. */
volatile bool logging_io = false;

/* The last value of logging_io we reported, so report_logging_io()
 * only emits a line on transitions. */
static bool was_logging_io;

/* If we're more than this many msgs deep, don't add debug messages. */
#define TRACE_QUEUE_LIMIT 20

/* How many debug messages we've dropped while the queue was backed up;
 * reported (and reset) once the queue drains.  See status_vfmt(). */
static size_t traces_suppressed;
|
|
|
|
|
2018-02-21 16:06:07 +01:00
|
|
|
/* SIGUSR1 handler: flip IO logging on/off at runtime.
 * Only touches the volatile flag, so it's async-signal-safe. */
static void got_sigusr1(int signal UNUSED)
{
	logging_io = !logging_io;
}
|
|
|
|
|
|
|
|
static void setup_logging_sighandler(void)
|
|
|
|
{
|
|
|
|
struct sigaction act;
|
|
|
|
|
2019-11-18 10:57:17 +10:30
|
|
|
/* Could have been set to true by --log-io arg. */
|
|
|
|
was_logging_io = logging_io;
|
|
|
|
|
2018-02-05 14:39:28 +10:30
|
|
|
memset(&act, 0, sizeof(act));
|
|
|
|
act.sa_handler = got_sigusr1;
|
|
|
|
act.sa_flags = SA_RESTART;
|
|
|
|
|
|
|
|
sigaction(SIGUSR1, &act, NULL);
|
|
|
|
}
|
2017-01-10 15:24:20 +10:30
|
|
|
|
2018-08-17 13:46:34 +09:30
|
|
|
static void report_logging_io(const char *why)
|
|
|
|
{
|
|
|
|
if (logging_io != was_logging_io) {
|
|
|
|
was_logging_io = logging_io;
|
2019-09-08 18:39:26 +02:00
|
|
|
status_debug("%s: IO LOGGING %s",
|
2018-08-17 13:46:34 +09:30
|
|
|
why, logging_io ? "ENABLED" : "DISABLED");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-20 07:02:44 +10:30
|
|
|
void status_setup_sync(int fd)
|
2017-01-10 15:24:20 +10:30
|
|
|
{
|
2017-03-20 07:02:44 +10:30
|
|
|
assert(status_fd == -1);
|
|
|
|
assert(!status_conn);
|
2017-01-10 15:24:20 +10:30
|
|
|
status_fd = fd;
|
2018-02-05 14:39:28 +10:30
|
|
|
setup_logging_sighandler();
|
2021-04-08 15:46:49 +09:30
|
|
|
|
|
|
|
/* Send version now. */
|
|
|
|
status_send(take(towire_status_version(NULL, version())));
|
2017-01-10 15:24:20 +10:30
|
|
|
}
|
|
|
|
|
2019-09-15 01:03:05 +08:00
|
|
|
/* Destructor on the master conn: clear the global so status_send()
 * doesn't use a dangling pointer after the conn is freed. */
static void destroy_daemon_conn(struct daemon_conn *dc UNUSED)
{
	status_conn = NULL;
}
|
|
|
|
|
2017-03-20 07:02:44 +10:30
|
|
|
void status_setup_async(struct daemon_conn *master)
|
|
|
|
{
|
|
|
|
assert(status_fd == -1);
|
|
|
|
assert(!status_conn);
|
|
|
|
status_conn = master;
|
2017-09-28 13:10:58 +09:30
|
|
|
|
2019-09-15 01:03:05 +08:00
|
|
|
tal_add_destructor(master, destroy_daemon_conn);
|
|
|
|
|
2018-02-05 14:39:28 +10:30
|
|
|
setup_logging_sighandler();
|
2021-04-08 15:46:49 +09:30
|
|
|
|
|
|
|
/* Send version now. */
|
|
|
|
status_send(take(towire_status_version(NULL, version())));
|
2017-03-20 07:02:44 +10:30
|
|
|
}
|
|
|
|
|
2018-02-23 16:23:47 +10:30
|
|
|
/* Deliver one status message to lightningd over whichever transport
 * was configured: the sync fd if set, else the async conn.  Also
 * reports any pending SIGUSR1 IO-logging toggle. */
void status_send(const u8 *msg TAKES)
{
	report_logging_io("SIGUSR1");

	if (status_fd < 0) {
		daemon_conn_send(status_conn, msg);
		return;
	}

	if (!wire_sync_write(status_fd, msg))
		/* No point printing error if lightningd is dead. */
		exit(1);
}
|
|
|
|
|
2019-11-17 22:12:33 +10:30
|
|
|
static void status_io_full(enum log_level iodir,
|
|
|
|
const struct node_id *peer,
|
|
|
|
const char *who, const u8 *p)
|
2018-02-05 14:39:27 +10:30
|
|
|
{
|
2019-11-17 22:12:33 +10:30
|
|
|
status_send(take(towire_status_io(NULL, iodir, peer, who, p)));
|
2018-02-05 14:39:27 +10:30
|
|
|
}
|
|
|
|
|
2021-12-02 17:16:42 +01:00
|
|
|
static bool status_peer_io_filter_packettype(const u8 *p)
|
|
|
|
{
|
|
|
|
int msg_type = fromwire_peektype(p);
|
|
|
|
switch (msg_type) {
|
|
|
|
case WIRE_PING:
|
|
|
|
case WIRE_PONG:
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-11-17 22:12:33 +10:30
|
|
|
static void status_peer_io_short(enum log_level iodir,
|
|
|
|
const struct node_id *peer,
|
|
|
|
const u8 *p)
|
2018-02-05 14:39:28 +10:30
|
|
|
{
|
2021-12-02 17:16:42 +01:00
|
|
|
if (!status_peer_io_filter_packettype(p))
|
|
|
|
status_peer_debug(peer, "%s %s",
|
|
|
|
iodir == LOG_IO_OUT ? "peer_out" : "peer_in",
|
|
|
|
peer_wire_name(fromwire_peektype(p)));
|
2018-02-05 14:39:28 +10:30
|
|
|
}
|
|
|
|
|
2019-11-17 22:12:33 +10:30
|
|
|
void status_peer_io(enum log_level iodir,
|
|
|
|
const struct node_id *peer,
|
|
|
|
const u8 *p)
|
2018-02-05 14:39:28 +10:30
|
|
|
{
|
2018-08-17 13:46:34 +09:30
|
|
|
report_logging_io("SIGUSR1");
|
2018-02-05 14:39:28 +10:30
|
|
|
if (logging_io)
|
2022-01-29 14:03:05 +10:30
|
|
|
status_io_full(iodir, peer, "", p);
|
2018-02-22 12:15:54 +10:30
|
|
|
/* We get a huge amount of gossip; don't log it */
|
2018-03-08 13:47:05 +10:30
|
|
|
else if (!is_msg_for_gossipd(p))
|
2019-11-17 22:12:33 +10:30
|
|
|
status_peer_io_short(iodir, peer, p);
|
2018-05-10 08:48:24 +09:30
|
|
|
}
|
|
|
|
|
2019-11-17 22:12:33 +10:30
|
|
|
void status_io(enum log_level iodir,
|
|
|
|
const struct node_id *peer,
|
|
|
|
const char *who,
|
2018-05-10 08:48:24 +09:30
|
|
|
const void *data, size_t len)
|
|
|
|
{
|
2018-08-17 13:46:34 +09:30
|
|
|
report_logging_io("SIGUSR1");
|
2018-05-10 08:48:24 +09:30
|
|
|
if (!logging_io)
|
|
|
|
return;
|
|
|
|
/* Horribly inefficient, but so is logging IO generally. */
|
2019-11-17 22:12:33 +10:30
|
|
|
status_io_full(iodir, peer, who, tal_dup_arr(tmpctx, u8, data, len, 0));
|
2018-02-05 14:39:28 +10:30
|
|
|
}
|
|
|
|
|
2019-11-17 22:12:33 +10:30
|
|
|
/* Format and send one log message at `level` (peer may be NULL).
 * In async mode, debug messages are dropped while the outgoing queue
 * is deeper than TRACE_QUEUE_LIMIT; the number dropped is reported
 * once the queue fully drains. */
void status_vfmt(enum log_level level,
		 const struct node_id *peer,
		 const char *fmt, va_list ap)
{
	char *str;

	/* We only suppress async debug msgs.  IO messages are even spammier
	 * but they only occur when explicitly asked for */
	if (level == LOG_DBG && status_conn) {
		size_t qlen = daemon_conn_queue_length(status_conn);

		/* Once suppressing, we keep suppressing until we're empty */
		if (traces_suppressed && qlen == 0) {
			size_t n = traces_suppressed;
			traces_suppressed = 0;
			/* Careful: recursion!  (Safe: traces_suppressed is
			 * now 0 and the queue is empty, so the recursive
			 * call takes neither suppression branch.) */
			status_debug("...[%zu debug messages suppressed]...", n);
		} else if (traces_suppressed || qlen > TRACE_QUEUE_LIMIT) {
			traces_suppressed++;
			return;
		}
	}
	str = tal_vfmt(NULL, fmt, ap);
	status_send(take(towire_status_log(NULL, level, peer, str)));
	tal_free(str);
}
|
|
|
|
|
2019-11-17 22:12:33 +10:30
|
|
|
void status_fmt(enum log_level level,
|
|
|
|
const struct node_id *peer,
|
|
|
|
const char *fmt, ...)
|
2017-10-11 20:32:50 +10:30
|
|
|
{
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
2019-11-17 22:12:33 +10:30
|
|
|
status_vfmt(level, peer, fmt, ap);
|
2017-10-11 20:32:50 +10:30
|
|
|
va_end(ap);
|
|
|
|
}
|
|
|
|
|
2018-02-08 11:55:12 +10:30
|
|
|
/* Flush any queued async status messages, then exit with 0x80|reason
 * (high bit set so lightningd can tell a reported failure from a
 * plain crash; reason is truncated to one byte). */
static NORETURN void flush_and_exit(int reason)
{
	/* Don't let it take forever. */
	alarm(10);
	if (status_conn)
		daemon_conn_sync_flush(status_conn);

	exit(0x80 | (reason & 0xFF));
}
|
|
|
|
|
2019-06-04 03:41:25 +09:30
|
|
|
/* Pass file descriptor `fd` to lightningd over the status fd.
 * Only valid in sync mode (hence the assert). */
void status_send_fd(int fd)
{
	assert(!status_conn);
	fdpass_send(status_fd, fd);
}
|
|
|
|
|
|
|
|
/* Send a final (fatal) status message, flush, and exit: the exit code
 * encodes the message's wire type.  Does not return. */
void status_send_fatal(const u8 *msg TAKES)
{
	int reason = fromwire_peektype(msg);

	/* Let a debugger catch us here before we vanish. */
	breakpoint();

	status_send(msg);
	flush_and_exit(reason);
}
|
|
|
|
|
2018-02-08 11:55:12 +10:30
|
|
|
/* FIXME: rename to status_fatal, s/fail/fatal/ in status_failreason enums */
|
2018-02-08 11:55:12 +10:30
|
|
|
void status_failed(enum status_failreason reason, const char *fmt, ...)
|
2017-01-10 15:24:20 +10:30
|
|
|
{
|
|
|
|
va_list ap;
|
|
|
|
char *str;
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
str = tal_vfmt(NULL, fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
|
2019-04-08 09:21:30 +09:30
|
|
|
/* Give a nice backtrace when this happens! */
|
|
|
|
if (reason == STATUS_FAIL_INTERNAL_ERROR)
|
|
|
|
send_backtrace(str);
|
|
|
|
|
2019-06-04 03:41:25 +09:30
|
|
|
status_send_fatal(take(towire_status_fail(NULL, reason, str)));
|
2017-01-10 15:24:20 +10:30
|
|
|
}
|
2017-09-12 14:25:52 +09:30
|
|
|
|
|
|
|
/* Die with a useful message when a msg from the master daemon fails to
 * parse: distinguishes a read failure (msg NULL, errno set) from a
 * parse failure (hexdump the bytes).  Does not return. */
void master_badmsg(u32 type_expected, const u8 *msg)
{
	if (!msg) {
		status_failed(STATUS_FAIL_MASTER_IO,
			      "failed reading msg %u: %s",
			      type_expected, strerror(errno));
	}
	status_failed(STATUS_FAIL_MASTER_IO,
		      "Error parsing %u: %s",
		      type_expected, tal_hex(tmpctx, msg));
}
|
2021-09-06 22:09:27 +09:30
|
|
|
|
|
|
|
#if DEVELOPER
/* Print BROKEN status: callback for dump_memleak. */
void memleak_status_broken(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	status_vfmt(LOG_BROKEN, NULL, fmt, args);
	va_end(args);
}
#endif
|