#include "log.h"
#include <backtrace-supported.h>
#include <backtrace.h>
#include <ccan/array_size/array_size.h>
#include <ccan/err/err.h>
#include <ccan/io/io.h>
#include <ccan/list/list.h>
#include <ccan/opt/opt.h>
#include <ccan/read_write_all/read_write_all.h>
#include <ccan/str/hex/hex.h>
#include <ccan/tal/link/link.h>
#include <ccan/tal/str/str.h>
#include <common/json_command.h>
#include <common/jsonrpc_errors.h>
#include <common/memleak.h>
#include <common/param.h>
#include <common/pseudorand.h>
#include <common/utils.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <lightningd/jsonrpc.h>
#include <lightningd/lightningd.h>
#include <lightningd/options.h>
#include <signal.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Once we're up and running, this is set up. */
struct log *crashlog;

struct log_entry {
	struct list_node list;
	struct timeabs time;
	enum log_level level;
	unsigned int skipped;
	const char *prefix;
	char *log;
	/* Iff LOG_IO */
	const u8 *io;
};

struct log_book {
	size_t mem_used;
	size_t max_mem;
	void (*print)(const char *prefix,
		      enum log_level level,
		      bool continued,
		      const struct timeabs *time,
		      const char *str, const u8 *io, void *arg);
	void *print_arg;
	enum log_level print_level;
	struct timeabs init_time;

	struct list_head log;
};

struct log {
	struct log_book *lr;
	const char *prefix;
};

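/* Write one entry to the given stream: an ISO-8601 UTC timestamp with
 * millisecond precision, the log prefix and the message.  IO entries get a
 * direction marker and a hex dump; continuation lines are tab-indented. */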
static void log_to_file(const char *prefix,
			enum log_level level,
			bool continued,
			const struct timeabs *time,
			const char *str,
			const u8 *io,
			FILE *logf)
{
	char iso8601_msec_fmt[sizeof("YYYY-mm-ddTHH:MM:SS.%03dZ")];
	strftime(iso8601_msec_fmt, sizeof(iso8601_msec_fmt), "%FT%T.%%03dZ",
		 gmtime(&time->ts.tv_sec));

	char iso8601_s[sizeof("YYYY-mm-ddTHH:MM:SS.nnnZ")];
	snprintf(iso8601_s, sizeof(iso8601_s), iso8601_msec_fmt,
		 (int) time->ts.tv_nsec / 1000000);

	if (level == LOG_IO_IN || level == LOG_IO_OUT) {
		const char *dir = level == LOG_IO_IN ? "[IN]" : "[OUT]";
		char *hex = tal_hex(NULL, io);
		fprintf(logf, "%s %s%s%s %s\n",
			iso8601_s, prefix, str, dir, hex);
		tal_free(hex);
	} else if (!continued) {
		fprintf(logf, "%s %s %s\n", iso8601_s, prefix, str);
	} else {
		fprintf(logf, "%s %s \t%s\n", iso8601_s, prefix, str);
	}
	fflush(logf);
}

static void log_to_stdout(const char *prefix,
			  enum log_level level,
			  bool continued,
			  const struct timeabs *time,
			  const char *str,
			  const u8 *io, void *unused UNUSED)
{
	log_to_file(prefix, level, continued, time, str, io, stdout);
}

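/* Memory accounted to one entry: the struct itself, the formatted string
 * (including its NUL terminator) and any captured IO buffer. */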
static size_t mem_used(const struct log_entry *e)
{
	return sizeof(*e) + strlen(e->log) + 1 + tal_count(e->io);
}

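/* Randomly drop entries to reclaim memory, biased so that lower-priority
 * levels are discarded first; the most recent entry is always kept.
 * Returns the number of entries deleted. */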
static size_t prune_log(struct log_book *log)
{
	struct log_entry *i, *next, *tail;
	size_t skipped = 0, deleted = 0;

	/* Never delete the last one. */
	tail = list_tail(&log->log, struct log_entry, list);

	list_for_each_safe(&log->log, i, next, list) {
		/* 50% chance of deleting IO_IN, 25% IO_OUT, 12.5% DEBUG... */
		if (i == tail || !pseudorand(2 << i->level)) {
			i->skipped += skipped;
			skipped = 0;
			continue;
		}

		list_del_from(&log->log, &i->list);
		log->mem_used -= mem_used(i);
		tal_free(i);
		skipped++;
		deleted++;
	}

	assert(!skipped);
	return deleted;
}

struct log_book *new_log_book(size_t max_mem,
			      enum log_level printlevel)
{
	struct log_book *lr = tal_linkable(tal(NULL, struct log_book));

	/* Give a reasonable size for memory limit! */
	assert(max_mem > sizeof(struct log) * 2);
	lr->mem_used = 0;
	lr->max_mem = max_mem;
	lr->print = log_to_stdout;
	lr->print_level = printlevel;
	lr->init_time = time_now();
	list_head_init(&lr->log);

	return lr;
}

/* With different entry points */
struct log *PRINTF_FMT(3,4)
new_log(const tal_t *ctx, struct log_book *record, const char *fmt, ...)
{
	struct log *log = tal(ctx, struct log);
	va_list ap;

	log->lr = tal_link(log, record);
	va_start(ap, fmt);
	/* log->lr owns this, since its entries keep a pointer to it. */
	/* FIXME: Refcount this! */
	log->prefix = notleak(tal_vfmt(log->lr, fmt, ap));
	va_end(ap);

	return log;
}

struct log_book *get_log_book(const struct log *log)
{
	return log->lr;
}

enum log_level get_log_level(struct log_book *lr)
{
	return lr->print_level;
}

void set_log_level(struct log_book *lr, enum log_level level)
{
	lr->print_level = level;
}

void set_log_prefix(struct log *log, const char *prefix)
{
	/* log->lr owns this, since it keeps a pointer to it. */
	log->prefix = tal_strdup(log->lr, prefix);
}

void set_log_outfn_(struct log_book *lr,
		    void (*print)(const char *prefix,
				  enum log_level level,
				  bool continued,
				  const struct timeabs *time,
				  const char *str, const u8 *io, void *arg),
		    void *arg)
{
	lr->print = print;
	lr->print_arg = arg;
}

const char *log_prefix(const struct log *log)
{
	return log->prefix;
}

size_t log_max_mem(const struct log_book *lr)
{
	return lr->max_mem;
}

size_t log_used(const struct log_book *lr)
{
	return lr->mem_used;
}

const struct timeabs *log_init_time(const struct log_book *lr)
{
	return &lr->init_time;
}

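/* Append an entry to the book and account for its memory; if the book now
 * exceeds max_mem, prune and report how many entries were dropped. */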
static void add_entry(struct log *log, struct log_entry *l)
{
	log->lr->mem_used += mem_used(l);
	list_add_tail(&log->lr->log, &l->list);

	if (log->lr->mem_used > log->lr->max_mem) {
		size_t old_mem = log->lr->mem_used, deleted;
		deleted = prune_log(log->lr);
		log_debug(log, "Log pruned %zu entries (mem %zu -> %zu)",
			  deleted, old_mem, log->lr->mem_used);
	}
}

static struct log_entry *new_log_entry(struct log *log, enum log_level level)
{
	struct log_entry *l = tal(log->lr, struct log_entry);

	l->time = time_now();
	l->level = level;
	l->skipped = 0;
	l->prefix = log->prefix;
	l->io = NULL;

	return l;
}

static void maybe_print(const struct log *log, const struct log_entry *l,
			size_t offset)
{
	if (l->level >= log->lr->print_level)
		log->lr->print(log->prefix, l->level, offset != 0,
			       &l->time, l->log + offset,
			       l->io, log->lr->print_arg);
}

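/* Format and record one entry; non-printable bytes are replaced with '?'
 * before it is printed and stored.  errno is preserved so callers can log
 * inside error paths. */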
void logv(struct log *log, enum log_level level, const char *fmt, va_list ap)
{
	int save_errno = errno;
	struct log_entry *l = new_log_entry(log, level);

	l->log = tal_vfmt(l, fmt, ap);

	size_t log_len = strlen(l->log);

	/* Sanitize any non-printable characters, and replace with '?' */
	for (size_t i = 0; i < log_len; i++)
		if (l->log[i] < ' ' || l->log[i] >= 0x7f)
			l->log[i] = '?';

	maybe_print(log, l, 0);

	add_entry(log, l);
	errno = save_errno;
}

void log_io(struct log *log, enum log_level dir,
	    const char *str TAKES,
	    const void *data TAKES, size_t len)
{
	int save_errno = errno;
	struct log_entry *l = new_log_entry(log, dir);

	assert(dir == LOG_IO_IN || dir == LOG_IO_OUT);

	l->log = tal_strdup(l, str);
	l->io = tal_dup_arr(l, u8, data, len, 0);

	maybe_print(log, l, 0);
	add_entry(log, l);
	errno = save_errno;
}

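/* Append to the most recent entry.  It is temporarily unlinked from the
 * book so pruning cannot free it while we extend it, then re-added, and
 * only the appended part is printed. */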
void logv_add(struct log *log, const char *fmt, va_list ap)
{
	struct log_entry *l = list_tail(&log->lr->log, struct log_entry, list);
	size_t oldlen = strlen(l->log);

	/* Remove from list, so it doesn't get pruned. */
	log->lr->mem_used -= mem_used(l);
	list_del_from(&log->lr->log, &l->list);

	tal_append_vfmt(&l->log, fmt, ap);

	/* Sanitize any non-printable characters, and replace with '?' */
	for (size_t i = oldlen; i < strlen(l->log); i++)
		if (l->log[i] < ' ' || l->log[i] >= 0x7f)
			l->log[i] = '?';

	add_entry(log, l);

	maybe_print(log, l, oldlen);
}

void log_(struct log *log, enum log_level level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	logv(log, level, fmt, ap);
	va_end(ap);
}

void log_add(struct log *log, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	logv_add(log, fmt, ap);
	va_end(ap);
}

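/* Iterate every entry in the book, calling func with the entry's time
 * relative to when the book was created. */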
void log_each_line_(const struct log_book *lr,
		    void (*func)(unsigned int skipped,
				 struct timerel time,
				 enum log_level level,
				 const char *prefix,
				 const char *log,
				 const u8 *io,
				 void *arg),
		    void *arg)
{
	const struct log_entry *i;

	list_for_each(&lr->log, i, list) {
		func(i->skipped, time_between(i->time, lr->init_time),
		     i->level, i->prefix, i->log, i->io, arg);
	}
}

struct log_data {
	int fd;
	const char *prefix;
};

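/* Write one entry to data->fd using a fixed stack buffer and no
 * allocations, so it is safe to use from the crash-dump path. */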
static void log_one_line(unsigned int skipped,
			 struct timerel diff,
			 enum log_level level,
			 const char *prefix,
			 const char *log,
			 const u8 *io,
			 struct log_data *data)
{
	char buf[101];

	if (skipped) {
		snprintf(buf, sizeof(buf), "%s... %u skipped...",
			 data->prefix, skipped);
		write_all(data->fd, buf, strlen(buf));
		data->prefix = "\n";
	}

	snprintf(buf, sizeof(buf), "%s+%lu.%09u %s%s: ",
		 data->prefix,
		 (unsigned long)diff.ts.tv_sec,
		 (unsigned)diff.ts.tv_nsec,
		 prefix,
		 level == LOG_IO_IN ? "IO_IN"
		 : level == LOG_IO_OUT ? "IO_OUT"
		 : level == LOG_DBG ? "DEBUG"
		 : level == LOG_INFORM ? "INFO"
		 : level == LOG_UNUSUAL ? "UNUSUAL"
		 : level == LOG_BROKEN ? "BROKEN"
		 : "**INVALID**");

	write_all(data->fd, buf, strlen(buf));
	write_all(data->fd, log, strlen(log));
	if (level == LOG_IO_IN || level == LOG_IO_OUT) {
		size_t off, used, len = tal_count(io);

		/* No allocations, may be in signal handler. */
		for (off = 0; off < len; off += used) {
			used = len - off;
			if (hex_str_size(used) > sizeof(buf))
				used = hex_data_size(sizeof(buf));
			hex_encode(io + off, used, buf, hex_str_size(used));
			write_all(data->fd, buf, strlen(buf));
		}
	}

	data->prefix = "\n";
}

static struct {
	const char *name;
	enum log_level level;
} log_levels[] = {
	{ "IO", LOG_IO_OUT },
	{ "DEBUG", LOG_DBG },
	{ "INFO", LOG_INFORM },
	{ "UNUSUAL", LOG_UNUSUAL },
	{ "BROKEN", LOG_BROKEN }
};

static char *arg_log_level(const char *arg, struct log *log)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(log_levels); i++) {
		if (strcasecmp(arg, log_levels[i].name) == 0) {
			set_log_level(log->lr, log_levels[i].level);
			return NULL;
		}
	}
	return tal_fmt(NULL, "unknown log level");
}

static void show_log_level(char buf[OPT_SHOW_LEN], const struct log *log)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(log_levels); i++) {
		if (log->lr->print_level == log_levels[i].level) {
			strncpy(buf, log_levels[i].name, OPT_SHOW_LEN-1);
			return;
		}
	}
	abort();
}

static char *arg_log_prefix(const char *arg, struct log *log)
{
	set_log_prefix(log, arg);
	return NULL;
}

static void show_log_prefix(char buf[OPT_SHOW_LEN], const struct log *log)
{
	strncpy(buf, log->prefix, OPT_SHOW_LEN);
}

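/* Log rotation on SIGHUP: the signal handler only writes a byte to a pipe;
 * the io loop reads it and reopens the log file, keeping the real work out
 * of signal context. */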
static int signalfds[2];

static void handle_sighup(int sig)
{
	/* Writes a single 0x00 byte to the signalfds pipe. This may fail if
	 * we're hammered with SIGHUP. We don't care. */
	if (write(signalfds[1], "", 1))
		;
}

/* Mutual recursion */
static struct io_plan *setup_read(struct io_conn *conn, struct lightningd *ld);

static struct io_plan *rotate_log(struct io_conn *conn, struct lightningd *ld)
{
	FILE *logf;

	log_info(ld->log, "Ending log due to SIGHUP");
	fclose(ld->log->lr->print_arg);

	logf = fopen(ld->logfile, "a");
	if (!logf)
		err(1, "failed to reopen log file %s", ld->logfile);
	set_log_outfn(ld->log->lr, log_to_file, logf);

	log_info(ld->log, "Started log due to SIGHUP");
	return setup_read(conn, ld);
}

static struct io_plan *setup_read(struct io_conn *conn, struct lightningd *ld)
{
	/* We read and discard. */
	static char discard;
	return io_read(conn, &discard, 1, rotate_log, ld);
}

static void setup_log_rotation(struct lightningd *ld)
{
	struct sigaction act;
	if (pipe(signalfds) != 0)
		errx(1, "Pipe for signalfds");

	notleak(io_new_conn(ld, signalfds[0], setup_read, ld));

	io_fd_block(signalfds[1], false);
	memset(&act, 0, sizeof(act));
	act.sa_handler = handle_sighup;
	act.sa_flags = SA_RESETHAND;

	if (sigaction(SIGHUP, &act, NULL) != 0)
		err(1, "Setting up SIGHUP handler");
}

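/* --log-file handler: switch output from stdout (or a previous file) to
 * the given path, setting up SIGHUP rotation the first time. */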
char *arg_log_to_file(const char *arg, struct lightningd *ld)
{
	FILE *logf;

	if (ld->logfile) {
		fclose(ld->log->lr->print_arg);
		ld->logfile = tal_free(ld->logfile);
	} else
		setup_log_rotation(ld);

	ld->logfile = tal_strdup(ld, arg);
	logf = fopen(arg, "a");
	if (!logf)
		return tal_fmt(NULL, "Failed to open: %s", strerror(errno));
	set_log_outfn(ld->log->lr, log_to_file, logf);
	return NULL;
}

void opt_register_logging(struct lightningd *ld)
{
	opt_register_early_arg("--log-level",
			       arg_log_level, show_log_level, ld->log,
			       "log level (io, debug, info, unusual, broken)");
	opt_register_early_arg("--log-prefix", arg_log_prefix, show_log_prefix,
			       ld->log,
			       "log prefix");
	/* We want this opened later, once we have moved to lightning dir */
	opt_register_arg("--log-file=<file>", arg_log_to_file, NULL, ld,
			 "log to file instead of stdout");
}

void log_backtrace_print(const char *fmt, ...)
{
	va_list ap;

	if (!crashlog)
		return;

	va_start(ap, fmt);
	logv(crashlog, LOG_BROKEN, fmt, ap);
	va_end(ap);
}

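/* Dump the whole in-memory log book to fd, headed by its size and the
 * time the book was created. */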
static void log_dump_to_file(int fd, const struct log_book *lr)
{
	const struct log_entry *i;
	char buf[100];
	int len;
	struct log_data data;
	time_t start;

	i = list_top(&lr->log, const struct log_entry, list);
	if (!i) {
		write_all(fd, "0 bytes:\n\n", strlen("0 bytes:\n\n"));
		return;
	}

	start = lr->init_time.ts.tv_sec;
	len = snprintf(buf, sizeof(buf), "%zu bytes, %s",
		       lr->mem_used, ctime(&start));
	write_all(fd, buf, len);

	/* ctime includes \n... WTF? */
	data.prefix = "";
	data.fd = fd;
	log_each_line(lr, log_one_line, &data);
	write_all(fd, "\n\n", strlen("\n\n"));
}

/* FIXME: Dump peer logs! */
void log_backtrace_exit(void)
{
	int fd;
	char timebuf[sizeof("YYYYmmddHHMMSS")];
	char logfile[sizeof("/tmp/lightning-crash.log.") + sizeof(timebuf)];
	struct timeabs time = time_now();

	strftime(timebuf, sizeof(timebuf), "%Y%m%d%H%M%S",
		 gmtime(&time.ts.tv_sec));

	if (!crashlog)
		return;

	/* We expect to be in config dir. */
	snprintf(logfile, sizeof(logfile), "crash.log.%s", timebuf);

	fd = open(logfile, O_WRONLY|O_CREAT|O_TRUNC, 0600);
	if (fd < 0) {
		snprintf(logfile, sizeof(logfile),
			 "/tmp/lightning-crash.log.%s", timebuf);
		fd = open(logfile, O_WRONLY|O_CREAT|O_TRUNC, 0600);
	}

	/* Dump entire log. */
	if (fd >= 0) {
		log_dump_to_file(fd, crashlog->lr);
		close(fd);
		fprintf(stderr, "Log dumped in %s\n", logfile);
	}
}

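/* Print the error to stderr, record it in the crash log if we have one,
 * then abort (or exit if there is no crash log yet). */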
void fatal(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	fprintf(stderr, "\n");
	va_end(ap);

	if (!crashlog)
		exit(1);

	va_start(ap, fmt);
	logv(crashlog, LOG_BROKEN, fmt, ap);
	va_end(ap);
	abort();
}

struct log_info {
	enum log_level level;
	struct json_stream *response;
	unsigned int num_skipped;
};

static void add_skipped(struct log_info *info)
{
	if (info->num_skipped) {
		json_object_start(info->response, NULL);
		json_add_string(info->response, "type", "SKIPPED");
		json_add_num(info->response, "num_skipped", info->num_skipped);
		json_object_end(info->response);
		info->num_skipped = 0;
	}
}

static void json_add_time(struct json_stream *result, const char *fieldname,
			  struct timespec ts)
{
	char timebuf[100];

	snprintf(timebuf, sizeof(timebuf), "%lu.%09u",
		 (unsigned long)ts.tv_sec,
		 (unsigned)ts.tv_nsec);
	json_add_string(result, fieldname, timebuf);
}

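/* Per-entry callback for getlog: entries below the requested level are
 * counted as skipped; the rest become JSON objects. */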
static void log_to_json(unsigned int skipped,
			struct timerel diff,
			enum log_level level,
			const char *prefix,
			const char *log,
			const u8 *io,
			struct log_info *info)
{
	info->num_skipped += skipped;

	if (level < info->level) {
		info->num_skipped++;
		return;
	}

	add_skipped(info);

	json_object_start(info->response, NULL);
	json_add_string(info->response, "type",
			level == LOG_BROKEN ? "BROKEN"
			: level == LOG_UNUSUAL ? "UNUSUAL"
			: level == LOG_INFORM ? "INFO"
			: level == LOG_DBG ? "DEBUG"
			: level == LOG_IO_IN ? "IO_IN"
			: level == LOG_IO_OUT ? "IO_OUT"
			: "UNKNOWN");
	json_add_time(info->response, "time", diff.ts);
	json_add_string(info->response, "source", prefix);
	json_add_string(info->response, "log", log);
	if (io)
		json_add_hex_talarr(info->response, "data", io);

	json_object_end(info->response);
}

void json_add_log(struct json_stream *response,
		  const struct log_book *lr, enum log_level minlevel)
{
	struct log_info info;

	info.level = minlevel;
	info.response = response;
	info.num_skipped = 0;

	json_array_start(info.response, "log");
	log_each_line(lr, log_to_json, &info);
	add_skipped(&info);
	json_array_end(info.response);
}

bool json_tok_loglevel(struct command *cmd, const char *name,
		       const char *buffer, const jsmntok_t *tok,
		       enum log_level **level)
{
	*level = tal(cmd, enum log_level);
	if (json_tok_streq(buffer, tok, "io"))
		**level = LOG_IO_OUT;
	else if (json_tok_streq(buffer, tok, "debug"))
		**level = LOG_DBG;
	else if (json_tok_streq(buffer, tok, "info"))
		**level = LOG_INFORM;
	else if (json_tok_streq(buffer, tok, "unusual"))
		**level = LOG_UNUSUAL;
	else {
		command_fail(cmd, JSONRPC2_INVALID_PARAMS,
			     "'%s' should be 'io', 'debug', 'info', or "
			     "'unusual', not '%.*s'",
			     name, tok->end - tok->start, buffer + tok->start);
		return false;
	}
	return true;
}

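/* Handler for the getlog JSON-RPC command: optional "level" parameter
 * (default info); returns creation time, memory usage and the entries. */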
static void json_getlog(struct command *cmd,
			const char *buffer,
			const jsmntok_t *obj UNNEEDED,
			const jsmntok_t *params)
{
	struct json_stream *response;
	enum log_level *minlevel;
	struct log_book *lr = cmd->ld->log_book;

	if (!param(cmd, buffer, params,
		   p_opt_def("level", json_tok_loglevel, &minlevel,
			     LOG_INFORM),
		   NULL))
		return;

	response = json_stream_success(cmd);
	json_object_start(response, NULL);
	json_add_time(response, "created_at", log_init_time(lr)->ts);
	json_add_num(response, "bytes_used", (unsigned int) log_used(lr));
	json_add_num(response, "bytes_max", (unsigned int) log_max_mem(lr));
	json_add_log(response, lr, *minlevel);
	json_object_end(response);
	command_success(cmd, response);
}

static const struct json_command getlog_command = {
	"getlog",
	json_getlog,
	"Show logs, with optional log {level} (info|unusual|debug|io)"
};
AUTODATA(json_command, &getlog_command);