Mirror of https://github.com/ElementsProject/lightning.git (synced 2025-01-17 19:03:42 +01:00)
log: make formatting more consistent.
1. Printed form is always "[<nodeid>-]<prefix>: <string>".
2. "jcon fd %i" becomes "jsonrpc #%i".
3. The "jsonrpc" log is only used once, and is removed.
4. The "database" log prefix is used for db accesses.
5. "lightningd(%i)" becomes simply "lightningd", without the pid.
6. The "lightning_" prefix is stripped from subd log prefixes, and the pid is removed.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Changelog-changed: Logging: formatting made uniform: [NODEID-]SUBSYSTEM: MESSAGE
Changelog-removed: `lightning_` prefixes removed from subdaemon names, including in listpeers `owner` field.
parent 86fb54a33b
commit ef7a820ab1
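Before the diff itself, a minimal self-contained sketch of the new framing may help. The helper below is illustrative only, not lightningd code (the real formatting is in the log_to_file() hunk further down); the node id is made up, and the sample message mirrors one the tests below grep for.

#include <stdio.h>

/* Hypothetical helper, only to illustrate the uniform framing
 * "[<nodeid>-]<prefix>: <string>" described in the commit message. */
static void print_log_line(const char *node_id, const char *prefix,
                           const char *str)
{
        if (node_id)
                printf("%s-%s: %s\n", node_id, prefix, str);
        else
                printf("%s: %s\n", prefix, str);
}

int main(void)
{
        /* Daemon-level message: the prefix is now just "lightningd", no pid. */
        print_log_line(NULL, "lightningd", "Server started");
        /* Per-peer subdaemon message: the "lightning_" prefix is stripped and
         * the per-channel suffix kept, e.g. "openingd-chan #1". */
        print_log_line("02cafe...", "openingd-chan #1", "peer_in WIRE_ERROR");
        return 0;
}

The same rename is why the tests below now grep for "openingd" and "channeld" rather than "lightning_openingd" and "lightning_channeld", and why plugin and jsonrpc prefixes gain an explicit ": " separator.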
@@ -735,10 +735,10 @@ class LightningNode(object):
     def subd_pid(self, subd, peerid=None):
         """Get the process id of the given subdaemon, eg channeld or gossipd"""
         if peerid:
-            ex = re.compile(r'{}-.*lightning_{}.*: pid ([0-9]*),'
+            ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
                             .format(peerid, subd))
         else:
-            ex = re.compile('lightning_{}-.*: pid ([0-9]*),'.format(subd))
+            ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
         # Make sure we get latest one if it's restarted!
         for l in reversed(self.daemon.logs):
             group = ex.search(l)
@@ -1028,7 +1028,7 @@ class NodeFactory(object):
         # getpeers.
         if not fundchannel:
             for src, dst in connections:
-                dst.daemon.wait_for_log(r'{}-.*lightning_openingd-chan #[0-9]*: Handed peer, entering loop'.format(src.info['id']))
+                dst.daemon.wait_for_log(r'{}-.*openingd-chan #[0-9]*: Handed peer, entering loop'.format(src.info['id']))
             return nodes

         # If we got here, we want to fund channels
@@ -211,7 +211,7 @@ struct channel *new_channel(struct peer *peer, u64 dbid,
                 channel->log = new_log(channel,
                                        peer->ld->log_book,
                                        &channel->peer->id,
-                                       "chan #%"PRIu64":",
+                                       "chan #%"PRIu64,
                                        dbid);
         } else
                 channel->log = tal_steal(channel, log);
@@ -105,7 +105,6 @@ struct json_connection {
 struct jsonrpc {
         struct io_listener *rpc_listener;
         struct json_command **commands;
-        struct log *log;

         /* Map from json command names to usage strings: we don't put this inside
          * struct json_command as it's good practice to have those const. */
@@ -903,7 +902,7 @@ static struct io_plan *jcon_connected(struct io_conn *conn,
         list_head_init(&jcon->commands);

         /* We want to log on destruction, so we free this in destructor. */
-        jcon->log = new_log(ld->log_book, ld->log_book, NULL, "jcon fd %i:",
+        jcon->log = new_log(ld->log_book, ld->log_book, NULL, "jsonrpc #%i",
                             io_conn_fd(conn));

         tal_add_destructor(jcon, destroy_jcon);
@@ -1011,7 +1010,6 @@ void jsonrpc_setup(struct lightningd *ld)
         ld->jsonrpc = tal(ld, struct jsonrpc);
         strmap_init(&ld->jsonrpc->usagemap);
         ld->jsonrpc->commands = tal_arr(ld->jsonrpc, struct json_command *, 0);
-        ld->jsonrpc->log = new_log(ld->jsonrpc, ld->log_book, NULL, "jsonrpc");
         for (size_t i=0; i<num_cmdlist; i++) {
                 if (!jsonrpc_command_add_perm(ld, ld->jsonrpc, commands[i]))
                         fatal("Cannot add duplicate command %s",
@@ -1081,7 +1079,6 @@ void jsonrpc_listen(struct jsonrpc *jsonrpc, struct lightningd *ld)
                 err(1, "Listening on '%s'", rpc_filename);
         jsonrpc->rpc_listener = io_new_listener(
                 ld->rpc_filename, fd, incoming_jcon_connected, ld);
-        log_debug(jsonrpc->log, "Listening on '%s'", ld->rpc_filename);
 }

 static struct command_result *param_command(struct command *cmd,
@@ -169,8 +169,7 @@ static struct lightningd *new_lightningd(const tal_t *ctx)
         /*~ Note the tal context arg (by convention, the first argument to any
          * allocation function): ld->log will be implicitly freed when ld
          * is. */
-        ld->log = new_log(ld, ld->log_book, NULL,
-                          "lightningd(%u):", (int)getpid());
+        ld->log = new_log(ld, ld->log_book, NULL, "lightningd");
         ld->logfile = NULL;

         /*~ We explicitly set these to NULL: if they're still NULL after option
@@ -700,7 +699,7 @@ int main(int argc, char *argv[])
         /*~ Our "wallet" code really wraps the db, which is more than a simple
          * bitcoin wallet (though it's that too). It also stores channel
          * states, invoices, payments, blocks and bitcoin transactions. */
-        ld->wallet = wallet_new(ld, ld->log, ld->timers);
+        ld->wallet = wallet_new(ld, ld->timers);

         /*~ We keep a filter of scriptpubkeys we're interested in. */
         ld->owned_txfilter = txfilter_new(ld);
@@ -90,20 +90,20 @@ static void log_to_file(const char *prefix,
                 const char *dir = level == LOG_IO_IN ? "[IN]" : "[OUT]";
                 char *hex = tal_hexstr(NULL, io, io_len);
                 if (!node_id)
-                        fprintf(logf, "%s %s%s%s %s\n",
+                        fprintf(logf, "%s %s: %s%s %s\n",
                                 iso8601_s, prefix, str, dir, hex);
                 else
-                        fprintf(logf, "%s %s-%s%s%s %s\n",
+                        fprintf(logf, "%s %s-%s: %s%s %s\n",
                                 iso8601_s,
                                 node_id_to_hexstr(tmpctx, node_id),
                                 prefix, str, dir, hex);
                 tal_free(hex);
         } else if (!continued) {
                 if (!node_id)
-                        fprintf(logf, "%s %s %s %s\n",
+                        fprintf(logf, "%s %s %s: %s\n",
                                 iso8601_s, level_prefix(level), prefix, str);
                 else
-                        fprintf(logf, "%s %s %s-%s %s\n",
+                        fprintf(logf, "%s %s %s-%s: %s\n",
                                 iso8601_s, level_prefix(level),
                                 node_id_to_hexstr(tmpctx, node_id),
                                 prefix, str);
@@ -629,7 +629,7 @@ new_uncommitted_channel(struct peer *peer)
         uc->dbid = wallet_get_channel_dbid(ld->wallet);

         uc->log = new_log(uc, ld->log_book, &uc->peer->id,
-                          "chan #%"PRIu64":", uc->dbid);
+                          "chan #%"PRIu64, uc->dbid);

         uc->fc = NULL;
         uc->our_config.id = 0;
@@ -1803,7 +1803,7 @@ static void set_channel_fees(struct command *cmd, struct channel *channel,
         channel->feerate_ppm = ppm;

         /* tell channeld to make a send_channel_update */
-        if (channel->owner && streq(channel->owner->name, "lightning_channeld"))
+        if (channel->owner && streq(channel->owner->name, "channeld"))
                 subd_send_msg(channel->owner,
                               take(towire_channel_specific_feerates(NULL, base, ppm)));

@@ -2010,7 +2010,7 @@ static struct command_result *json_dev_reenable_commit(struct command *cmd,
                                     "Peer has no owner");
         }

-        if (!streq(channel->owner->name, "lightning_channeld")) {
+        if (!streq(channel->owner->name, "channeld")) {
                 return command_fail(cmd, LIGHTNINGD,
                                     "Peer owned by %s", channel->owner->name);
         }
@@ -2217,7 +2217,7 @@ static void peer_memleak_req_next(struct command *cmd, struct channel *prev)
                         continue;

                 /* Note: closingd does its own checking automatically */
-                if (streq(c->owner->name, "lightning_channeld")) {
+                if (streq(c->owner->name, "channeld")) {
                         subd_req(c, c->owner,
                                  take(towire_channel_dev_memleak(NULL)),
                                  -1, 0, channeld_memleak_req_done, cmd);
@@ -2226,7 +2226,7 @@ static void peer_memleak_req_next(struct command *cmd, struct channel *prev)
                                  cmd);
                         return;
                 }
-                if (streq(c->owner->name, "lightning_onchaind")) {
+                if (streq(c->owner->name, "onchaind")) {
                         subd_req(c, c->owner,
                                  take(towire_onchain_dev_memleak(NULL)),
                                  -1, 0, onchaind_memleak_req_done, cmd);
@@ -638,12 +638,14 @@ static struct subd *new_subd(struct lightningd *ld,
                 return tal_free(sd);
         }
         sd->ld = ld;
+        /* This part of the name is a bit redundant for logging */
+        if (strstarts(name, "lightning_"))
+                name += strlen("lightning_");
         if (base_log) {
                 sd->log = new_log(sd, ld->log_book, node_id,
                                   "%s-%s", name, log_prefix(base_log));
         } else {
-                sd->log = new_log(sd, ld->log_book, node_id,
-                                  "%s(%u):", name, sd->pid);
+                sd->log = new_log(sd, ld->log_book, node_id, "%s", name);
         }

         sd->name = name;
@@ -188,8 +188,7 @@ bool wallet_network_check(struct wallet *w UNNEEDED,
                           const struct chainparams *chainparams UNNEEDED)
 { fprintf(stderr, "wallet_network_check called!\n"); abort(); }
 /* Generated stub for wallet_new */
-struct wallet *wallet_new(struct lightningd *ld UNNEEDED,
-                          struct log *log UNNEEDED, struct timers *timers UNNEEDED)
+struct wallet *wallet_new(struct lightningd *ld UNNEEDED, struct timers *timers UNNEEDED)
 { fprintf(stderr, "wallet_new called!\n"); abort(); }
 /* AUTOGENERATED MOCKS END */

@@ -179,6 +179,12 @@ void json_add_log(struct json_stream *result UNNEEDED,
                   const struct node_id *node_id UNNEEDED,
                   enum log_level minlevel UNNEEDED)
 { fprintf(stderr, "json_add_log called!\n"); abort(); }
+/* Generated stub for json_add_member */
+void json_add_member(struct json_stream *js UNNEEDED,
+                     const char *fieldname UNNEEDED,
+                     bool quote UNNEEDED,
+                     const char *fmt UNNEEDED, ...)
+{ fprintf(stderr, "json_add_member called!\n"); abort(); }
 /* Generated stub for json_add_node_id */
 void json_add_node_id(struct json_stream *response UNNEEDED,
                       const char *fieldname UNNEEDED,
@@ -364,7 +364,7 @@ def test_reconnect_openingd(node_factory):

     # We should get a message about reconnecting.
     l2.daemon.wait_for_log('Killing openingd: Reconnected')
-    l2.daemon.wait_for_log('lightning_openingd.*Handed peer, entering loop')
+    l2.daemon.wait_for_log('openingd.*Handed peer, entering loop')

     # Should work fine.
     l1.rpc.fundchannel(l2.info['id'], 20000)
@@ -373,7 +373,7 @@ def test_reconnect_openingd(node_factory):
     l1.bitcoin.generate_block(3)

     # Just to be sure, second openingd hand over to channeld. This log line is about channeld being started
-    l2.daemon.wait_for_log(r'lightning_channeld-chan #[0-9]: pid [0-9]+, msgfd [0-9]+')
+    l2.daemon.wait_for_log(r'channeld-chan #[0-9]: pid [0-9]+, msgfd [0-9]+')


 @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
@@ -881,7 +881,7 @@ def test_funding_fail(node_factory, bitcoind):

     # Should still be connected.
     assert only_one(l1.rpc.listpeers()['peers'])['connected']
-    l2.daemon.wait_for_log('lightning_openingd-.*: Handed peer, entering loop')
+    l2.daemon.wait_for_log('openingd-.*: Handed peer, entering loop')
     assert only_one(l2.rpc.listpeers()['peers'])['connected']

     # This works.
@@ -1424,7 +1424,7 @@ def test_fee_limits(node_factory):
     l1.set_feerates((15, 15, 15), False)
     l1.start()

-    l1.daemon.wait_for_log('Peer permanent failure in CHANNELD_NORMAL: lightning_channeld: received ERROR channel .*: update_fee 253 outside range 1875-75000')
+    l1.daemon.wait_for_log('Peer permanent failure in CHANNELD_NORMAL: channeld: received ERROR channel .*: update_fee 253 outside range 1875-75000')
     # Make sure the resolution of this one doesn't interfere with the next!
     # Note: may succeed, may fail with insufficient fee, depending on how
     # bitcoind feels!
@@ -486,7 +486,7 @@ def test_gossip_no_empty_announcements(node_factory, bitcoind):
     # Turn on IO logging for openingd (make sure it's ready!)
     l1.daemon.wait_for_log('openingd-.*: Handed peer, entering loop')
     subprocess.run(['kill', '-USR1', l1.subd_pid('openingd')])
-    l2.daemon.wait_for_log(r'{}-.*lightning_openingd-chan #.: Handed peer, entering loop'.format(l3.info['id']))
+    l2.daemon.wait_for_log(r'{}-.*openingd-chan #.: Handed peer, entering loop'.format(l3.info['id']))
    subprocess.run(['kill', '-USR1', l2.subd_pid('openingd', l3.info['id'])])

     # Make an announced-but-not-updated channel.
@@ -664,9 +664,9 @@ def test_io_logging(node_factory, executor):
     fut = executor.submit(l1.pay, l2, 200000000)

     # WIRE_UPDATE_ADD_HTLC = 128 = 0x0080
-    l1.daemon.wait_for_log(r'channeld.*:\[OUT\] 0080')
+    l1.daemon.wait_for_log(r'channeld.*: \[OUT\] 0080')
     # WIRE_UPDATE_FULFILL_HTLC = 130 = 0x0082
-    l1.daemon.wait_for_log(r'channeld.*:\[IN\] 0082')
+    l1.daemon.wait_for_log(r'channeld.*: \[IN\] 0082')
     fut.result(10)

     # Send it sigusr1: should turn off logging.
@@ -674,9 +674,9 @@ def test_io_logging(node_factory, executor):

     l1.pay(l2, 200000000)

-    assert not l1.daemon.is_in_log(r'channeld.*:\[OUT\] 0080',
+    assert not l1.daemon.is_in_log(r'channeld.*: \[OUT\] 0080',
                                    start=l1.daemon.logsearch_start)
-    assert not l1.daemon.is_in_log(r'channeld.*:\[IN\] 0082',
+    assert not l1.daemon.is_in_log(r'channeld.*: \[IN\] 0082',
                                    start=l1.daemon.logsearch_start)

     # IO logs should not appear in peer logs.
@@ -1171,7 +1171,7 @@ def test_reserve_enforcement(node_factory, executor):
     # kill us for trying to violate reserve.
     executor.submit(l2.pay, l1, 1000000)
     l1.daemon.wait_for_log(
-        'Peer permanent failure in CHANNELD_NORMAL: lightning_channeld: sent '
+        'Peer permanent failure in CHANNELD_NORMAL: channeld: sent '
         'ERROR Bad peer_add_htlc: CHANNEL_ERR_CHANNEL_CAPACITY_EXCEEDED'
     )

@@ -1205,7 +1205,7 @@ def test_htlc_send_timeout(node_factory, bitcoind):
     timedout = False
     while not timedout:
         try:
-            l2.daemon.wait_for_log(r'channeld-chan #[0-9]*:\[IN\] ', timeout=30)
+            l2.daemon.wait_for_log(r'channeld-chan #[0-9]*: \[IN\] ', timeout=30)
         except TimeoutError:
             timedout = True

@@ -1227,11 +1227,11 @@ def test_htlc_send_timeout(node_factory, bitcoind):
     assert status['attempts'][0]['failure']['data']['erring_channel'] == chanid2

     # L2 should send ping, but never receive pong so never send commitment.
-    l2.daemon.wait_for_log(r'channeld.*:\[OUT\] 0012')
-    assert not l2.daemon.is_in_log(r'channeld.*:\[IN\] 0013')
-    assert not l2.daemon.is_in_log(r'channeld.*:\[OUT\] 0084')
+    l2.daemon.wait_for_log(r'channeld.*: \[OUT\] 0012')
+    assert not l2.daemon.is_in_log(r'channeld.*: \[IN\] 0013')
+    assert not l2.daemon.is_in_log(r'channeld.*: \[OUT\] 0084')
     # L2 killed the channel with l3 because it was too slow.
-    l2.daemon.wait_for_log('{}-.*lightning_channeld-.*Adding HTLC too slow: killing connection'.format(l3.info['id']))
+    l2.daemon.wait_for_log('{}-.*channeld-.*Adding HTLC too slow: killing connection'.format(l3.info['id']))


 def test_ipv4_and_ipv6(node_factory):
@@ -247,7 +247,7 @@ def test_pay_disconnect(node_factory, bitcoind):
     l1.set_feerates((10**6, 1000**6, 1000**6), False)

     # Wait for l1 notice
-    l1.daemon.wait_for_log(r'Peer permanent failure in CHANNELD_NORMAL: lightning_channeld: received ERROR channel .*: update_fee \d+ outside range 1875-75000')
+    l1.daemon.wait_for_log(r'Peer permanent failure in CHANNELD_NORMAL: channeld: received ERROR channel .*: update_fee \d+ outside range 1875-75000')

     # Should fail due to permenant channel fail
     with pytest.raises(RpcError, match=r'failed: WIRE_UNKNOWN_NEXT_PEER \(First peer not ready\)'):
@@ -263,7 +263,7 @@ def test_plugin_connected_hook(node_factory):
     l1.daemon.wait_for_log(r"{} is in reject list".format(l3.info['id']))

     # FIXME: this error occurs *after* connection, so we connect then drop.
-    l3.daemon.wait_for_log(r"lightning_openingd-chan #1: peer_in WIRE_ERROR")
+    l3.daemon.wait_for_log(r"openingd-chan #1: peer_in WIRE_ERROR")
     l3.daemon.wait_for_log(r"You are in reject list")

     def check_disconnect():
@@ -307,11 +307,11 @@ def test_db_hook(node_factory, executor):
     # It should see the db being created, and sometime later actually get
     # initted.
     # This precedes startup, so needle already past
-    assert l1.daemon.is_in_log(r'plugin-dblog.py deferring \d+ commands')
+    assert l1.daemon.is_in_log(r'plugin-dblog.py: deferring \d+ commands')
     l1.daemon.logsearch_start = 0
-    l1.daemon.wait_for_log('plugin-dblog.py replaying pre-init data:')
-    l1.daemon.wait_for_log('plugin-dblog.py CREATE TABLE version \\(version INTEGER\\)')
-    l1.daemon.wait_for_log("plugin-dblog.py initialized.* 'startup': True")
+    l1.daemon.wait_for_log('plugin-dblog.py: replaying pre-init data:')
+    l1.daemon.wait_for_log('plugin-dblog.py: CREATE TABLE version \\(version INTEGER\\)')
+    l1.daemon.wait_for_log("plugin-dblog.py: initialized.* 'startup': True")

     l1.stop()

@@ -394,18 +394,18 @@ def test_openchannel_hook(node_factory, bitcoind):
     l1.rpc.fundchannel(l2.info['id'], 100000)

     # Make sure plugin got all the vars we expect
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py 11 VARS')
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py channel_flags=1')
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py channel_reserve_satoshis=1000000msat')
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py dust_limit_satoshis=546000msat')
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py feerate_per_kw=7500')
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py funding_satoshis=100000000msat')
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py htlc_minimum_msat=0msat')
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py id={}'.format(l1.info['id']))
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py max_accepted_htlcs=483')
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py max_htlc_value_in_flight_msat=18446744073709551615msat')
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py push_msat=0msat')
-    l2.daemon.wait_for_log('reject_odd_funding_amounts.py to_self_delay=5')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: 11 VARS')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: channel_flags=1')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: channel_reserve_satoshis=1000000msat')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: dust_limit_satoshis=546000msat')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: feerate_per_kw=7500')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: funding_satoshis=100000000msat')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: htlc_minimum_msat=0msat')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: id={}'.format(l1.info['id']))
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: max_accepted_htlcs=483')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: max_htlc_value_in_flight_msat=18446744073709551615msat')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: push_msat=0msat')
+    l2.daemon.wait_for_log('reject_odd_funding_amounts.py: to_self_delay=5')

     # Close it.
     txid = l1.rpc.close(l2.info['id'])['txid']
@@ -557,25 +557,25 @@ def test_warning_notification(node_factory):
     l1.rpc.call('pretendbad', {'event': event, 'level': 'warn'})

     # ensure an unusual log_entry was produced by 'pretendunusual' method
-    l1.daemon.wait_for_log('plugin-pretend_badlog.py Test warning notification\\(for unusual event\\)')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: Test warning notification\\(for unusual event\\)')

     # now wait for notification
-    l1.daemon.wait_for_log('plugin-pretend_badlog.py Received warning')
-    l1.daemon.wait_for_log('plugin-pretend_badlog.py level: warn')
-    l1.daemon.wait_for_log('plugin-pretend_badlog.py time: *')
-    l1.daemon.wait_for_log('plugin-pretend_badlog.py source: plugin-pretend_badlog.py')
-    l1.daemon.wait_for_log('plugin-pretend_badlog.py log: Test warning notification\\(for unusual event\\)')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: Received warning')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: level: warn')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: time: *')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: source: plugin-pretend_badlog.py')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: log: Test warning notification\\(for unusual event\\)')

     # 2. test 'error' level, steps like above
     event = "Test warning notification(for broken event)"
     l1.rpc.call('pretendbad', {'event': event, 'level': 'error'})
-    l1.daemon.wait_for_log(r'\*\*BROKEN\*\* plugin-pretend_badlog.py Test warning notification\(for broken event\)')
+    l1.daemon.wait_for_log(r'\*\*BROKEN\*\* plugin-pretend_badlog.py: Test warning notification\(for broken event\)')

-    l1.daemon.wait_for_log('plugin-pretend_badlog.py Received warning')
-    l1.daemon.wait_for_log('plugin-pretend_badlog.py level: error')
-    l1.daemon.wait_for_log('plugin-pretend_badlog.py time: *')
-    l1.daemon.wait_for_log('plugin-pretend_badlog.py source: plugin-pretend_badlog.py')
-    l1.daemon.wait_for_log('plugin-pretend_badlog.py log: Test warning notification\\(for broken event\\)')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: Received warning')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: level: error')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: time: *')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: source: plugin-pretend_badlog.py')
+    l1.daemon.wait_for_log('plugin-pretend_badlog.py: log: Test warning notification\\(for broken event\\)')


 @unittest.skipIf(not DEVELOPER, "needs to deactivate shadow routing")
@@ -841,10 +841,10 @@ static void db_migrate(struct lightningd *ld, struct db *db)
         db_commit_transaction(db);
 }

-struct db *db_setup(const tal_t *ctx, struct lightningd *ld, struct log *log)
+struct db *db_setup(const tal_t *ctx, struct lightningd *ld)
 {
         struct db *db = db_open(ctx, ld->wallet_dsn);
-        db->log = log;
+        db->log = new_log(db, ld->log_book, NULL, "database");
         db_migrate(ld, db);
         return db;
 }
@@ -53,9 +53,8 @@ struct db;
  * Params:
  *  @ctx: the tal_t context to allocate from
  *  @ld: the lightningd context to hand to upgrade functions.
- *  @log: where to log messages to
  */
-struct db *db_setup(const tal_t *ctx, struct lightningd *ld, struct log *log);
+struct db *db_setup(const tal_t *ctx, struct lightningd *ld);

 /**
  * db_begin_transaction - Begin a transaction
@@ -10,7 +10,6 @@
 #include <common/timeout.h>
 #include <common/utils.h>
 #include <lightningd/invoice.h>
-#include <lightningd/log.h>
 #include <sodium/randombytes.h>
 #include <string.h>

@@ -32,8 +31,6 @@ struct invoice_waiter {
 struct invoices {
         /* The database connection to use. */
         struct db *db;
-        /* The log to report to. */
-        struct log *log;
         /* The timers object to use for expirations. */
         struct timers *timers;
         /* Waiters waiting for invoices to be paid, expired, or deleted. */
@@ -140,13 +137,11 @@ static void install_expiration_timer(struct invoices *invoices);

 struct invoices *invoices_new(const tal_t *ctx,
                               struct db *db,
-                              struct log *log,
                               struct timers *timers)
 {
         struct invoices *invs = tal(ctx, struct invoices);

         invs->db = db;
-        invs->log = log;
         invs->timers = timers;

         list_head_init(&invs->waiters);
@@ -13,7 +13,6 @@ struct invoice;
 struct invoice_details;
 struct invoice_iterator;
 struct invoices;
-struct log;
 struct sha256;
 struct timers;

@@ -22,12 +21,10 @@ struct timers;
 *
 * @ctx - the owner of the invoice handler.
 * @db - the database connection to use for saving invoice.
- * @log - the log to report to.
 * @timers - the timers object to use for expirations.
 */
 struct invoices *invoices_new(const tal_t *ctx,
                               struct db *db,
-                              struct log *log,
                               struct timers *timers);

 /**
@@ -27,6 +27,11 @@ size_t bigsize_put(u8 buf[BIGSIZE_MAX_LEN] UNNEEDED, bigsize_t v UNNEEDED)
 /* Generated stub for fatal */
 void fatal(const char *fmt UNNEEDED, ...)
 { fprintf(stderr, "fatal called!\n"); abort(); }
+/* Generated stub for new_log */
+struct log *new_log(const tal_t *ctx UNNEEDED, struct log_book *record UNNEEDED,
+                    const struct node_id *default_node_id UNNEEDED,
+                    const char *fmt UNNEEDED, ...)
+{ fprintf(stderr, "new_log called!\n"); abort(); }
 /* AUTOGENERATED MOCKS END */

 static char *db_err;
@@ -181,7 +181,6 @@ const struct invoice_details *invoices_iterator_deref(
 /* Generated stub for invoices_new */
 struct invoices *invoices_new(const tal_t *ctx UNNEEDED,
                               struct db *db UNNEEDED,
-                              struct log *log UNNEEDED,
                               struct timers *timers UNNEEDED)
 { fprintf(stderr, "invoices_new called!\n"); abort(); }
 /* Generated stub for invoices_resolve */
@@ -253,6 +252,12 @@ void json_add_log(struct json_stream *result UNNEEDED,
                   const struct node_id *node_id UNNEEDED,
                   enum log_level minlevel UNNEEDED)
 { fprintf(stderr, "json_add_log called!\n"); abort(); }
+/* Generated stub for json_add_member */
+void json_add_member(struct json_stream *js UNNEEDED,
+                     const char *fieldname UNNEEDED,
+                     bool quote UNNEEDED,
+                     const char *fmt UNNEEDED, ...)
+{ fprintf(stderr, "json_add_member called!\n"); abort(); }
 /* Generated stub for json_add_node_id */
 void json_add_node_id(struct json_stream *response UNNEEDED,
                       const char *fieldname UNNEEDED,
@@ -50,19 +50,18 @@ static void outpointfilters_init(struct wallet *w)
         tal_free(stmt);
 }

-struct wallet *wallet_new(struct lightningd *ld,
-                          struct log *log, struct timers *timers)
+struct wallet *wallet_new(struct lightningd *ld, struct timers *timers)
 {
         struct wallet *wallet = tal(ld, struct wallet);
         wallet->ld = ld;
-        wallet->db = db_setup(wallet, ld, log);
-        wallet->log = log;
+        wallet->db = db_setup(wallet, ld);
+        wallet->log = new_log(wallet, ld->log_book, NULL, "wallet");
         wallet->bip32_base = NULL;
         list_head_init(&wallet->unstored_payments);
         list_head_init(&wallet->unreleased_txs);

         db_begin_transaction(wallet->db);
-        wallet->invoices = invoices_new(wallet, wallet->db, log, timers);
+        wallet->invoices = invoices_new(wallet, wallet->db, timers);
         outpointfilters_init(wallet);
         db_commit_transaction(wallet->db);
         return wallet;
@@ -319,8 +319,7 @@ struct wallet_transaction {
 * This is guaranteed to either return a valid wallet, or abort with
 * `fatal` if it cannot be initialized.
 */
-struct wallet *wallet_new(struct lightningd *ld,
-                          struct log *log, struct timers *timers);
+struct wallet *wallet_new(struct lightningd *ld, struct timers *timers);

 /**
 * wallet_add_utxo - Register an UTXO which we (partially) own