From 4b64b7f2aa315171e1f3f1c6ab9452f82a3f73b1 Mon Sep 17 00:00:00 2001
From: Christian Decker
Date: Thu, 24 Aug 2017 14:15:30 +0200
Subject: [PATCH] lightningd: Fix channel-persistence for channels with commits

I was hoping to defer HTLC updates until we actually store HTLCs, but we
need to flush to DB whenever balances update as well.

Signed-off-by: Christian Decker
---
 lightningd/peer_htlcs.c  | 14 ++++++++++++++
 tests/test_lightningd.py | 18 ++++++++++++++++--
 tests/utils.py           |  2 ++
 3 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/lightningd/peer_htlcs.c b/lightningd/peer_htlcs.c
index 5fdba9d98..84fd5beb9 100644
--- a/lightningd/peer_htlcs.c
+++ b/lightningd/peer_htlcs.c
@@ -887,6 +887,10 @@ static bool peer_save_commitsig_received(struct peer *peer, u64 commitnum)
 	peer->next_index[LOCAL]++;
 
 	/* FIXME: Save to database, with sig and HTLCs. */
+	if (!wallet_channel_save(peer->ld->wallet, peer->channel)) {
+		fatal("Could not save channel to database: %s",
+		      peer->ld->wallet->db->err);
+	}
 
 	return true;
 }
@@ -903,6 +907,11 @@ static bool peer_save_commitsig_sent(struct peer *peer, u64 commitnum)
 	peer->next_index[REMOTE]++;
 
 	/* FIXME: Save to database, with sig and HTLCs. */
+	if (!wallet_channel_save(peer->ld->wallet, peer->channel)) {
+		fatal("Could not save channel to database: %s",
+		      peer->ld->wallet->db->err);
+	}
+
 	return true;
 }
 
@@ -1207,6 +1216,11 @@ int peer_got_revoke(struct peer *peer, const u8 *msg)
 		hin = find_htlc_in(&peer->ld->htlcs_in, peer, changed[i].id);
 		local_fail_htlc(hin, failcodes[i]);
 	}
 
+	if (!wallet_channel_save(peer->ld->wallet, peer->channel)) {
+		fatal("Could not save channel to database: %s",
+		      peer->ld->wallet->db->err);
+	}
+
 	return 0;
 }
diff --git a/tests/test_lightningd.py b/tests/test_lightningd.py
index 136d8f699..c9d82432a 100644
--- a/tests/test_lightningd.py
+++ b/tests/test_lightningd.py
@@ -1022,11 +1022,17 @@ class LightningDTests(BaseLightningDTests):
         for n in (l1, l2):
             assert(n.db_query('SELECT COUNT(id) as count FROM channels;')[0]['count'] == 1)
 
+        # Perform a payment so we have something to restore
+        self.pay(l1, l2, 10000)
+        time.sleep(1)
+        assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99990000
+        assert l2.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 10000
+
+        # Stop l2, l1 will reattempt to connect
         l2.daemon.stop()
 
-        # Let the other side notice, then stop it
+        # Wait for l1 to notice
         wait_for(lambda: not l1.rpc.getpeers()['peers'][0]['connected'])
-        #l1.daemon.stop()
 
         # Now restart l1 and it should reload peers/channels from the DB
         l2.daemon.start()
@@ -1037,6 +1043,14 @@
 
         # Now make sure this is really functional by sending a payment
         self.pay(l1, l2, 10000)
+        time.sleep(1)
+        assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99980000
+        assert l2.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 20000
+
+        # Finally restart l1, and make sure it remembers
+        l1.daemon.stop()
+        l1.daemon.start()
+        assert l1.rpc.getpeers()['peers'][0]['msatoshi_to_us'] == 99980000
 
 
 class LegacyLightningDTests(BaseLightningDTests):
diff --git a/tests/utils.py b/tests/utils.py
index 48e0e7490..2e36b63f1 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -61,6 +61,8 @@ class TailableProc(object):
     def stop(self):
         self.proc.terminate()
         self.proc.kill()
+        self.proc.wait()
+        self.thread.join()
         if self.outputDir:
             logpath = os.path.join(self.outputDir, 'log')
             with open(logpath, 'w') as f: