bkpr: migration to delete any duplicate lease_fee entries
Clean up for #5557. If you've got duplicate 'lease_fee' entries, we delete them!
Parent: 1980ba420b
Commit: c143914ebf
4 changed files with 69 additions and 2 deletions
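For context: the bug being cleaned up left more than one 'lease_fee' row per account in the bookkeeper's channel_events table. A quick way to check whether a node is affected is to query the accounts database directly; the sketch below assumes the sqlite3 backend, and the path and connection handling are illustrative, not part of this commit:

    import os
    import sqlite3

    # Illustrative path: bookkeeper keeps accounts.sqlite3 under the node's
    # network directory (e.g. ~/.lightning/bitcoin on mainnet).
    db_path = os.path.expanduser("~/.lightning/bitcoin/accounts.sqlite3")

    conn = sqlite3.connect(db_path)
    # Any account with more than one 'lease_fee' event has duplicates that
    # the migration in this commit will delete.
    dupes = conn.execute(
        "SELECT account_id, COUNT(*) AS n"
        " FROM channel_events"
        " WHERE tag = 'lease_fee'"
        " GROUP BY account_id"
        " HAVING COUNT(*) > 1"
    ).fetchall()

    for account_id, n in dupes:
        print(f"account {account_id}: {n} lease_fee entries")
    conn.close()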
@@ -1435,8 +1435,8 @@ class NodeFactory(object):
         return [j.result() for j in jobs]
 
     def get_node(self, node_id=None, options=None, dbfile=None,
-                 feerates=(15000, 11000, 7500, 3750), start=True,
-                 wait_for_bitcoind_sync=True, may_fail=False,
+                 bkpr_dbfile=None, feerates=(15000, 11000, 7500, 3750),
+                 start=True, wait_for_bitcoind_sync=True, may_fail=False,
                  expect_fail=False, cleandir=True, **kwargs):
         self.throttler.wait()
         node_id = self.get_node_id() if not node_id else node_id
@@ -1470,6 +1470,12 @@ class NodeFactory(object):
             with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
                 out.write(f.read())
 
+        if bkpr_dbfile:
+            out = open(os.path.join(node.daemon.lightning_dir, TEST_NETWORK,
+                                    'accounts.sqlite3'), 'xb')
+            with lzma.open(os.path.join('tests/data', bkpr_dbfile), 'rb') as f:
+                out.write(f.read())
+
         if start:
             try:
                 node.start(wait_for_bitcoind_sync)
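The bkpr_dbfile seeding above follows the same pattern as the existing dbfile handling: an xz-compressed snapshot from tests/data is decompressed into the node's accounts.sqlite3 before startup. A standalone sketch of that pattern (the paths here are hypothetical, chosen only to make the snippet runnable on its own):

    import lzma
    import os

    # Hypothetical locations: a compressed snapshot and a node's network dir.
    src = os.path.join('tests/data', 'dupe_lease_fee.sqlite3.xz')
    network_dir = os.path.join('/tmp/l1-lightning', 'regtest')

    os.makedirs(network_dir, exist_ok=True)
    # 'xb' creates the file and refuses to clobber an existing database,
    # matching the diff above.
    with open(os.path.join(network_dir, 'accounts.sqlite3'), 'xb') as out:
        with lzma.open(src, 'rb') as f:
            out.write(f.read())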
@@ -15,6 +15,8 @@ struct migration {
 
 static struct plugin *plugin;
 
+static void migration_remove_dupe_lease_fees(struct plugin *p, struct db *db);
+
 /* Do not reorder or remove elements from this array.
  * It is used to migrate existing databases from a prevoius state, based on
  * string indicies */
@@ -99,6 +101,7 @@ static struct migration db_migrations[] = {
         {SQL("ALTER TABLE chain_events ADD ev_desc TEXT DEFAULT NULL;"), NULL},
         {SQL("ALTER TABLE channel_events ADD ev_desc TEXT DEFAULT NULL;"), NULL},
         {SQL("ALTER TABLE channel_events ADD rebalance_id BIGINT DEFAULT NULL;"), NULL},
+        {NULL, migration_remove_dupe_lease_fees}
 };
 
 static bool db_migrate(struct plugin *p, struct db *db, bool *created)
@@ -141,6 +144,49 @@ static bool db_migrate(struct plugin *p, struct db *db, bool *created)
         return current != orig;
 }
 
+static void migration_remove_dupe_lease_fees(struct plugin *p, struct db *db)
+{
+        struct db_stmt *stmt, *del_stmt;
+        u64 *last_acct_id;
+
+        stmt = db_prepare_v2(db, SQL("SELECT"
+                                     " id"
+                                     ", account_id"
+                                     " FROM channel_events"
+                                     " WHERE tag = 'lease_fee'"
+                                     " ORDER BY account_id"));
+        db_query_prepared(stmt);
+        last_acct_id = NULL;
+        while (db_step(stmt)) {
+                u64 id, acct_id;
+                id = db_col_u64(stmt, "id");
+                acct_id = db_col_u64(stmt, "account_id");
+
+                if (!last_acct_id) {
+                        last_acct_id = tal(stmt, u64);
+                        *last_acct_id = acct_id;
+                        continue;
+                }
+
+                if (*last_acct_id != acct_id) {
+                        *last_acct_id = acct_id;
+                        continue;
+                }
+
+                plugin_log(plugin, LOG_INFORM,
+                           "Duplicate 'lease_fee' found for"
+                           " account %"PRIu64", deleting dupe",
+                           id);
+
+                /* same acct as last, we found a duplicate */
+                del_stmt = db_prepare_v2(db, SQL("DELETE FROM channel_events"
+                                                 " WHERE id=?"));
+                db_bind_u64(del_stmt, 0, id);
+                db_exec_prepared_v2(take(del_stmt));
+        }
+        tal_free(stmt);
+}
+
 /* Implement db_fatal, as a wrapper around fatal.
  * We use a ifndef block so that it can get be
  * implemented in a test file first, if necessary */
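In short, the migration walks the 'lease_fee' events ordered by account_id, keeps the first row it sees for each account, and deletes every later row for the same account. For anyone who wants to dry-run that cleanup against a copy of their database, a rough Python equivalent (a sketch assuming the sqlite3 backend; this is not the plugin's code path):

    import sqlite3

    # Operate on a copy of accounts.sqlite3, never the live file.
    conn = sqlite3.connect("accounts-copy.sqlite3")

    last_acct_id = None
    to_delete = []
    for row_id, acct_id in conn.execute(
            "SELECT id, account_id FROM channel_events"
            " WHERE tag = 'lease_fee' ORDER BY account_id"):
        if last_acct_id is None or acct_id != last_acct_id:
            # First lease_fee seen for this account: keep it.
            last_acct_id = acct_id
            continue
        # Same account as the previous row: a duplicate, mark for deletion.
        to_delete.append(row_id)

    for row_id in to_delete:
        conn.execute("DELETE FROM channel_events WHERE id = ?", (row_id,))
    conn.commit()
    conn.close()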
BIN  tests/data/dupe_lease_fee.sqlite3.xz  (new file; binary content not shown)
@@ -1,6 +1,7 @@
 from fixtures import *  # noqa: F401,F403
 from decimal import Decimal
 from pyln.client import Millisatoshi
+from db import Sqlite3Db
 from fixtures import TEST_NETWORK
 from utils import (
     sync_blockheight, wait_for, only_one, first_channel_id, TIMEOUT
@@ -708,3 +709,17 @@ def test_rebalance_tracking(node_factory, bitcoind):
     assert outbound_ev['debit_msat'] == Millisatoshi(1001)
     assert outbound_ev['credit_msat'] == Millisatoshi(0)
     assert outbound_ev['payment_id'] == pay_hash
+
+
+@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "This test is based on a sqlite3 snapshot")
+def test_bookkeeper_lease_fee_dupe_migration(node_factory):
+    """ Check that if there's duplicate lease_fees, we remove them"""
+
+    l1 = node_factory.get_node(bkpr_dbfile='dupe_lease_fee.sqlite3.xz')
+
+    wait_for(lambda: l1.daemon.is_in_log('Duplicate \'lease_fee\' found for account'))
+
+    accts_db_path = os.path.join(l1.lightning_dir, TEST_NETWORK, 'accounts.sqlite3')
+    accts_db = Sqlite3Db(accts_db_path)
+
+    assert accts_db.query('SELECT tag from channel_events where tag = \'lease_fee\';') == [{'tag': 'lease_fee'}]
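To run just the new regression test, pytest's Python entry point can be used from the repository root (a sketch; it assumes a built tree with bitcoind and the pyln test dependencies available, since the test spins up a real node):

    import pytest

    # Select only the migration test added in this commit; -v prints per-test output.
    pytest.main([
        "tests/test_bookkeeper.py::test_bookkeeper_lease_fee_dupe_migration",
        "-v",
    ])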