import logging
import os
import re
import sqlite3
import subprocess
import threading
import time

from bitcoin.rpc import RawProxy as BitcoinProxy
from decimal import Decimal
from ephemeral_port_reserve import reserve


BITCOIND_CONFIG = {
    "rpcuser": "rpcuser",
    "rpcpassword": "rpcpass",
    "rpcport": 18332,
}


LIGHTNINGD_CONFIG = {
    "bitcoind-poll": "1s",
    "log-level": "debug",
    "cltv-delta": 6,
    "cltv-final": 5,
    "locktime-blocks": 5,
    "rescan": 1,
}


DEVELOPER = os.getenv("DEVELOPER", "0") == "1"


def wait_for(success, timeout=30, interval=0.1):
    start_time = time.time()
    while not success() and time.time() < start_time + timeout:
        time.sleep(interval)
    if time.time() > start_time + timeout:
        raise ValueError("Error waiting for {}".format(success))


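# Usage sketch for wait_for (illustrative only, not part of the test
# helpers; `node` is a hypothetical LightningNode instance):
#
#   wait_for(lambda: len(node.rpc.listfunds()['outputs']) > 0, timeout=60)
#
# Because `success` is re-evaluated every `interval` seconds, the
# predicate should be cheap and free of side effects.

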
def write_config(filename, opts):
    with open(filename, 'w') as f:
        for k, v in opts.items():
            f.write("{}={}\n".format(k, v))


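# For example (a sketch, using the BITCOIND_CONFIG defaults above),
# write_config('/tmp/bitcoin.conf', BITCOIND_CONFIG) produces:
#
#   rpcuser=rpcuser
#   rpcpassword=rpcpass
#   rpcport=18332

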
class TailableProc(object):
    """A monitorable process that we can start, stop and tail.

    This is the base class for the daemons. It allows us to directly
    tail the processes and react to their output.
    """

    def __init__(self, outputDir=None, verbose=True):
        self.logs = []
        self.logs_cond = threading.Condition(threading.RLock())
        self.env = os.environ
        self.running = False
        self.proc = None
        self.outputDir = outputDir
        self.logsearch_start = 0

        # Should we be logging lines we read from stdout?
        self.verbose = verbose

    def start(self):
        """Start the underlying process and start monitoring it."""
        logging.debug("Starting '%s'", " ".join(self.cmd_line))
        self.proc = subprocess.Popen(self.cmd_line, stdout=subprocess.PIPE, env=self.env)
        self.thread = threading.Thread(target=self.tail)
        self.thread.daemon = True
        self.thread.start()
        self.running = True

    def save_log(self):
        if self.outputDir:
            logpath = os.path.join(self.outputDir, 'log')
            with open(logpath, 'w') as f:
                for line in self.logs:
                    f.write(line + '\n')

    def stop(self, timeout=10):
        self.save_log()
        self.proc.terminate()

        # Now give it some time to react to the signal
        try:
            rc = self.proc.wait(timeout)
        except subprocess.TimeoutExpired:
            # Popen.wait raises on timeout rather than returning None.
            rc = None

        if rc is None:
            self.proc.kill()

        self.proc.wait()
        self.thread.join()

        if self.proc.returncode:
            raise ValueError("Process '{}' did not shut down cleanly: return code {}".format(self.proc.pid, self.proc.returncode))

        return self.proc.returncode

    def kill(self):
        """Kill process without giving it warning."""
        self.proc.kill()
        self.proc.wait()
        self.thread.join()

    def tail(self):
        """Tail the stdout of the process and remember it.

        Stores the lines of output produced by the process in
        self.logs and signals that a new line was read so that it can
        be picked up by consumers.
        """
        for line in iter(self.proc.stdout.readline, b''):
            if len(line) == 0:
                break
            # stdout is a byte stream; decode before storing so that
            # log searches match the actual text.
            line = line.decode('UTF-8').rstrip()
            if self.verbose:
                logging.debug("%s: %s", self.prefix, line)
            with self.logs_cond:
                self.logs.append(line)
                self.logs_cond.notify_all()
        self.running = False
        self.proc.stdout.close()

    def is_in_log(self, regex, start=0):
        """Look for `regex` in the logs."""
        ex = re.compile(regex)
        for line in self.logs[start:]:
            if ex.search(line):
                logging.debug("Found '%s' in logs", regex)
                return line

        logging.debug("Did not find '%s' in logs", regex)
        return None

    def wait_for_logs(self, regexs, timeout=60):
        """Look for `regexs` in the logs.

        We tail the stdout of the process and look for each regex in
        `regexs`, starting from the last of the previously waited-for
        log entries (if any). We fail if the timeout is exceeded or if
        the underlying process exits before all the `regexs` were
        found.

        If timeout is None, no time-out is applied.
        """
        logging.debug("Waiting for {} in the logs".format(regexs))
        exs = [re.compile(r) for r in regexs]
        start_time = time.time()
        pos = self.logsearch_start
        while True:
            if timeout is not None and time.time() > start_time + timeout:
                print("Time-out: can't find {} in logs".format(exs))
                for r in exs:
                    if self.is_in_log(r):
                        print("({} was previously in logs!)".format(r))
                raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
            elif not self.running:
                raise ValueError('Process died while waiting for logs')

            with self.logs_cond:
                if pos >= len(self.logs):
                    self.logs_cond.wait(1)
                    continue

                for r in exs.copy():
                    self.logsearch_start = pos + 1
                    if r.search(self.logs[pos]):
                        logging.debug("Found '%s' in logs", r)
                        exs.remove(r)
                        break
                if len(exs) == 0:
                    return self.logs[pos]
                pos += 1

    def wait_for_log(self, regex, timeout=60):
        """Look for `regex` in the logs.

        Convenience wrapper for the common case of only seeking a single entry.
        """
        return self.wait_for_logs([regex], timeout)


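# Usage sketch for the log-waiting helpers (illustrative; `daemon` is a
# hypothetical TailableProc subclass that is already running):
#
#   daemon.wait_for_log(r'Server started')          # blocks up to 60s
#   daemon.wait_for_logs([r'peer connected', r'channel open'])
#   if daemon.is_in_log(r'UNUSUAL'):
#       ...  # react to a log line that was already seen
#
# wait_for_logs resumes scanning at self.logsearch_start, so repeated
# waits never match the same line twice.

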
class SimpleBitcoinProxy:
    """Wrapper for BitcoinProxy to reconnect.

    Long wait times between calls to the Bitcoin RPC could result in
    `bitcoind` closing the connection, so here we just create
    throwaway connections. This is easier than reaching into the RPC
    library to close, reopen and reauth upon failure.
    """

    def __init__(self, btc_conf_file, *args, **kwargs):
        self.__btc_conf_file__ = btc_conf_file

    def __getattr__(self, name):
        if name.startswith('__') and name.endswith('__'):
            # Python internal stuff
            raise AttributeError

        # Create a callable to do the actual call
        proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)

        def f(*args):
            return proxy._call(name, *args)

        # Make debuggers show <function bitcoin.rpc.name> rather than
        # <function bitcoin.rpc.<lambda>>
        f.__name__ = name
        return f


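# A minimal usage sketch (illustrative; the conf path is hypothetical):
#
#   rpc = SimpleBitcoinProxy(btc_conf_file='/tmp/bitcoind-test/regtest/bitcoin.conf')
#   height = rpc.getblockcount()
#
# Every attribute access builds a fresh BitcoinProxy, so a connection
# dropped by bitcoind between calls never poisons later calls.

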
class BitcoinD(TailableProc):

    def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
        TailableProc.__init__(self, bitcoin_dir, verbose=False)

        if rpcport is None:
            rpcport = reserve()

        self.bitcoin_dir = bitcoin_dir
        self.rpcport = rpcport
        self.prefix = 'bitcoind'

        regtestdir = os.path.join(bitcoin_dir, 'regtest')
        if not os.path.exists(regtestdir):
            os.makedirs(regtestdir)

        self.cmd_line = [
            'bitcoind',
            '-datadir={}'.format(bitcoin_dir),
            '-printtoconsole',
            '-server',
            '-regtest',
            '-logtimestamps',
            '-nolisten',
        ]
        BITCOIND_CONFIG['rpcport'] = rpcport
        btc_conf_file = os.path.join(regtestdir, 'bitcoin.conf')
        write_config(os.path.join(bitcoin_dir, 'bitcoin.conf'), BITCOIND_CONFIG)
        write_config(btc_conf_file, BITCOIND_CONFIG)
        self.rpc = SimpleBitcoinProxy(btc_conf_file=btc_conf_file)

    def start(self):
        TailableProc.start(self)
        self.wait_for_log("Done loading", timeout=60)

        logging.info("BitcoinD started")

    def generate_block(self, numblocks=1):
        # Newer Bitcoin Core versions deprecate and then remove the
        # generate() RPC; generatetoaddress works everywhere.
        self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())


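# A minimal sketch of spinning up a throwaway regtest bitcoind
# (illustrative; the directory is hypothetical and must be writable):
#
#   btc = BitcoinD(bitcoin_dir='/tmp/bitcoind-test')
#   btc.start()                 # blocks until "Done loading" in the log
#   btc.generate_block(101)     # mature a coinbase so funds are spendable
#   btc.stop()

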
class LightningD(TailableProc):

    def __init__(self, lightning_dir, bitcoin_dir, port=9735, random_hsm=False):
        TailableProc.__init__(self, lightning_dir)
        self.lightning_dir = lightning_dir
        self.port = port
        self.cmd_prefix = []

        self.opts = LIGHTNINGD_CONFIG.copy()
        opts = {
            'bitcoin-datadir': bitcoin_dir,
            'lightning-dir': lightning_dir,
            'port': port,
            'allow-deprecated-apis': 'false',
            'override-fee-rates': '15000/7500/1000',
            'network': 'regtest',
            'ignore-fee-limits': 'false',
        }

        for k, v in opts.items():
            self.opts[k] = v

        if not os.path.exists(lightning_dir):
            os.makedirs(lightning_dir)

        # Deterministic seed: the final component of the dir,
        # zero-padded and truncated to 32 bytes.
        seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
        if not random_hsm:
            with open(os.path.join(lightning_dir, 'hsm_secret'), 'wb') as f:
                f.write(seed)
        if DEVELOPER:
            self.opts['dev-broadcast-interval'] = 1000
        self.prefix = 'lightningd(%d)' % (port)

    @property
    def cmd_line(self):
        opts = []
        for k, v in sorted(self.opts.items()):
            if v is None:
                opts.append("--{}".format(k))
            else:
                opts.append("--{}={}".format(k, v))

        return self.cmd_prefix + ['lightningd/lightningd'] + opts

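    # For example (a sketch, using the defaults above with hypothetical
    # directories and the default port), cmd_line renders to roughly:
    #
    #   lightningd/lightningd --allow-deprecated-apis=false \
    #       --bitcoin-datadir=/tmp/bitcoind-test --bitcoind-poll=1s \
    #       --cltv-delta=6 --cltv-final=5 --ignore-fee-limits=false \
    #       --lightning-dir=/tmp/lightningd-test --locktime-blocks=5 \
    #       --log-level=debug --network=regtest \
    #       --override-fee-rates=15000/7500/1000 --port=9735 --rescan=1
    #
    # Options are emitted in sorted key order, one --key[=value] each;
    # DEVELOPER mode adds --dev-broadcast-interval=1000.
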
    def start(self):
        TailableProc.start(self)
        self.wait_for_log("Server started with public key")
        logging.info("LightningD started")

    def wait(self, timeout=10):
        """Wait for the daemon to stop for up to timeout seconds.

        Returns the returncode of the process, None if the process did
        not return before the timeout triggers.
        """
        try:
            self.proc.wait(timeout)
        except subprocess.TimeoutExpired:
            # Popen.wait raises on timeout; map that to the documented
            # None return value.
            return None
        return self.proc.returncode


class LightningNode(object):

    def __init__(self, daemon, rpc, btc, executor, may_fail=False, may_reconnect=False):
        self.rpc = rpc
        self.daemon = daemon
        self.bitcoin = btc
        self.executor = executor
        self.may_fail = may_fail
        self.may_reconnect = may_reconnect

    # Use batch if you're doing more than one async.
    # Note: `async` became a reserved keyword in Python 3.7, so the
    # flag is named `asynchronous` here.
    def connect(self, remote_node, capacity, asynchronous=False):
        # Collect necessary information
        addr = self.rpc.newaddr()['address']
        txid = self.bitcoin.rpc.sendtoaddress(addr, capacity)
        tx = self.bitcoin.rpc.gettransaction(txid)
        start_size = self.bitcoin.rpc.getmempoolinfo()['size']

        def call_connect():
            try:
                self.rpc.connect('127.0.0.1', remote_node.daemon.port, tx['hex'])
            except Exception:
                pass
        t = threading.Thread(target=call_connect)
        t.daemon = True
        t.start()

        def wait_connected():
            # Up to 10 seconds to get tx into mempool.
            start_time = time.time()
            while self.bitcoin.rpc.getmempoolinfo()['size'] == start_size:
                if time.time() > start_time + 10:
                    raise TimeoutError('No new transactions in mempool')
                time.sleep(0.1)

            self.bitcoin.generate_block(1)

            # Now wait for confirmation
            self.daemon.wait_for_log(" to CHANNELD_NORMAL|STATE_NORMAL")
            remote_node.daemon.wait_for_log(" to CHANNELD_NORMAL|STATE_NORMAL")

        if asynchronous:
            return self.executor.submit(wait_connected)
        else:
            return wait_connected()

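    # Usage sketch (illustrative; `l1`, `l2` are hypothetical
    # LightningNode instances backed by the same bitcoind):
    #
    #   fut = l1.connect(l2, 0.1, asynchronous=True)   # 0.1 BTC capacity
    #   ...                                            # do other setup
    #   fut.result()                  # both sides reach CHANNELD_NORMAL
    #
    # The synchronous form, l1.connect(l2, 0.1), simply blocks instead.
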
    def openchannel(self, remote_node, capacity, addrtype="p2sh-segwit"):
        # Fund more than the channel capacity so the funding tx can pay fees.
        addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
        fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
        self.daemon.wait_for_log('sendrawtx exit 0, gave')
        self.bitcoin.generate_block(6)
        self.daemon.wait_for_log('to CHANNELD_NORMAL|STATE_NORMAL')
        return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}

    def fundwallet(self, sats, addrtype="p2sh-segwit"):
        addr = self.rpc.newaddr(addrtype)['address']
        # sendtoaddress takes BTC, so convert from satoshis (as
        # fund_channel below does).
        txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
        self.bitcoin.generate_block(1)
        self.daemon.wait_for_log('Owning output .* txid {}'.format(txid))
        return addr, txid

    def getactivechannels(self):
        return [c for c in self.rpc.listchannels()['channels'] if c['active']]

    def db_query(self, query):
        from shutil import copyfile
        # Query a copy of the database, since lightningd may be
        # holding the lock on the live one.
        orig = os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3")
        copy = os.path.join(self.daemon.lightning_dir, "lightningd-copy.sqlite3")
        copyfile(orig, copy)

        db = sqlite3.connect(copy)
        db.row_factory = sqlite3.Row
        c = db.cursor()
        c.execute(query)
        rows = c.fetchall()

        result = []
        for row in rows:
            result.append(dict(zip(row.keys(), row)))

        c.close()
        db.close()
        return result

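    # Usage sketch (illustrative; treat the table and column names as
    # assumptions about lightningd's schema):
    #
    #   outputs = node.db_query('SELECT status FROM outputs;')
    #   confirmed = [o for o in outputs if o['status'] == 1]
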
    # Assumes node is stopped!
    def db_manip(self, query):
        db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3"))
        db.row_factory = sqlite3.Row
        c = db.cursor()
        c.execute(query)
        db.commit()
        c.close()
        db.close()

    def stop(self, timeout=10):
        """Attempt to do a clean shutdown, but kill if it hangs."""

        # Tell the daemon to stop
        try:
            # May fail if the process already died
            self.rpc.stop()
        except Exception:
            pass

        rc = self.daemon.wait(timeout)

        # If it did not stop be more insistent
        if rc is None:
            rc = self.daemon.stop()

        self.daemon.save_log()

        if rc != 0 and not self.may_fail:
            raise ValueError("Node did not exit cleanly, rc={}".format(rc))
        else:
            return rc

    def restart(self, timeout=10, clean=True):
        """Stop and restart the lightning node.

        Keyword arguments:
        timeout: number of seconds to wait for a shutdown
        clean: whether to issue a `stop` RPC command before killing
        """
        if clean:
            self.stop(timeout)
        else:
            self.daemon.stop()

        self.daemon.start()

    def fund_channel(self, l2, amount):
        # Give yourself some funds to work with
        addr = self.rpc.newaddr()['address']
        self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
        numfunds = len(self.rpc.listfunds()['outputs'])
        self.bitcoin.generate_block(1)
        wait_for(lambda: len(self.rpc.listfunds()['outputs']) > numfunds)

        # Now go ahead and open a channel
        num_tx = len(self.bitcoin.rpc.getrawmempool())
        tx = self.rpc.fundchannel(l2.info['id'], amount)['tx']

        wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
        self.bitcoin.generate_block(1)
        # We wait until gossipd sees the local update, as well as status
        # NORMAL, so it can definitely route through.
        self.daemon.wait_for_logs(['update for channel .* now ACTIVE', 'to CHANNELD_NORMAL'])
        l2.daemon.wait_for_logs(['update for channel .* now ACTIVE', 'to CHANNELD_NORMAL'])

        # Hacky way to find our output.
        decoded = self.bitcoin.rpc.decoderawtransaction(tx)
        for out in decoded['vout']:
            # Sometimes a float? Sometimes a decimal? Compare both ways.
            if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
                if out['value'] == Decimal(amount) / 10**8 or out['value'] * 10**8 == amount:
                    return "{}:1:{}".format(self.bitcoin.rpc.getblockcount(), out['n'])
        # Intermittent decoding failure. See if it decodes badly twice?
        decoded2 = self.bitcoin.rpc.decoderawtransaction(tx)
        raise ValueError("Can't find {} payment in {} (1={} 2={})".format(amount, tx, decoded, decoded2))
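

# Usage sketch tying the helpers together (illustrative; the executor
# and RPC wiring are hypothetical and normally supplied by fixtures):
#
#   btc = BitcoinD()
#   btc.start()
#   ...  # start LightningD daemons, wrap each with its RPC proxy as
#   ...  # LightningNode(daemon, rpc, btc, executor)
#   scid = l1.fund_channel(l2, 10**6)   # 10**6 sat; returns "block:1:outnum"
#   l1.stop()
#   btc.stop()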