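"""Utilities for the integration tests.

These helpers run `bitcoind` and `lightningd` in regtest mode, tail their
stdout so tests can block on specific log lines, and expose RPC handles to
drive the daemons.
"""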
from bitcoin.rpc import RawProxy as BitcoinProxy
from lightning import LightningRpc

import logging
import os
import re
import sqlite3
import subprocess
import threading
import time


BITCOIND_CONFIG = {
    "rpcuser": "rpcuser",
    "rpcpassword": "rpcpass",
    "rpcport": 18332,
}

LIGHTNINGD_CONFIG = {
    "bitcoind-poll": "1s",
    "log-level": "debug",
    "deadline-blocks": 5,
    "min-htlc-expiry": 6,
    "locktime-blocks": 6,
}


def write_config(filename, opts):
    with open(filename, 'w') as f:
        for k, v in opts.items():
            f.write("{}={}\n".format(k, v))


class TailableProc(object):
    """A monitorable process that we can start, stop and tail.

    This is the base class for the daemons. It allows us to directly
    tail the processes and react to their output.
    """

    def __init__(self, outputDir=None):
        self.logs = []
        self.logs_cond = threading.Condition(threading.RLock())
        self.cmd_line = None
        self.running = False
        self.proc = None
        self.outputDir = outputDir
        self.logsearch_start = 0

    def start(self):
        """Start the underlying process and start monitoring it."""
        logging.debug("Starting '%s'", " ".join(self.cmd_line))
        self.proc = subprocess.Popen(self.cmd_line, stdout=subprocess.PIPE)
        self.thread = threading.Thread(target=self.tail)
        self.thread.daemon = True
        self.thread.start()
        self.running = True

    def stop(self):
        # Give the process a chance to exit cleanly before force-killing it.
        self.proc.terminate()
        try:
            self.proc.wait(timeout=10)
        except subprocess.TimeoutExpired:
            self.proc.kill()
        if self.outputDir:
            logpath = os.path.join(self.outputDir, 'log')
            with open(logpath, 'w') as f:
                for l in self.logs:
                    f.write(l + '\n')

    def tail(self):
        """Tail the stdout of the process and remember it.

        Stores the lines of output produced by the process in
        self.logs and signals that a new line was read so that it can
        be picked up by consumers.
        """
        # stdout delivers bytes, so iterate with a bytes sentinel and decode
        # each line before storing it.
        for line in iter(self.proc.stdout.readline, b''):
            decoded = line.decode().rstrip()
            with self.logs_cond:
                self.logs.append(decoded)
                logging.debug("%s: %s", self.prefix, decoded)
                self.logs_cond.notify_all()
        self.running = False

    def is_in_log(self, regex):
        """Look for `regex` in the logs."""
        ex = re.compile(regex)
        for l in self.logs:
            if ex.search(l):
                logging.debug("Found '%s' in logs", regex)
                return True

        logging.debug("Did not find '%s' in logs", regex)
        return False

    def wait_for_logs(self, regexs, timeout=60):
        """Look for each regex in `regexs` in the logs.

        We tail the stdout of the process and look for each regex in
        `regexs`, starting from the last of the previously waited-for log
        entries (if any). We fail if the timeout is exceeded or if the
        underlying process exits before all the `regexs` were found.
        """
        logging.debug("Waiting for {} in the logs".format(regexs))
        exs = {re.compile(r) for r in regexs}
        start_time = time.time()
        pos = self.logsearch_start
        initial_pos = len(self.logs)
        while True:
            if time.time() > start_time + timeout:
                print("Can't find {} in logs".format(exs))
                with self.logs_cond:
                    for i in range(initial_pos, len(self.logs)):
                        print("  " + self.logs[i])
                for r in exs:
                    if self.is_in_log(r):
                        print("({} was previously in logs!)".format(r))
                raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
            elif not self.running:
                raise ValueError('Process died while waiting for logs')

            with self.logs_cond:
                if pos >= len(self.logs):
                    self.logs_cond.wait(1)
                    continue

                for r in exs.copy():
                    if r.search(self.logs[pos]):
                        logging.debug("Found '%s' in logs", r)
                        exs.remove(r)
                        self.logsearch_start = pos + 1
                        if len(exs) == 0:
                            return self.logs[pos]
                pos += 1

    def wait_for_log(self, regex, timeout=60):
        """Look for `regex` in the logs.

        Convenience wrapper for the common case of only seeking a single
        entry.
        """
        return self.wait_for_logs([regex], timeout)
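    # Typical use (sketch): wait_for_log(r'Done loading') blocks until a new
    # stdout line matches, or raises TimeoutError after the default 60s.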


class SimpleBitcoinProxy:
    """Wrapper for BitcoinProxy that reconnects on every call.

    Long wait times between calls to the Bitcoin RPC could result in
    `bitcoind` closing the connection, so here we just create throwaway
    connections. This is easier than reaching into the RPC library to
    close, reopen and reauthenticate upon failure.
    """
    def __init__(self, url):
        self.url = url

    def __getattr__(self, name):
        if name.startswith('__') and name.endswith('__'):
            # Python internal stuff
            raise AttributeError

        # Create a callable to do the actual call
        f = lambda *args: BitcoinProxy(self.url)._call(name, *args)

        # Make debuggers show <function bitcoin.rpc.name> rather than
        # <function bitcoin.rpc.<lambda>>
        f.__name__ = name
        return f
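# Every attribute lookup returns a fresh callable, so for example
# SimpleBitcoinProxy(url).getblockcount() opens its own short-lived
# connection to bitcoind.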


class BitcoinD(TailableProc):

    def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=18332):
        TailableProc.__init__(self, bitcoin_dir)

        self.bitcoin_dir = bitcoin_dir
        self.rpcport = rpcport
        self.prefix = 'bitcoind'

        regtestdir = os.path.join(bitcoin_dir, 'regtest')
        if not os.path.exists(regtestdir):
            os.makedirs(regtestdir)

        self.cmd_line = [
            'bitcoind',
            '-datadir={}'.format(bitcoin_dir),
            '-printtoconsole',
            '-server',
            '-regtest',
            '-logtimestamps',
            '-nolisten',
        ]
        BITCOIND_CONFIG['rpcport'] = rpcport
        write_config(os.path.join(bitcoin_dir, 'bitcoin.conf'), BITCOIND_CONFIG)
        write_config(os.path.join(regtestdir, 'bitcoin.conf'), BITCOIND_CONFIG)
        # The credentials here must match the rpcuser/rpcpassword written to
        # bitcoin.conf above.
        self.rpc = SimpleBitcoinProxy(
            "http://rpcuser:rpcpass@127.0.0.1:{}".format(self.rpcport))

    def start(self):
        TailableProc.start(self)
        self.wait_for_log("Done loading", timeout=10)

        logging.info("BitcoinD started")


class LightningD(TailableProc):
    def __init__(self, lightning_dir, bitcoin_dir, port=9735):
        TailableProc.__init__(self, lightning_dir)
        self.lightning_dir = lightning_dir
        self.port = port
        self.cmd_line = [
            'lightningd/lightningd',
            '--bitcoin-datadir={}'.format(bitcoin_dir),
            '--lightning-dir={}'.format(lightning_dir),
            '--port={}'.format(port),
            '--disable-irc',
            '--network=regtest',
            '--dev-broadcast-interval=1000',
        ]

        self.cmd_line += ["--{}={}".format(k, v)
                          for k, v in LIGHTNINGD_CONFIG.items()]
        self.prefix = 'lightningd'

        if not os.path.exists(lightning_dir):
            os.makedirs(lightning_dir)

    def start(self):
        TailableProc.start(self)
        self.wait_for_log("Creating IPv6 listener on port")
        logging.info("LightningD started")

    def stop(self):
        TailableProc.stop(self)
        logging.info("LightningD stopped")


class LegacyLightningD(LightningD):
    def __init__(self, *args, **kwargs):
        LightningD.__init__(self, *args, **kwargs)
        self.cmd_line[0] = 'daemon/lightningd'
        # Filter out non-legacy options
        self.cmd_line = [c for c in self.cmd_line
                         if '--network' not in c
                         and '--dev-broadcast-interval' not in c]

    def start(self):
        TailableProc.start(self)
        self.wait_for_log("Hello world!")
        logging.info("LightningD started")


class LightningNode(object):
    def __init__(self, daemon, rpc, btc, executor):
        self.rpc = rpc
        self.daemon = daemon
        self.bitcoin = btc
        self.executor = executor

    # Use async_=True and batch the returned futures if you are connecting
    # more than one pair of nodes at a time.
    def connect(self, remote_node, capacity, async_=False):
        # Collect necessary information: fund one of our addresses so we can
        # hand lightningd a funding transaction.
        addr = self.rpc.newaddr()['address']
        txid = self.bitcoin.rpc.sendtoaddress(addr, capacity)
        tx = self.bitcoin.rpc.gettransaction(txid)
        start_size = self.bitcoin.rpc.getmempoolinfo()['size']

        def call_connect():
            # Runs in a background thread; failures surface via the log
            # waits in wait_connected() below.
            try:
                self.rpc.connect('127.0.0.1', remote_node.daemon.port,
                                 tx['hex'])
            except Exception:
                pass
        t = threading.Thread(target=call_connect)
        t.daemon = True
        t.start()

        def wait_connected():
            # Up to 10 seconds to get tx into mempool.
            start_time = time.time()
            while self.bitcoin.rpc.getmempoolinfo()['size'] == start_size:
                if time.time() > start_time + 10:
                    raise TimeoutError('No new transactions in mempool')
                time.sleep(0.1)

            self.bitcoin.rpc.generate(1)

            # Now wait for confirmation
            self.daemon.wait_for_log("-> CHANNELD_NORMAL|STATE_NORMAL")
            remote_node.daemon.wait_for_log("-> CHANNELD_NORMAL|STATE_NORMAL")

        if async_:
            return self.executor.submit(wait_connected)
        else:
            return wait_connected()
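    # Sketch of batched use (assumes the executor passed to LightningNode is
    # a concurrent.futures executor; amounts are illustrative):
    #   fut1 = node1.connect(node2, 0.01, async_=True)
    #   fut2 = node3.connect(node4, 0.01, async_=True)
    #   fut1.result(timeout=60)
    #   fut2.result(timeout=60)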

    def openchannel(self, remote_node, capacity):
        addr = self.rpc.newaddr()['address']
        txid = self.bitcoin.rpc.sendtoaddress(addr, capacity / 10**6)
        tx = self.bitcoin.rpc.getrawtransaction(txid)
        self.rpc.addfunds(tx)
        self.rpc.fundchannel(remote_node.info['id'], capacity)
        self.daemon.wait_for_log('sendrawtx exit 0, gave')
        time.sleep(1)
        # Confirm the funding transaction, then wait for the daemon to report
        # that the channel has reached its normal, usable state.
        self.bitcoin.rpc.generate(6)
        self.daemon.wait_for_log('-> CHANNELD_NORMAL|STATE_NORMAL')

    def db_query(self, query):
        # One throwaway connection per query; the database belongs to
        # lightningd, so we never keep it open.
        db = sqlite3.connect(os.path.join(self.daemon.lightning_dir,
                                          "lightningd.sqlite3"))
        db.row_factory = sqlite3.Row
        c = db.cursor()
        c.execute(query)
        rows = c.fetchall()

        result = []
        for row in rows:
            result.append(dict(zip(row.keys(), row)))

        c.close()
        db.close()
        return result
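    # Example (table name illustrative): node.db_query('SELECT * FROM peers')
    # returns a list of dicts keyed by column name.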