mirror of
https://github.com/ACINQ/eclair.git
synced 2025-03-13 19:37:35 +01:00
Merge branch 'master' into android
This commit is contained in:
commit
7e8784e64b
42 changed files with 798 additions and 298 deletions
|
@ -4,7 +4,7 @@ services:
|
|||
dist: trusty
|
||||
language: scala
|
||||
scala:
|
||||
- 2.11.11
|
||||
- 2.11.12
|
||||
env:
|
||||
- export LD_LIBRARY_PATH=/usr/local/lib
|
||||
before_install:
|
||||
|
|
13
BUILD.md
13
BUILD.md
|
@ -24,3 +24,16 @@ To only build the `eclair-node` module
|
|||
$ mvn install -pl eclair-node -am -DskipTests
|
||||
```
|
||||
|
||||
# Building the API documentation
|
||||
|
||||
## Slate
|
||||
|
||||
The API doc is generated via slate and hosted on github pages. To make a change and update the doc follow the steps:
|
||||
|
||||
1. git checkout slate-doc
|
||||
2. Install your local dependencies for slate, more info [here](https://github.com/lord/slate#getting-started-with-slate)
|
||||
3. Edit `source/index.html.md` and save your changes.
|
||||
4. Commit all the changes to git, before deploying the repo should be clean.
|
||||
5. Push your commit to remote.
|
||||
6. Run `./deploy.sh`
|
||||
7. Wait a few minutes and the doc should be updated at https://acinq.github.io/eclair
|
40
OLD-API-DOCS.md
Normal file
40
OLD-API-DOCS.md
Normal file
|
@ -0,0 +1,40 @@
|
|||
## JSON-RPC API
|
||||
|
||||
:warning: Note this interface is being deprecated.
|
||||
|
||||
method | params | description
|
||||
------------- |----------------------------------------------------------------------------------------|-----------------------------------------------------------
|
||||
getinfo | | return basic node information (id, chain hash, current block height)
|
||||
connect | nodeId, host, port | open a secure connection to a lightning node
|
||||
connect | uri | open a secure connection to a lightning node
|
||||
open | nodeId, fundingSatoshis, pushMsat = 0, feerateSatPerByte = ?, channelFlags = 0x01 | open a channel with another lightning node, by default push = 0, feerate for the funding tx targets 6 blocks, and channel is announced
|
||||
updaterelayfee | channelId, feeBaseMsat, feeProportionalMillionths | update relay fee for payments going through this channel
|
||||
peers | | list existing local peers
|
||||
channels | | list existing local channels
|
||||
channels | nodeId | list existing local channels opened with a particular nodeId
|
||||
channel | channelId | retrieve detailed information about a given channel
|
||||
channelstats | | retrieves statistics about channel usage (fees, number and average amount of payments)
|
||||
allnodes | | list all known nodes
|
||||
allchannels | | list all known channels
|
||||
allupdates | | list all channels updates
|
||||
allupdates | nodeId | list all channels updates for this nodeId
|
||||
receive | description | generate a payment request without a required amount (can be useful for donations)
|
||||
receive | amountMsat, description | generate a payment request for a given amount
|
||||
receive | amountMsat, description, expirySeconds | generate a payment request for a given amount that expires after given number of seconds
|
||||
parseinvoice | paymentRequest | returns node, amount and payment hash in a payment request
|
||||
findroute | paymentRequest | returns nodes and channels of the route for this payment request if there is any
|
||||
findroute | paymentRequest, amountMsat | returns nodes and channels of the route for this payment request and amount, if there is any
|
||||
findroute | nodeId, amountMsat | returns nodes and channels of the route to the nodeId, if there is any
|
||||
send | amountMsat, paymentHash, nodeId | send a payment to a lightning node
|
||||
send | paymentRequest | send a payment to a lightning node using a BOLT11 payment request
|
||||
send | paymentRequest, amountMsat | send a payment to a lightning node using a BOLT11 payment request and a custom amount
|
||||
checkpayment | paymentHash | returns true if the payment has been received, false otherwise
|
||||
checkpayment | paymentRequest | returns true if the payment has been received, false otherwise
|
||||
close | channelId | close a channel
|
||||
close | channelId, scriptPubKey | close a channel and send the funds to the given scriptPubKey
|
||||
forceclose | channelId | force-close a channel by publishing the local commitment tx (careful: this is more expensive than a regular close and will incur a delay before funds are spendable)"
|
||||
audit | | list all send/received/relayed payments
|
||||
audit | from, to | list send/received/relayed payments in that interval (from <= timestamp < to)
|
||||
networkfees | | list all network fees paid to the miners, by transaction
|
||||
networkfees |from, to | list network fees paid to the miners, by transaction, in that interval (from <= timestamp < to)
|
||||
help | | display available methods
|
51
README.md
51
README.md
|
@ -4,7 +4,7 @@
|
|||
[](LICENSE)
|
||||
[](https://gitter.im/ACINQ/eclair)
|
||||
|
||||
**Eclair** (French for Lightning) is a Scala implementation of the Lightning Network. It can run with or without a GUI, and a JSON-RPC API is also available.
|
||||
**Eclair** (French for Lightning) is a Scala implementation of the Lightning Network. It can run with or without a GUI, and a JSON API is also available.
|
||||
|
||||
This software follows the [Lightning Network Specifications (BOLTs)](https://github.com/lightningnetwork/lightning-rfc). Other implementations include [c-lightning](https://github.com/ElementsProject/lightning) and [lnd](https://github.com/LightningNetwork/lnd).
|
||||
|
||||
|
@ -14,7 +14,7 @@ This software follows the [Lightning Network Specifications (BOLTs)](https://git
|
|||
|
||||
:rotating_light: If you intend to run Eclair on mainnet:
|
||||
- Keep in mind that it is beta-quality software and **don't put too much money** in it
|
||||
- Eclair's JSON-RPC API should **NOT** be accessible from the outside world (similarly to Bitcoin Core API)
|
||||
- Eclair's JSON API should **NOT** be accessible from the outside world (similarly to Bitcoin Core API)
|
||||
- Specific [configuration instructions for mainnet](#mainnet-usage) are provided below (by default Eclair runs on testnet)
|
||||
|
||||
---
|
||||
|
@ -26,6 +26,14 @@ Please see the latest [release note](https://github.com/ACINQ/eclair/releases) f
|
|||
|
||||

|
||||
|
||||
## JSON API
|
||||
|
||||
Eclair offers a feature rich HTTP API that enables application developers to easily integrate.
|
||||
|
||||
For more information please visit the [API documentation website](https://acinq.github.io/eclair).
|
||||
|
||||
:warning: You can still use the old API by setting the `eclair.api.use-old-api=true` parameter, but it is now deprecated and will soon be removed. The old documentation is still available [here](OLD-API-DOCS.md).
|
||||
|
||||
## Installation
|
||||
|
||||
### Configuring Bitcoin Core
|
||||
|
@ -128,45 +136,6 @@ Eclair uses [`logback`](https://logback.qos.ch) for logging. To use a different
|
|||
java -Dlogback.configurationFile=/path/to/logback-custom.xml -jar eclair-node-gui-<version>-<commit_id>.jar
|
||||
```
|
||||
|
||||
## JSON-RPC API
|
||||
|
||||
method | params | description
|
||||
------------- |----------------------------------------------------------------------------------------|-----------------------------------------------------------
|
||||
getinfo | | return basic node information (id, chain hash, current block height)
|
||||
connect | nodeId, host, port | open a secure connection to a lightning node
|
||||
connect | uri | open a secure connection to a lightning node
|
||||
open | nodeId, fundingSatoshis, pushMsat = 0, feerateSatPerByte = ?, channelFlags = 0x01 | open a channel with another lightning node, by default push = 0, feerate for the funding tx targets 6 blocks, and channel is announced
|
||||
updaterelayfee | channelId, feeBaseMsat, feeProportionalMillionths | update relay fee for payments going through this channel
|
||||
peers | | list existing local peers
|
||||
channels | | list existing local channels
|
||||
channels | nodeId | list existing local channels opened with a particular nodeId
|
||||
channel | channelId | retrieve detailed information about a given channel
|
||||
channelstats | | retrieves statistics about channel usage (fees, number and average amount of payments)
|
||||
allnodes | | list all known nodes
|
||||
allchannels | | list all known channels
|
||||
allupdates | | list all channels updates
|
||||
allupdates | nodeId | list all channels updates for this nodeId
|
||||
receive | description | generate a payment request without a required amount (can be useful for donations)
|
||||
receive | amountMsat, description | generate a payment request for a given amount
|
||||
receive | amountMsat, description, expirySeconds | generate a payment request for a given amount that expires after given number of seconds
|
||||
parseinvoice | paymentRequest | returns node, amount and payment hash in a payment request
|
||||
findroute | paymentRequest | returns nodes and channels of the route for this payment request if there is any
|
||||
findroute | paymentRequest, amountMsat | returns nodes and channels of the route for this payment request and amount, if there is any
|
||||
findroute | nodeId, amountMsat | returns nodes and channels of the route to the nodeId, if there is any
|
||||
send | amountMsat, paymentHash, nodeId | send a payment to a lightning node
|
||||
send | paymentRequest | send a payment to a lightning node using a BOLT11 payment request
|
||||
send | paymentRequest, amountMsat | send a payment to a lightning node using a BOLT11 payment request and a custom amount
|
||||
checkpayment | paymentHash | returns true if the payment has been received, false otherwise
|
||||
checkpayment | paymentRequest | returns true if the payment has been received, false otherwise
|
||||
close | channelId | close a channel
|
||||
close | channelId, scriptPubKey | close a channel and send the funds to the given scriptPubKey
|
||||
forceclose | channelId | force-close a channel by publishing the local commitment tx (careful: this is more expensive than a regular close and will incur a delay before funds are spendable)"
|
||||
audit | | list all send/received/relayed payments
|
||||
audit | from, to | list send/received/relayed payments in that interval (from <= timestamp < to)
|
||||
networkfees | | list all network fees paid to the miners, by transaction
|
||||
networkfees |from, to | list network fees paid to the miners, by transaction, in that interval (from <= timestamp < to)
|
||||
help | | display available methods
|
||||
|
||||
## Docker
|
||||
|
||||
A [Dockerfile](Dockerfile) image is built on each commit on [docker hub](https://hub.docker.com/r/acinq/eclair) for running a dockerized eclair-node.
|
||||
|
|
|
@ -21,7 +21,7 @@ _eclair-cli()
|
|||
*)
|
||||
# works fine, but is too slow at the moment.
|
||||
# allopts=$($eclaircli help 2>&1 | awk '$1 ~ /^"/ { sub(/,/, ""); print $1}' | sed 's/[":]//g')
|
||||
allopts="connect open peers channels channel allnodes allchannels allupdates receive send close audit findroute updaterelayfee parseinvoice forceclose networkfees channelstats checkpayment getinfo help"
|
||||
allopts="getinfo connect open close forceclose updaterelayfee peers channels channel allnodes allchannels allupdates receive parseinvoice findroute findroutetonode send sendtonode checkpayment audit networkfees channelstats"
|
||||
|
||||
if ! [[ " $allopts " =~ " $prev " ]]; then # prevent double arguments
|
||||
if [[ -z "$cur" || "$cur" =~ ^[a-z] ]]; then
|
||||
|
|
|
@ -1,104 +1,119 @@
|
|||
#!/bin/bash
|
||||
|
||||
# default script values, can be overriden for convenience.
|
||||
api_url='http://localhost:8080'
|
||||
# uncomment the line below if you don't want to provide a password each time you call eclair-cli
|
||||
# api_password='your_api_password'
|
||||
# for some commands the json output can be shortened for better readability
|
||||
short=false
|
||||
|
||||
# prints help message
|
||||
usage() {
|
||||
echo -e "==============================
|
||||
Command line client for eclair
|
||||
==============================
|
||||
|
||||
This tool requires the eclair node's API to be enabled and listening
|
||||
on <$api_url>.
|
||||
|
||||
Usage
|
||||
-----
|
||||
\e[93meclair-cli\e[39m [\e[93mOPTIONS\e[39m]... [\e[93mCOMMAND\e[39m] [--command-param command-value]...
|
||||
|
||||
where OPTIONS can be:
|
||||
-p <password> API's password
|
||||
-a <address> Override the API URL with <address>
|
||||
-h Show available commands
|
||||
|
||||
and COMMAND is one of:
|
||||
getinfo, connect, open, close, forceclose, updaterelayfee,
|
||||
peers, channels, channel, allnodes, allchannels, allupdates,
|
||||
receive, parseinvoice, findroute, findroutetonode,
|
||||
send, sendtonode, checkpayment,
|
||||
audit, networkfees, channelstats
|
||||
|
||||
Examples
|
||||
--------
|
||||
eclair-cli help display available commands
|
||||
eclair-cli -a localhost:1234 peers list the peers of a node hosted on localhost:1234
|
||||
eclair-cli close --channelId 006fb... closes the channel with id 006fb...
|
||||
|
||||
|
||||
Full documentation here: <https://acinq.github.io/apidoc>" 1>&2;
|
||||
exit 1;
|
||||
}
|
||||
|
||||
# -- script's logic begins here
|
||||
|
||||
# Check if jq is installed. If not, display instructions and abort program
|
||||
command -v jq >/dev/null 2>&1 || { echo -e "This tool requires jq.\nFor installation instructions, visit https://stedolan.github.io/jq/download/.\n\nAborting..."; exit 1; }
|
||||
|
||||
# curl installed? If not, give a hint
|
||||
command -v curl >/dev/null 2>&1 || { echo -e "This tool requires curl.\n\nAborting..."; exit 1; }
|
||||
|
||||
FULL_OUTPUT='false'
|
||||
URL='http://localhost:8080'
|
||||
PASSWORD=''
|
||||
|
||||
# -------------------- METHODS
|
||||
|
||||
displayhelp() {
|
||||
echo -e "Usage: eclair-cli [OPTION]... [COMMAND]
|
||||
Client for an eclair node.
|
||||
|
||||
With COMMAND is one of the command listed by \e[01;33meclair-cli help\e[0m.
|
||||
|
||||
-p <password> api's password
|
||||
-a <address> Override the api URL with <address>
|
||||
-v Outputs full json returned by the API
|
||||
|
||||
Examples:
|
||||
eclair-cli -a localhost:1234 peers list the peers
|
||||
eclair-cli close 006fb... closes the channel with id 006fb...
|
||||
|
||||
Note: Uses the json-rpc api exposed by the node on localhost:8080. Make sure the api is enabled.
|
||||
Full documentation at: <https://github.com/ACINQ/eclair>"
|
||||
}
|
||||
|
||||
# Executes a JSON RPC call to a node listening on ${URL}
|
||||
call() {
|
||||
jqexp='if .error == null then .result else .error.message end'
|
||||
# override default jq parsing expression
|
||||
if [ $# -ge 3 ] && [ ${FULL_OUTPUT} == "false" ]; then jqexp=${3}; fi
|
||||
# set password
|
||||
if [ -z ${PASSWORD} ]; then auth="eclair-cli";
|
||||
else auth="eclair-cli:"${PASSWORD}; fi
|
||||
eval curl "--user ${auth} --silent --show-error -X POST -H \"Content-Type: application/json\" -d '{ \"method\": \"'${1}'\", \"params\": '${2}' }' ${URL}" | jq -r "$jqexp"
|
||||
}
|
||||
|
||||
# get script options
|
||||
while getopts 'vu:p:a:' flag; do
|
||||
case "${flag}" in
|
||||
p) PASSWORD="${OPTARG}" ;;
|
||||
a) URL="${OPTARG}" ;;
|
||||
v) FULL_OUTPUT="true" ;;
|
||||
*) echo -e "\nAborting..."; exit 1; ;;
|
||||
esac
|
||||
# extract script options
|
||||
while getopts ':cu:su:p:a:hu:' flag; do
|
||||
case "${flag}" in
|
||||
p) api_password="${OPTARG}" ;;
|
||||
a) api_url="${OPTARG}" ;;
|
||||
h) usage ;;
|
||||
s) short=true ;;
|
||||
*) ;;
|
||||
esac
|
||||
done
|
||||
|
||||
shift $(($OPTIND - 1))
|
||||
|
||||
# assigning JSON RPC method and params values from arguments
|
||||
METHOD=${1}
|
||||
# extract api's endpoint (e.g. sendpayment, connect, ...) from params
|
||||
api_endpoint=${1}
|
||||
shift 1
|
||||
|
||||
# Create a JSON Array containing the remaining program args as QUOTED STRINGS, separated with a `,` character
|
||||
PARAMS=""
|
||||
i=1
|
||||
# display a usage method if no method given or help requested
|
||||
if [ -z $api_endpoint ] || [ "$api_endpoint" == "help" ]; then
|
||||
usage;
|
||||
fi
|
||||
|
||||
# transform long options into a HTTP encoded url body.
|
||||
api_payload=""
|
||||
index=1
|
||||
for arg in "${@}"; do
|
||||
if [ $i -eq 1 ]; then PARAMS=$(printf '"%s"' "$arg");
|
||||
else PARAMS=$(printf '%s,"%s"' "$PARAMS" "$arg");
|
||||
fi
|
||||
let "i++"
|
||||
transformed_arg="";
|
||||
case ${arg} in
|
||||
"--"*) # if arg begins with two dashes, it is the name of a parameter. Dashes must be removed, and arg must be followed by an equal sign
|
||||
# also, it must be prefixed by an '&' sign, if it is not the first argument
|
||||
if [ $index -eq 1 ]; then
|
||||
transformed_arg="$transformed_arg${arg:2}=";
|
||||
else
|
||||
transformed_arg="&$transformed_arg${arg:2}=";
|
||||
fi
|
||||
;;
|
||||
*) transformed_arg=$arg
|
||||
;;
|
||||
esac
|
||||
api_payload="$api_payload$transformed_arg";
|
||||
let "index++"
|
||||
done;
|
||||
PARAMS="[${PARAMS}]"
|
||||
|
||||
# Whatever the arguments provided to eclair-cli, a call to the API will be sent. Let it fail!
|
||||
case ${METHOD}_${#} in
|
||||
""_*) displayhelp ;;
|
||||
"help"*) displayhelp
|
||||
echo -e "\nAvailable commands:\n"
|
||||
call "help" [] ;;
|
||||
# jq filter parses response body for error message
|
||||
jq_filter='if type=="object" and .error != null then .error else .';
|
||||
|
||||
"connect_3") call ${METHOD} "'$(printf '["%s","%s",%s]' "${1}" "${2}" "${3}")'" ;; # ${3} is numeric
|
||||
# apply special jq filter if we are in "short" ouput mode -- only for specific commands such as 'channels'
|
||||
if [ "$short" = true ]; then
|
||||
jq_channel_filter="{ nodeId, shortChannelId: .data.shortChannelId, channelId, state, balanceSat: (try (.data.commitments.localCommit.spec.toLocalMsat / 1000 | floor) catch null), capacitySat: .data.commitments.commitInput.amountSatoshis, channelPoint: .data.commitments.commitInput.outPoint }";
|
||||
case $api_endpoint in
|
||||
"channels") jq_filter="$jq_filter | map( $jq_channel_filter )" ;;
|
||||
"channel") jq_filter="$jq_filter | $jq_channel_filter" ;;
|
||||
*) ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
"open_4") call ${METHOD} "'$(printf '["%s",%s,%s,%s]' "${1}" "${2}" "${3}" "${4}")'" ;; # ${2} ${3} ${4} are numeric (funding, push, flags)
|
||||
"open_3") call ${METHOD} "'$(printf '["%s",%s,%s]' "${1}" "${2}" "${3}")'" ;; # ${2} ${3} are numeric (funding, push)
|
||||
"open_2") call ${METHOD} "'$(printf '["%s",%s]' "${1}" "${2}")'" ;; # ${2} is numeric (funding)
|
||||
jq_filter="$jq_filter end";
|
||||
|
||||
"receive_2") call ${METHOD} "'$(printf '[%s,"%s"]' "${1}" "${2}")'" ;; # ${1} is numeric (amount to receive)
|
||||
"receive_3") call ${METHOD} "'$(printf '[%s,"%s",%s]' "${1}" "${2}" "${3}")'" ;; # ${1} is numeric (amount to receive) as is ${2} for expiry in seconds
|
||||
# if no password is provided, auth should only contain user login so that curl prompts for the api password
|
||||
if [ -z $api_password ]; then
|
||||
auth="eclair-cli";
|
||||
else
|
||||
auth="eclair-cli:$api_password";
|
||||
fi
|
||||
|
||||
"channel_"*) call ${METHOD} "'${PARAMS}'" "if .error != null then .error.message else .result | { nodeId, shortChannelId: .data.shortChannelId, channelId, state, balanceSat: (try (.data.commitments.localCommit.spec.toLocalMsat / 1000 | floor) catch null), capacitySat: .data.commitments.commitInput.amountSatoshis, channelPoint: .data.commitments.commitInput.outPoint } end" ;;
|
||||
|
||||
"channels_"*) call ${METHOD} "'${PARAMS}'" "if .error != null then .error.message else .result | map( { nodeId, shortChannelId: .data.shortChannelId, channelId, state, balanceSat: (try (.data.commitments.localCommit.spec.toLocalMsat / 1000 | floor) catch null), capacitySat: .data.commitments.commitInput.amountSatoshis, channelPoint: .data.commitments.commitInput.outPoint } ) end" ;;
|
||||
|
||||
"send_3") call ${METHOD} "'$(printf '[%s,"%s","%s"]' "${1}" "${2}" "${3}")'" ;; # ${1} is numeric (amount of the payment)
|
||||
"send_2") call ${METHOD} "'$(printf '["%s",%s]' "${1}" "${2}")'" ;; # ${2} is numeric (amount overriding the payment request)
|
||||
|
||||
"audit_2") call ${METHOD} "'$(printf '[%s,%s]' "${1}" "${2}")'" ;; # ${1} and ${2} are numeric (unix timestamps)
|
||||
|
||||
"networkfees_2") call ${METHOD} "'$(printf '[%s,%s]' "${1}" "${2}")'" ;; # ${1} and ${2} are numeric (unix timestamps)
|
||||
|
||||
*) # Default case.
|
||||
# Sends the method and, for parameters, use the JSON table containing the remaining args.
|
||||
#
|
||||
# NOTE: Arguments will be sent as QUOTED STRING so if this particular API call requires an INT param,
|
||||
# this call will fail. In that case, a specific rule for that method MUST be set and the ${PARAMS} JSON array can not be used.
|
||||
call ${METHOD} "'${PARAMS}'" ;;
|
||||
|
||||
esac
|
||||
# we're now ready to execute the API call
|
||||
eval curl "--user $auth --silent --show-error -X POST -H \"Content-Type: application/x-www-form-urlencoded\" -d '$api_payload' $api_url/$api_endpoint" | jq -r "$jq_filter"
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
<parent>
|
||||
<groupId>fr.acinq.eclair</groupId>
|
||||
<artifactId>eclair_2.11</artifactId>
|
||||
<version>0.2-android-SNAPSHOT</version>
|
||||
<version>0.3-android-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>eclair-core_2.11</artifactId>
|
||||
|
|
|
@ -195,12 +195,6 @@
|
|||
"t": "50001",
|
||||
"version": "1.4"
|
||||
},
|
||||
"electrum3.hachre.de": {
|
||||
"pruning": "-",
|
||||
"s": "50002",
|
||||
"t": "50001",
|
||||
"version": "1.4"
|
||||
},
|
||||
"electrumx.bot.nu": {
|
||||
"pruning": "-",
|
||||
"s": "50002",
|
||||
|
@ -317,12 +311,6 @@
|
|||
"t": "50001",
|
||||
"version": "1.4"
|
||||
},
|
||||
"oneweek.duckdns.org": {
|
||||
"pruning": "-",
|
||||
"s": "50002",
|
||||
"t": "50001",
|
||||
"version": "1.4"
|
||||
},
|
||||
"orannis.com": {
|
||||
"pruning": "-",
|
||||
"s": "50002",
|
||||
|
|
|
@ -13,6 +13,7 @@ eclair {
|
|||
binding-ip = "127.0.0.1"
|
||||
port = 8080
|
||||
password = "" // password for basic auth, must be non empty if json-rpc api is enabled
|
||||
use-old-api = false
|
||||
}
|
||||
|
||||
watcher-type = "electrum"
|
||||
|
|
|
@ -22,13 +22,14 @@ import java.net.InetSocketAddress
|
|||
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, PoisonPill, Props, ReceiveTimeout, SupervisorStrategy}
|
||||
import com.typesafe.config.{Config, ConfigFactory}
|
||||
import fr.acinq.bitcoin.Crypto.PrivateKey
|
||||
import fr.acinq.bitcoin.{ByteVector32, Block}
|
||||
import fr.acinq.bitcoin.{Block, ByteVector32}
|
||||
import fr.acinq.eclair.NodeParams.ELECTRUM
|
||||
import fr.acinq.eclair.blockchain.electrum.ElectrumClient.{SSL, computeScriptHash}
|
||||
import fr.acinq.eclair.blockchain.electrum.ElectrumClientPool.ElectrumServerAddress
|
||||
import fr.acinq.eclair.blockchain.electrum.{ElectrumClient, ElectrumClientPool}
|
||||
import fr.acinq.eclair.channel._
|
||||
import fr.acinq.eclair.crypto.LocalKeyManager
|
||||
import fr.acinq.eclair.db.Databases
|
||||
import grizzled.slf4j.Logging
|
||||
|
||||
import scala.concurrent.{ExecutionContext, Future, Promise}
|
||||
|
@ -40,7 +41,8 @@ import scala.concurrent.{ExecutionContext, Future, Promise}
|
|||
* @param overrideDefaults use this parameter to programmatically override the node configuration.
|
||||
*/
|
||||
class CheckElectrumSetup(datadir: File,
|
||||
overrideDefaults: Config = ConfigFactory.empty())(implicit system: ActorSystem) extends Logging {
|
||||
overrideDefaults: Config = ConfigFactory.empty(),
|
||||
db: Option[Databases] = None)(implicit system: ActorSystem) extends Logging {
|
||||
|
||||
logger.info(s"hello!")
|
||||
logger.info(s"version=${getClass.getPackage.getImplementationVersion} commit=${getClass.getPackage.getSpecificationVersion}")
|
||||
|
@ -51,7 +53,12 @@ class CheckElectrumSetup(datadir: File,
|
|||
val config = NodeParams.loadConfiguration(datadir, overrideDefaults)
|
||||
val chain = config.getString("chain")
|
||||
val keyManager = new LocalKeyManager(PrivateKey(randomBytes(32), compressed = true).toBin, NodeParams.makeChainHash(chain))
|
||||
val nodeParams = NodeParams.makeNodeParams(datadir, config, keyManager, torAddress_opt = None)
|
||||
val database = db match {
|
||||
case Some(d) => d
|
||||
case None => Databases.sqliteJDBC(new File(datadir, chain))
|
||||
}
|
||||
|
||||
val nodeParams = NodeParams.makeNodeParams(config, keyManager, None, database)
|
||||
|
||||
logger.info(s"nodeid=${nodeParams.nodeId} alias=${nodeParams.alias}")
|
||||
logger.info(s"using chain=$chain chainHash=${nodeParams.chainHash}")
|
||||
|
@ -63,7 +70,7 @@ class CheckElectrumSetup(datadir: File,
|
|||
* false if at least one tx has been spent
|
||||
*/
|
||||
def check: Future[WatchListener.WatchResult] = {
|
||||
val channels = nodeParams.channelsDb.listChannels()
|
||||
val channels = nodeParams.db.channels.listLocalChannels()
|
||||
if (channels.isEmpty) {
|
||||
Future.successful(WatchListener.Ok)
|
||||
} else {
|
||||
|
|
|
@ -28,7 +28,7 @@ object DBCompatChecker extends Logging {
|
|||
* @param nodeParams
|
||||
*/
|
||||
def checkDBCompatibility(nodeParams: NodeParams): Unit =
|
||||
Try(nodeParams.channelsDb.listChannels()) match {
|
||||
Try(nodeParams.db.channels.listLocalChannels()) match {
|
||||
case Success(_) => {}
|
||||
case Failure(_) => throw IncompatibleDBException
|
||||
}
|
||||
|
@ -39,7 +39,7 @@ object DBCompatChecker extends Logging {
|
|||
* @param nodeParams
|
||||
*/
|
||||
def checkNetworkDBCompatibility(nodeParams: NodeParams): Unit =
|
||||
Try(nodeParams.networkDb.listChannels(), nodeParams.networkDb.listNodes(), nodeParams.networkDb.listChannelUpdates()) match {
|
||||
Try(nodeParams.db.network.listChannels(), nodeParams.db.network.listNodes(), nodeParams.db.network.listChannelUpdates()) match {
|
||||
case Success(_) => {}
|
||||
case Failure(_) => throw IncompatibleNetworkDBException
|
||||
}
|
||||
|
|
196
eclair-core/src/main/scala/fr/acinq/eclair/Eclair.scala
Normal file
196
eclair-core/src/main/scala/fr/acinq/eclair/Eclair.scala
Normal file
|
@ -0,0 +1,196 @@
|
|||
package fr.acinq.eclair
|
||||
|
||||
import akka.actor.ActorRef
|
||||
import akka.pattern._
|
||||
import akka.util.Timeout
|
||||
import fr.acinq.bitcoin.Crypto.PublicKey
|
||||
import fr.acinq.bitcoin.{ByteVector32, MilliSatoshi, Satoshi}
|
||||
import fr.acinq.eclair.channel._
|
||||
import fr.acinq.eclair.db.{NetworkFee, Stats}
|
||||
import fr.acinq.eclair.io.Peer.{GetPeerInfo, PeerInfo}
|
||||
import fr.acinq.eclair.io.{NodeURI, Peer}
|
||||
import fr.acinq.eclair.payment.PaymentLifecycle._
|
||||
import fr.acinq.eclair.payment.{PaymentLifecycle, PaymentReceived, PaymentRelayed, PaymentRequest, PaymentSent}
|
||||
import fr.acinq.eclair.router.{ChannelDesc, RouteRequest, RouteResponse}
|
||||
import fr.acinq.eclair.wire.{ChannelAnnouncement, ChannelUpdate, NodeAddress, NodeAnnouncement}
|
||||
import scodec.bits.ByteVector
|
||||
|
||||
import scala.concurrent.Future
|
||||
import scala.concurrent.duration._
|
||||
|
||||
case class GetInfoResponse(nodeId: PublicKey, alias: String, chainHash: ByteVector32, blockHeight: Int, publicAddresses: Seq[NodeAddress])
|
||||
|
||||
case class AuditResponse(sent: Seq[PaymentSent], received: Seq[PaymentReceived], relayed: Seq[PaymentRelayed])
|
||||
|
||||
trait Eclair {
|
||||
|
||||
def connect(uri: String): Future[String]
|
||||
|
||||
def open(nodeId: PublicKey, fundingSatoshis: Long, pushMsat: Option[Long], fundingFeerateSatByte: Option[Long], flags: Option[Int]): Future[String]
|
||||
|
||||
def close(channelIdentifier: Either[ByteVector32, ShortChannelId], scriptPubKey: Option[ByteVector]): Future[String]
|
||||
|
||||
def forceClose(channelIdentifier: Either[ByteVector32, ShortChannelId]): Future[String]
|
||||
|
||||
def updateRelayFee(channelId: String, feeBaseMsat: Long, feeProportionalMillionths: Long): Future[String]
|
||||
|
||||
def peersInfo(): Future[Iterable[PeerInfo]]
|
||||
|
||||
def channelsInfo(toRemoteNode: Option[PublicKey]): Future[Iterable[RES_GETINFO]]
|
||||
|
||||
def channelInfo(channelId: ByteVector32): Future[RES_GETINFO]
|
||||
|
||||
def allnodes(): Future[Iterable[NodeAnnouncement]]
|
||||
|
||||
def allchannels(): Future[Iterable[ChannelDesc]]
|
||||
|
||||
def allupdates(nodeId: Option[PublicKey]): Future[Iterable[ChannelUpdate]]
|
||||
|
||||
def receive(description: String, amountMsat: Option[Long], expire: Option[Long]): Future[String]
|
||||
|
||||
def findRoute(targetNodeId: PublicKey, amountMsat: Long, assistedRoutes: Seq[Seq[PaymentRequest.ExtraHop]] = Seq.empty): Future[RouteResponse]
|
||||
|
||||
def send(recipientNodeId: PublicKey, amountMsat: Long, paymentHash: ByteVector32, assistedRoutes: Seq[Seq[PaymentRequest.ExtraHop]] = Seq.empty, minFinalCltvExpiry: Option[Long] = None): Future[PaymentResult]
|
||||
|
||||
def checkpayment(paymentHash: ByteVector32): Future[Boolean]
|
||||
|
||||
def audit(from_opt: Option[Long], to_opt: Option[Long]): Future[AuditResponse]
|
||||
|
||||
def networkFees(from_opt: Option[Long], to_opt: Option[Long]): Future[Seq[NetworkFee]]
|
||||
|
||||
def channelStats(): Future[Seq[Stats]]
|
||||
|
||||
def getInfoResponse(): Future[GetInfoResponse]
|
||||
|
||||
}
|
||||
|
||||
/**
 * Actor-backed implementation of the [[Eclair]] API.
 *
 * Each call asks one of the node's actors (switchboard, register, router, payment handler/initiator)
 * and maps the reply to the expected type. All asks share a single 60-second timeout.
 */
class EclairImpl(appKit: Kit) extends Eclair {

  implicit val ec = appKit.system.dispatcher

  implicit val timeout = Timeout(60 seconds) // used by akka ask

  override def connect(uri: String): Future[String] = {
    (appKit.switchboard ? Peer.Connect(NodeURI.parse(uri))).mapTo[String]
  }

  override def open(nodeId: PublicKey, fundingSatoshis: Long, pushMsat: Option[Long], fundingFeerateSatByte: Option[Long], flags: Option[Int]): Future[String] = {
    // NOTE(review): fundingFeerateSatByte is forwarded as-is to fundingTxFeeratePerKw_opt; sat/byte -> sat/kw
    // conversion is presumably handled downstream — verify against Peer.OpenChannel.
    (appKit.switchboard ? Peer.OpenChannel(
      remoteNodeId = nodeId,
      fundingSatoshis = Satoshi(fundingSatoshis),
      pushMsat = pushMsat.map(MilliSatoshi).getOrElse(MilliSatoshi(0)),
      fundingTxFeeratePerKw_opt = fundingFeerateSatByte,
      channelFlags = flags.map(_.toByte))).mapTo[String]
  }

  override def close(channelIdentifier: Either[ByteVector32, ShortChannelId], scriptPubKey: Option[ByteVector]): Future[String] = {
    // channelIds are rendered with toHex so that sendToChannel can parse them back with ByteVector32.fromValidHex
    sendToChannel(channelIdentifier.fold[String](_.toHex, _.toString()), CMD_CLOSE(scriptPubKey)).mapTo[String]
  }

  override def forceClose(channelIdentifier: Either[ByteVector32, ShortChannelId]): Future[String] = {
    sendToChannel(channelIdentifier.fold[String](_.toHex, _.toString()), CMD_FORCECLOSE).mapTo[String]
  }

  override def updateRelayFee(channelId: String, feeBaseMsat: Long, feeProportionalMillionths: Long): Future[String] = {
    sendToChannel(channelId, CMD_UPDATE_RELAY_FEE(feeBaseMsat, feeProportionalMillionths)).mapTo[String]
  }

  override def peersInfo(): Future[Iterable[PeerInfo]] = for {
    peers <- (appKit.switchboard ? 'peers).mapTo[Iterable[ActorRef]]
    peerinfos <- Future.sequence(peers.map(peer => (peer ? GetPeerInfo).mapTo[PeerInfo]))
  } yield peerinfos

  override def channelsInfo(toRemoteNode: Option[PublicKey]): Future[Iterable[RES_GETINFO]] = toRemoteNode match {
    case Some(pk) => for {
      // keep only the channels whose remote node is `pk`
      channelIds <- (appKit.register ? 'channelsTo).mapTo[Map[ByteVector32, PublicKey]].map(_.filter(_._2 == pk).keys)
      // consistently use toHex (not toString) for channelIds, like the None branch below
      channels <- Future.sequence(channelIds.map(channelId => sendToChannel(channelId.toHex, CMD_GETINFO).mapTo[RES_GETINFO]))
    } yield channels
    case None => for {
      channelIds <- (appKit.register ? 'channels).mapTo[Map[ByteVector32, ActorRef]].map(_.keys)
      channels <- Future.sequence(channelIds.map(channelId => sendToChannel(channelId.toHex, CMD_GETINFO).mapTo[RES_GETINFO]))
    } yield channels
  }

  override def channelInfo(channelId: ByteVector32): Future[RES_GETINFO] = {
    // toHex for consistency with channelsInfo and with sendToChannel's hex parsing
    sendToChannel(channelId.toHex, CMD_GETINFO).mapTo[RES_GETINFO]
  }

  override def allnodes(): Future[Iterable[NodeAnnouncement]] = (appKit.router ? 'nodes).mapTo[Iterable[NodeAnnouncement]]

  override def allchannels(): Future[Iterable[ChannelDesc]] = {
    (appKit.router ? 'channels).mapTo[Iterable[ChannelAnnouncement]].map(_.map(c => ChannelDesc(c.shortChannelId, c.nodeId1, c.nodeId2)))
  }

  override def allupdates(nodeId: Option[PublicKey]): Future[Iterable[ChannelUpdate]] = nodeId match {
    case None => (appKit.router ? 'updates).mapTo[Iterable[ChannelUpdate]]
    // keep updates for channels that have `pk` on either end
    case Some(pk) => (appKit.router ? 'updatesMap).mapTo[Map[ChannelDesc, ChannelUpdate]].map(_.filter(e => e._1.a == pk || e._1.b == pk).values)
  }

  override def receive(description: String, amountMsat: Option[Long], expire: Option[Long]): Future[String] = {
    (appKit.paymentHandler ? ReceivePayment(description = description, amountMsat_opt = amountMsat.map(MilliSatoshi), expirySeconds_opt = expire)).mapTo[PaymentRequest].map { pr =>
      // return the BOLT11-serialized form of the payment request
      PaymentRequest.write(pr)
    }
  }

  override def findRoute(targetNodeId: PublicKey, amountMsat: Long, assistedRoutes: Seq[Seq[PaymentRequest.ExtraHop]] = Seq.empty): Future[RouteResponse] = {
    (appKit.router ? RouteRequest(appKit.nodeParams.nodeId, targetNodeId, amountMsat, assistedRoutes)).mapTo[RouteResponse]
  }

  override def send(recipientNodeId: PublicKey, amountMsat: Long, paymentHash: ByteVector32, assistedRoutes: Seq[Seq[PaymentRequest.ExtraHop]] = Seq.empty, minFinalCltvExpiry: Option[Long] = None): Future[PaymentResult] = {
    // only override the final CLTV expiry when the caller provided one
    val sendPayment = minFinalCltvExpiry match {
      case Some(minCltv) => SendPayment(amountMsat, paymentHash, recipientNodeId, assistedRoutes, finalCltvExpiry = minCltv)
      case None => SendPayment(amountMsat, paymentHash, recipientNodeId, assistedRoutes)
    }
    (appKit.paymentInitiator ? sendPayment).mapTo[PaymentResult].map {
      case s: PaymentSucceeded => s
      // make routing failures presentable to the caller
      case f: PaymentFailed => f.copy(failures = PaymentLifecycle.transformForUser(f.failures))
    }
  }

  override def checkpayment(paymentHash: ByteVector32): Future[Boolean] = {
    (appKit.paymentHandler ? CheckPayment(paymentHash)).mapTo[Boolean]
  }

  override def audit(from_opt: Option[Long], to_opt: Option[Long]): Future[AuditResponse] = {
    // default to the full history when bounds are not provided
    val from = from_opt.getOrElse(0L)
    val to = to_opt.getOrElse(Long.MaxValue)

    Future(AuditResponse(
      sent = appKit.nodeParams.db.audit.listSent(from, to),
      received = appKit.nodeParams.db.audit.listReceived(from, to),
      relayed = appKit.nodeParams.db.audit.listRelayed(from, to)
    ))
  }

  override def networkFees(from_opt: Option[Long], to_opt: Option[Long]): Future[Seq[NetworkFee]] = {
    // default to the full history when bounds are not provided
    val from = from_opt.getOrElse(0L)
    val to = to_opt.getOrElse(Long.MaxValue)

    Future(appKit.nodeParams.db.audit.listNetworkFees(from, to))
  }

  override def channelStats(): Future[Seq[Stats]] = Future(appKit.nodeParams.db.audit.stats)

  /**
   * Sends a request to a channel and expects a response.
   *
   * The identifier is first interpreted as a BOLT-encoded shortChannelId; if that fails we fall back to a
   * 32-byte hex channelId, and finally fail with an explicit error if neither parse succeeds.
   *
   * @param channelIdentifier can be a shortChannelId (BOLT encoded) or a channelId (32-byte hex encoded)
   * @param request           the command forwarded to the channel actor
   * @return the channel actor's reply
   */
  def sendToChannel(channelIdentifier: String, request: Any): Future[Any] =
    for {
      fwdReq <- Future(Register.ForwardShortId(ShortChannelId(channelIdentifier), request))
        .recoverWith { case _ => Future(Register.Forward(ByteVector32.fromValidHex(channelIdentifier), request)) }
        .recoverWith { case _ => Future.failed(new RuntimeException(s"invalid channel identifier '$channelIdentifier'")) }
      res <- appKit.register ? fwdReq
    } yield res

  /** Returns static node information plus the current block height. */
  override def getInfoResponse: Future[GetInfoResponse] = Future.successful(
    GetInfoResponse(nodeId = appKit.nodeParams.nodeId,
      alias = appKit.nodeParams.alias,
      chainHash = appKit.nodeParams.chainHash,
      blockHeight = Globals.blockCount.intValue(),
      publicAddresses = appKit.nodeParams.publicAddresses)
  )

}
|
|
@ -61,12 +61,7 @@ case class NodeParams(keyManager: KeyManager,
|
|||
feeProportionalMillionth: Int,
|
||||
reserveToFundingRatio: Double,
|
||||
maxReserveToFundingRatio: Double,
|
||||
channelsDb: ChannelsDb,
|
||||
peersDb: PeersDb,
|
||||
networkDb: NetworkDb,
|
||||
pendingRelayDb: PendingRelayDb,
|
||||
paymentsDb: PaymentsDb,
|
||||
auditDb: AuditDb,
|
||||
db: Databases,
|
||||
revocationTimeout: FiniteDuration,
|
||||
pingInterval: FiniteDuration,
|
||||
pingTimeout: FiniteDuration,
|
||||
|
@ -128,29 +123,11 @@ object NodeParams {
|
|||
}
|
||||
}
|
||||
|
||||
def makeNodeParams(datadir: File, config: Config, keyManager: KeyManager, torAddress_opt: Option[NodeAddress]): NodeParams = {
|
||||
|
||||
datadir.mkdirs()
|
||||
def makeNodeParams(config: Config, keyManager: KeyManager, torAddress_opt: Option[NodeAddress], database: Databases): NodeParams = {
|
||||
|
||||
val chain = config.getString("chain")
|
||||
val chainHash = makeChainHash(chain)
|
||||
|
||||
val chaindir = new File(datadir, chain)
|
||||
chaindir.mkdir()
|
||||
|
||||
val sqlite = DriverManager.getConnection(s"jdbc:sqlite:${new File(chaindir, "eclair.sqlite")}")
|
||||
SqliteUtils.obtainExclusiveLock(sqlite) // there should only be one process writing to this file
|
||||
val channelsDb = new SqliteChannelsDb(sqlite)
|
||||
val peersDb = new SqlitePeersDb(sqlite)
|
||||
val pendingRelayDb = new SqlitePendingRelayDb(sqlite)
|
||||
val paymentsDb = new SqlitePaymentsDb(sqlite)
|
||||
|
||||
val sqliteNetwork = DriverManager.getConnection(s"jdbc:sqlite:${new File(chaindir, "network.sqlite")}")
|
||||
val networkDb = new SqliteNetworkDb(sqliteNetwork)
|
||||
|
||||
val sqliteAudit = DriverManager.getConnection(s"jdbc:sqlite:${new File(chaindir, "audit.sqlite")}")
|
||||
val auditDb = new SqliteAuditDb(sqliteAudit)
|
||||
|
||||
val color = ByteVector.fromValidHex(config.getString("node-color"))
|
||||
require(color.size == 3, "color should be a 3-bytes hex buffer")
|
||||
|
||||
|
@ -219,12 +196,7 @@ object NodeParams {
|
|||
feeProportionalMillionth = config.getInt("fee-proportional-millionths"),
|
||||
reserveToFundingRatio = config.getDouble("reserve-to-funding-ratio"),
|
||||
maxReserveToFundingRatio = config.getDouble("max-reserve-to-funding-ratio"),
|
||||
channelsDb = channelsDb,
|
||||
peersDb = peersDb,
|
||||
networkDb = networkDb,
|
||||
pendingRelayDb = pendingRelayDb,
|
||||
paymentsDb = paymentsDb,
|
||||
auditDb = auditDb,
|
||||
db = database,
|
||||
revocationTimeout = FiniteDuration(config.getDuration("revocation-timeout", TimeUnit.SECONDS), TimeUnit.SECONDS),
|
||||
pingInterval = FiniteDuration(config.getDuration("ping-interval", TimeUnit.SECONDS), TimeUnit.SECONDS),
|
||||
pingTimeout = FiniteDuration(config.getDuration("ping-timeout", TimeUnit.SECONDS), TimeUnit.SECONDS),
|
||||
|
|
|
@ -35,7 +35,8 @@ import fr.acinq.eclair.blockchain.fee.{ConstantFeeProvider, _}
|
|||
import fr.acinq.eclair.blockchain.{EclairWallet, _}
|
||||
import fr.acinq.eclair.channel.Register
|
||||
import fr.acinq.eclair.crypto.LocalKeyManager
|
||||
import fr.acinq.eclair.io.{Authenticator, Switchboard}
|
||||
import fr.acinq.eclair.db.Databases
|
||||
import fr.acinq.eclair.io.{Authenticator, Server, Switchboard}
|
||||
import fr.acinq.eclair.payment._
|
||||
import fr.acinq.eclair.router._
|
||||
import grizzled.slf4j.Logging
|
||||
|
@ -55,18 +56,35 @@ import scala.concurrent.duration._
|
|||
*/
|
||||
class Setup(datadir: File,
|
||||
overrideDefaults: Config = ConfigFactory.empty(),
|
||||
seed_opt: Option[ByteVector] = None)(implicit system: ActorSystem) extends Logging {
|
||||
seed_opt: Option[ByteVector] = None,
|
||||
db: Option[Databases] = None)(implicit system: ActorSystem) extends Logging {
|
||||
|
||||
logger.info(s"hello!")
|
||||
logger.info(s"version=${getClass.getPackage.getImplementationVersion} commit=${getClass.getPackage.getSpecificationVersion}")
|
||||
logger.info(s"datadir=${datadir.getCanonicalPath}")
|
||||
|
||||
|
||||
datadir.mkdirs()
|
||||
val config = NodeParams.loadConfiguration(datadir, overrideDefaults)
|
||||
val seed = seed_opt.getOrElse(NodeParams.getSeed(datadir))
|
||||
val chain = config.getString("chain")
|
||||
val keyManager = new LocalKeyManager(seed, NodeParams.makeChainHash(chain))
|
||||
val nodeParams = NodeParams.makeNodeParams(datadir, config, keyManager, torAddress_opt = None)
|
||||
|
||||
val database = db match {
|
||||
case Some(d) => d
|
||||
case None => Databases.sqliteJDBC(new File(datadir, chain))
|
||||
}
|
||||
|
||||
val nodeParams = NodeParams.makeNodeParams(config, keyManager, None, database)
|
||||
|
||||
val serverBindingAddress = new InetSocketAddress(
|
||||
config.getString("server.binding-ip"),
|
||||
config.getInt("server.port"))
|
||||
|
||||
// early checks
|
||||
DBCompatChecker.checkDBCompatibility(nodeParams)
|
||||
DBCompatChecker.checkNetworkDBCompatibility(nodeParams)
|
||||
PortChecker.checkAvailable(serverBindingAddress)
|
||||
|
||||
logger.info(s"nodeid=${nodeParams.nodeId} alias=${nodeParams.alias}")
|
||||
logger.info(s"using chain=$chain chainHash=${nodeParams.chainHash}")
|
||||
|
|
|
@ -24,6 +24,7 @@ import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey}
|
|||
import fr.acinq.bitcoin.{Satoshi, Script, Transaction, TxIn, TxOut}
|
||||
import fr.acinq.eclair.blockchain.{UtxoStatus, ValidateRequest, ValidateResult}
|
||||
import fr.acinq.eclair.crypto.LocalKeyManager
|
||||
import fr.acinq.eclair.db.Databases
|
||||
import fr.acinq.eclair.io.{Authenticator, NodeURI, Peer}
|
||||
import fr.acinq.eclair.router._
|
||||
import fr.acinq.eclair.transactions.Scripts
|
||||
|
@ -40,7 +41,8 @@ import scala.concurrent.{Future, Promise}
|
|||
*/
|
||||
class SyncLiteSetup(datadir: File,
|
||||
overrideDefaults: Config = ConfigFactory.empty(),
|
||||
syncNodeURI: NodeURI)(implicit system: ActorSystem) extends Logging {
|
||||
syncNodeURI: NodeURI,
|
||||
db: Option[Databases] = None)(implicit system: ActorSystem) extends Logging {
|
||||
|
||||
logger.info(s"hello!")
|
||||
logger.info(s"version=${getClass.getPackage.getImplementationVersion} commit=${getClass.getPackage.getSpecificationVersion}")
|
||||
|
@ -49,7 +51,11 @@ class SyncLiteSetup(datadir: File,
|
|||
val config = NodeParams.loadConfiguration(datadir, overrideDefaults)
|
||||
val chain = config.getString("chain")
|
||||
val keyManager = new LocalKeyManager(PrivateKey(randomBytes(32), compressed = true).toBin, NodeParams.makeChainHash(chain))
|
||||
val nodeParams = NodeParams.makeNodeParams(datadir, config, keyManager, torAddress_opt = None)
|
||||
val database = db match {
|
||||
case Some(d) => d
|
||||
case None => Databases.sqliteJDBC(new File(datadir, chain))
|
||||
}
|
||||
val nodeParams = NodeParams.makeNodeParams(config, keyManager, None, database)
|
||||
|
||||
logger.info(s"nodeid=${nodeParams.nodeId} alias=${nodeParams.alias}")
|
||||
logger.info(s"using chain=$chain chainHash=${nodeParams.chainHash}")
|
||||
|
|
|
@ -28,7 +28,7 @@ import fr.acinq.eclair.channel.BITCOIN_PARENT_TX_CONFIRMED
|
|||
import fr.acinq.eclair.transactions.Scripts
|
||||
import scodec.bits.ByteVector
|
||||
|
||||
import scala.collection.SortedMap
|
||||
import scala.collection.{Set, SortedMap}
|
||||
import scala.concurrent.duration._
|
||||
import scala.concurrent.{ExecutionContext, Future}
|
||||
import scala.util.Try
|
||||
|
@ -41,7 +41,7 @@ import scala.util.Try
|
|||
*/
|
||||
class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext = ExecutionContext.global) extends Actor with ActorLogging {
|
||||
|
||||
import ZmqWatcher.TickNewBlock
|
||||
import ZmqWatcher._
|
||||
|
||||
context.system.eventStream.subscribe(self, classOf[BlockchainEvent])
|
||||
|
||||
|
@ -50,17 +50,21 @@ class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext =
|
|||
|
||||
case class TriggerEvent(w: Watch, e: WatchEvent)
|
||||
|
||||
def receive: Receive = watching(Set(), SortedMap(), None)
|
||||
def receive: Receive = watching(Set(), Map(), SortedMap(), None)
|
||||
|
||||
def watching(watches: Set[Watch], block2tx: SortedMap[Long, Seq[Transaction]], nextTick: Option[Cancellable]): Receive = {
|
||||
def watching(watches: Set[Watch], watchedUtxos: Map[OutPoint, Set[Watch]], block2tx: SortedMap[Long, Seq[Transaction]], nextTick: Option[Cancellable]): Receive = {
|
||||
|
||||
case NewTransaction(tx) =>
|
||||
//log.debug(s"analyzing txid=${tx.txid} tx=$tx")
|
||||
watches.collect {
|
||||
case w@WatchSpentBasic(_, txid, outputIndex, _, event) if tx.txIn.exists(i => i.outPoint.txid == txid && i.outPoint.index == outputIndex) =>
|
||||
self ! TriggerEvent(w, WatchEventSpentBasic(event))
|
||||
case w@WatchSpent(_, txid, outputIndex, _, event) if tx.txIn.exists(i => i.outPoint.txid == txid && i.outPoint.index == outputIndex) =>
|
||||
self ! TriggerEvent(w, WatchEventSpent(event, tx))
|
||||
log.debug(s"analyzing txid={} tx={}", tx.txid, tx)
|
||||
tx.txIn
|
||||
.map(_.outPoint)
|
||||
.flatMap(watchedUtxos.get)
|
||||
.flatten // List[Watch] -> Watch
|
||||
.collect {
|
||||
case w: WatchSpentBasic =>
|
||||
self ! TriggerEvent(w, WatchEventSpentBasic(w.event))
|
||||
case w: WatchSpent =>
|
||||
self ! TriggerEvent(w, WatchEventSpent(w.event, tx))
|
||||
}
|
||||
|
||||
case NewBlock(block) =>
|
||||
|
@ -70,7 +74,7 @@ class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext =
|
|||
log.debug(s"scheduling a new task to check on tx confirmations")
|
||||
// we do this to avoid herd effects in testing when generating a lots of blocks in a row
|
||||
val task = context.system.scheduler.scheduleOnce(2 seconds, self, TickNewBlock)
|
||||
context become watching(watches, block2tx, Some(task))
|
||||
context become watching(watches, watchedUtxos, block2tx, Some(task))
|
||||
|
||||
case TickNewBlock =>
|
||||
client.getBlockCount.map {
|
||||
|
@ -81,19 +85,24 @@ class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext =
|
|||
}
|
||||
// TODO: beware of the herd effect
|
||||
watches.collect { case w: WatchConfirmed => checkConfirmed(w) }
|
||||
context become (watching(watches, block2tx, None))
|
||||
context become watching(watches, watchedUtxos, block2tx, None)
|
||||
|
||||
case TriggerEvent(w, e) if watches.contains(w) =>
|
||||
log.info(s"triggering $w")
|
||||
w.channel ! e
|
||||
// NB: WatchSpent are permanent because we need to detect multiple spending of the funding tx
|
||||
// They are never cleaned up but it is not a big deal for now (1 channel == 1 watch)
|
||||
if (!w.isInstanceOf[WatchSpent]) context.become(watching(watches - w, block2tx, None))
|
||||
w match {
|
||||
case _: WatchSpent =>
|
||||
// NB: WatchSpent are permanent because we need to detect multiple spending of the funding tx
|
||||
// They are never cleaned up but it is not a big deal for now (1 channel == 1 watch)
|
||||
()
|
||||
case _ =>
|
||||
context become watching(watches - w, removeWatchedUtxos(watchedUtxos, w), block2tx, None)
|
||||
}
|
||||
|
||||
case CurrentBlockCount(count) => {
|
||||
val toPublish = block2tx.filterKeys(_ <= count)
|
||||
toPublish.values.flatten.map(tx => publish(tx))
|
||||
context.become(watching(watches, block2tx -- toPublish.keys, None))
|
||||
context become watching(watches, watchedUtxos, block2tx -- toPublish.keys, None)
|
||||
}
|
||||
|
||||
case w: Watch if !watches.contains(w) =>
|
||||
|
@ -139,7 +148,7 @@ class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext =
|
|||
|
||||
log.debug(s"adding watch $w for $sender")
|
||||
context.watch(w.channel)
|
||||
context.become(watching(watches + w, block2tx, nextTick))
|
||||
context become watching(watches + w, addWatchedUtxos(watchedUtxos, w), block2tx, nextTick)
|
||||
|
||||
case PublishAsap(tx) =>
|
||||
val blockCount = Globals.blockCount.get()
|
||||
|
@ -154,7 +163,7 @@ class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext =
|
|||
} else if (cltvTimeout > blockCount) {
|
||||
log.info(s"delaying publication of txid=${tx.txid} until block=$cltvTimeout (curblock=$blockCount)")
|
||||
val block2tx1 = block2tx.updated(cltvTimeout, block2tx.getOrElse(cltvTimeout, Seq.empty[Transaction]) :+ tx)
|
||||
context.become(watching(watches, block2tx1, None))
|
||||
context become watching(watches, watchedUtxos, block2tx1, None)
|
||||
} else publish(tx)
|
||||
|
||||
case WatchEventConfirmed(BITCOIN_PARENT_TX_CONFIRMED(tx), blockHeight, _) =>
|
||||
|
@ -165,7 +174,7 @@ class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext =
|
|||
if (absTimeout > blockCount) {
|
||||
log.info(s"delaying publication of txid=${tx.txid} until block=$absTimeout (curblock=$blockCount)")
|
||||
val block2tx1 = block2tx.updated(absTimeout, block2tx.getOrElse(absTimeout, Seq.empty[Transaction]) :+ tx)
|
||||
context.become(watching(watches, block2tx1, None))
|
||||
context become watching(watches, watchedUtxos, block2tx1, None)
|
||||
} else publish(tx)
|
||||
|
||||
case ValidateRequest(ann) => client.validate(ann).pipeTo(sender)
|
||||
|
@ -173,7 +182,8 @@ class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext =
|
|||
case Terminated(channel) =>
|
||||
// we remove watches associated to dead actor
|
||||
val deprecatedWatches = watches.filter(_.channel == channel)
|
||||
context.become(watching(watches -- deprecatedWatches, block2tx, None))
|
||||
val watchedUtxos1 = deprecatedWatches.foldLeft(watchedUtxos) { case (m, w) => removeWatchedUtxos(m, w) }
|
||||
context.become(watching(watches -- deprecatedWatches, watchedUtxos1, block2tx, None))
|
||||
|
||||
case 'watches => sender ! watches
|
||||
|
||||
|
@ -213,4 +223,38 @@ object ZmqWatcher {
|
|||
|
||||
case object TickNewBlock
|
||||
|
||||
/** Returns the outpoint watched by `w` when it is a spend watch (WatchSpent / WatchSpentBasic), None otherwise. */
def utxo(w: Watch): Option[OutPoint] =
  w match {
    // txId bytes are reversed when building the OutPoint — presumably a txid byte-order conversion; TODO confirm
    case w: WatchSpent => Some(OutPoint(w.txId.reverse, w.outputIndex))
    case w: WatchSpentBasic => Some(OutPoint(w.txId.reverse, w.outputIndex))
    case _ => None
  }
|
||||
|
||||
/**
|
||||
* The resulting map allows checking spent txes in constant time wrt number of watchers
|
||||
*
|
||||
* @param watches
|
||||
* @return
|
||||
*/
|
||||
/**
 * Indexes a spend watch by the outpoint it watches, so spent outputs can be looked up in
 * constant time w.r.t. the number of watches.
 *
 * @param m current outpoint -> watches index
 * @param w watch to add; watches that do not target an outpoint leave the index unchanged
 * @return the updated index
 */
def addWatchedUtxos(m: Map[OutPoint, Set[Watch]], w: Watch): Map[OutPoint, Set[Watch]] =
  utxo(w) match {
    case Some(outPoint) => m.updated(outPoint, m.getOrElse(outPoint, Set.empty[Watch]) + w)
    case None => m
  }
|
||||
|
||||
/**
 * Removes a watch from the outpoint index built by [[addWatchedUtxos]].
 *
 * @param m current outpoint -> watches index
 * @param w watch to remove; the outpoint entry is dropped entirely when no watch remains for it
 * @return the updated index
 */
def removeWatchedUtxos(m: Map[OutPoint, Set[Watch]], w: Watch): Map[OutPoint, Set[Watch]] =
  utxo(w) match {
    case Some(outPoint) =>
      val remaining = m.getOrElse(outPoint, Set.empty[Watch]) - w
      if (remaining.isEmpty) m - outPoint else m.updated(outPoint, remaining)
    case None => m
  }
|
||||
|
||||
}
|
|
@ -42,6 +42,7 @@ import scodec.bits.ByteVector
|
|||
import scala.annotation.tailrec
|
||||
import scala.concurrent.ExecutionContext
|
||||
import scala.concurrent.duration._
|
||||
import scala.util.{Failure, Success, Try}
|
||||
|
||||
/**
|
||||
* For later optimizations, see http://normanmaurer.me/presentations/2014-facebook-eng-netty/slides.html
|
||||
|
@ -599,9 +600,15 @@ object ElectrumClient {
|
|||
case _ => ScriptHashSubscriptionResponse(scriptHash, "")
|
||||
}
|
||||
case BroadcastTransaction(tx) =>
|
||||
val JString(txid) = json.result
|
||||
require(ByteVector32.fromValidHex(txid) == tx.txid)
|
||||
BroadcastTransactionResponse(tx, None)
|
||||
val JString(message) = json.result
|
||||
// if we got here, it means that the server's response does not contain an error and message should be our
|
||||
// transaction id. However, it seems that at least on testnet some servers still use an older version of the
|
||||
// Electrum protocol and return an error message in the result field
|
||||
Try(ByteVector32.fromValidHex(message)) match {
|
||||
case Success(txid) if txid == tx.txid => BroadcastTransactionResponse(tx, None)
|
||||
case Success(txid) => BroadcastTransactionResponse(tx, Some(Error(1, s"response txid $txid does not match request txid ${tx.txid}")))
|
||||
case Failure(_) => BroadcastTransactionResponse(tx, Some(Error(1, message)))
|
||||
}
|
||||
case GetHeader(height) =>
|
||||
val JString(hex) = json.result
|
||||
GetHeaderResponse(height, BlockHeader.read(hex))
|
||||
|
|
|
@ -106,7 +106,7 @@ class ElectrumClientPool(serverAddresses: Set[ElectrumServerAddress])(implicit v
|
|||
|
||||
whenUnhandled {
|
||||
case Event(Connect, _) =>
|
||||
Random.shuffle(serverAddresses.toSeq diff addresses.values.toSeq).headOption match {
|
||||
pickAddress(serverAddresses, addresses.values.toSet) match {
|
||||
case Some(ElectrumServerAddress(address, ssl)) =>
|
||||
val resolved = new InetSocketAddress(address.getHostName, address.getPort)
|
||||
val client = context.actorOf(Props(new ElectrumClient(resolved, ssl)))
|
||||
|
@ -211,6 +211,16 @@ object ElectrumClientPool {
|
|||
stream.close()
|
||||
}
|
||||
|
||||
/**
 * Picks an electrum server to connect to.
 *
 * @param serverAddresses all addresses to choose from
 * @param usedAddresses   addresses of current connections
 * @return a random address that we're not connected to yet, or None if all of them are already in use
 */
def pickAddress(serverAddresses: Set[ElectrumServerAddress], usedAddresses: Set[InetSocketAddress]): Option[ElectrumServerAddress] = {
  // NOTE(review): `adress` appears to be the actual (misspelled) field name on ElectrumServerAddress —
  // do not "fix" the spelling here without renaming the field at its declaration.
  Random.shuffle(serverAddresses.filterNot(a => usedAddresses.contains(a.adress)).toSeq).headOption
}
|
||||
|
||||
// @formatter:off
|
||||
sealed trait State
|
||||
case object Disconnected extends State
|
||||
|
|
|
@ -83,7 +83,7 @@ object Channel {
|
|||
|
||||
}
|
||||
|
||||
class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: PublicKey, blockchain: ActorRef, router: ActorRef, relayer: ActorRef, origin_opt: Option[ActorRef] = None)(implicit ec: ExecutionContext = ExecutionContext.Implicits.global) extends FSM[State, Data] with FSMDiagnosticActorLogging[State, Data] {
|
||||
class Channel(val nodeParams: NodeParams, val wallet: EclairWallet, remoteNodeId: PublicKey, blockchain: ActorRef, router: ActorRef, relayer: ActorRef, origin_opt: Option[ActorRef] = None)(implicit ec: ExecutionContext = ExecutionContext.Implicits.global) extends FSM[State, Data] with FSMDiagnosticActorLogging[State, Data] {
|
||||
|
||||
import Channel._
|
||||
import nodeParams.keyManager
|
||||
|
@ -466,6 +466,12 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
|
|||
wallet.rollback(d.fundingTx)
|
||||
replyToUser(Left(RemoteError(e)))
|
||||
handleRemoteError(e, d)
|
||||
|
||||
case Event(INPUT_DISCONNECTED, d: DATA_WAIT_FOR_FUNDING_SIGNED) =>
|
||||
// we rollback the funding tx, it will never be published
|
||||
wallet.rollback(d.fundingTx)
|
||||
replyToUser(Left(LocalError(new RuntimeException("disconnected"))))
|
||||
goto(CLOSED)
|
||||
})
|
||||
|
||||
when(WAIT_FOR_FUNDING_CONFIRMED)(handleExceptions {
|
||||
|
@ -645,7 +651,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
|
|||
trimmedHtlcs collect {
|
||||
case DirectedHtlc(_, u) =>
|
||||
log.info(s"adding paymentHash=${u.paymentHash} cltvExpiry=${u.cltvExpiry} to htlcs db for commitNumber=$nextCommitNumber")
|
||||
nodeParams.channelsDb.addOrUpdateHtlcInfo(d.channelId, nextCommitNumber, u.paymentHash, u.cltvExpiry)
|
||||
nodeParams.db.channels.addOrUpdateHtlcInfo(d.channelId, nextCommitNumber, u.paymentHash, u.cltvExpiry)
|
||||
}
|
||||
if (!Helpers.aboveReserve(d.commitments) && Helpers.aboveReserve(commitments1)) {
|
||||
// we just went above reserve (can't go below), let's refresh our channel_update to enable/disable it accordingly
|
||||
|
@ -1300,7 +1306,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
|
|||
stateData match {
|
||||
case d: HasCommitments =>
|
||||
log.info(s"deleting database record for channelId=${d.channelId}")
|
||||
nodeParams.channelsDb.removeChannel(d.channelId)
|
||||
nodeParams.db.channels.removeChannel(d.channelId)
|
||||
case _ => {}
|
||||
}
|
||||
log.info("shutting down")
|
||||
|
@ -1903,7 +1909,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
|
|||
def handleRemoteSpentOther(tx: Transaction, d: HasCommitments) = {
|
||||
log.warning(s"funding tx spent in txid=${tx.txid}")
|
||||
|
||||
Helpers.Closing.claimRevokedRemoteCommitTxOutputs(keyManager, d.commitments, tx, nodeParams.channelsDb) match {
|
||||
Helpers.Closing.claimRevokedRemoteCommitTxOutputs(keyManager, d.commitments, tx, nodeParams.db.channels) match {
|
||||
case Some(revokedCommitPublished) =>
|
||||
log.warning(s"txid=${tx.txid} was a revoked commitment, publishing the penalty tx")
|
||||
val exc = FundingTxSpent(d.channelId, tx)
|
||||
|
@ -2061,7 +2067,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
|
|||
|
||||
def store[T](d: T)(implicit tp: T <:< HasCommitments): T = {
|
||||
log.debug(s"updating database record for channelId={}", d.channelId)
|
||||
nodeParams.channelsDb.addOrUpdateChannel(d)
|
||||
nodeParams.db.channels.addOrUpdateChannel(d)
|
||||
context.system.eventStream.publish(ChannelPersisted(self, remoteNodeId, d.channelId, d))
|
||||
d
|
||||
}
|
||||
|
|
|
@ -25,7 +25,7 @@ trait ChannelsDb {
|
|||
|
||||
def removeChannel(channelId: ByteVector32)
|
||||
|
||||
def listChannels(): Seq[HasCommitments]
|
||||
def listLocalChannels(): Seq[HasCommitments]
|
||||
|
||||
def addOrUpdateHtlcInfo(channelId: ByteVector32, commitmentNumber: Long, paymentHash: ByteVector32, cltvExpiry: Long)
|
||||
|
||||
|
|
|
@ -0,0 +1,50 @@
|
|||
package fr.acinq.eclair.db
|
||||
|
||||
import java.io.File
|
||||
import java.sql.{Connection, DriverManager}
|
||||
|
||||
import fr.acinq.eclair.db.sqlite._
|
||||
|
||||
/**
 * Aggregates all the databases used by an eclair node.
 * See [[Databases.databaseByConnections]] for how each member is wired to a sqlite connection.
 */
trait Databases {

  // public network graph data
  val network: NetworkDb

  // audit records (payments sent/received/relayed, network fees, stats)
  val audit: AuditDb

  // local channel data
  val channels: ChannelsDb

  // known peers
  val peers: PeersDb

  // payment data
  val payments: PaymentsDb

  // htlcs pending relay
  val pendingRelay: PendingRelayDb

}
|
||||
|
||||
object Databases {

  /**
   * Given a parent folder it creates or loads all the databases from a JDBC connection.
   *
   * Three sqlite files are used: eclair.sqlite (channels, peers, payments, pending relay),
   * network.sqlite and audit.sqlite. An exclusive lock is taken on eclair.sqlite so that only
   * one process can write to it.
   *
   * @param dbdir parent folder for the sqlite files; created if missing
   * @return the wired [[Databases]] instance
   */
  def sqliteJDBC(dbdir: File): Databases = {
    // NOTE(review): mkdir() does not create parent directories — callers are expected to have
    // created the datadir already (Setup calls datadir.mkdirs() before reaching this point).
    dbdir.mkdir()
    val sqliteEclair = DriverManager.getConnection(s"jdbc:sqlite:${new File(dbdir, "eclair.sqlite")}")
    val sqliteNetwork = DriverManager.getConnection(s"jdbc:sqlite:${new File(dbdir, "network.sqlite")}")
    val sqliteAudit = DriverManager.getConnection(s"jdbc:sqlite:${new File(dbdir, "audit.sqlite")}")
    SqliteUtils.obtainExclusiveLock(sqliteEclair) // there should only be one process writing to this file

    databaseByConnections(sqliteAudit, sqliteNetwork, sqliteEclair)
  }

  /**
   * Wires each database to its connection: audit -> auditJdbc, network -> networkJdbc,
   * and everything else (channels, peers, payments, pending relay) -> eclairJdbc.
   */
  def databaseByConnections(auditJdbc: Connection, networkJdbc: Connection, eclairJdbc: Connection) = new Databases {
    override val network = new SqliteNetworkDb(networkJdbc)
    override val audit = new SqliteAuditDb(auditJdbc)
    override val channels = new SqliteChannelsDb(eclairJdbc)
    override val peers = new SqlitePeersDb(eclairJdbc)
    override val payments = new SqlitePaymentsDb(eclairJdbc)
    override val pendingRelay = new SqlitePendingRelayDb(eclairJdbc)
  }

}
|
|
@ -73,7 +73,7 @@ class SqliteChannelsDb(sqlite: Connection) extends ChannelsDb {
|
|||
}
|
||||
}
|
||||
|
||||
override def listChannels(): Seq[HasCommitments] = {
|
||||
override def listLocalChannels(): Seq[HasCommitments] = {
|
||||
using(sqlite.createStatement) { statement =>
|
||||
val rs = statement.executeQuery("SELECT data FROM local_channels")
|
||||
codecSequence(rs, stateDataCodec)
|
||||
|
|
|
@ -27,6 +27,7 @@ import fr.acinq.bitcoin.{ByteVector32, DeterministicWallet, MilliSatoshi, Protoc
|
|||
import fr.acinq.eclair.blockchain.EclairWallet
|
||||
import fr.acinq.eclair.channel._
|
||||
import fr.acinq.eclair.crypto.TransportHandler
|
||||
import fr.acinq.eclair.secureRandom
|
||||
import fr.acinq.eclair.router._
|
||||
import fr.acinq.eclair.wire._
|
||||
import fr.acinq.eclair.{wire, _}
|
||||
|
@ -96,7 +97,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
|
|||
val address_opt = if (outgoing) {
|
||||
// we store the node address upon successful outgoing connection, so we can reconnect later
|
||||
// any previous address is overwritten
|
||||
NodeAddress.fromParts(address.getHostString, address.getPort).map(nodeAddress => nodeParams.peersDb.addOrUpdatePeer(remoteNodeId, nodeAddress))
|
||||
NodeAddress.fromParts(address.getHostString, address.getPort).map(nodeAddress => nodeParams.db.peers.addOrUpdatePeer(remoteNodeId, nodeAddress))
|
||||
Some(address)
|
||||
} else None
|
||||
|
||||
|
@ -156,7 +157,10 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
|
|||
|
||||
// let's bring existing/requested channels online
|
||||
d.channels.values.toSet[ActorRef].foreach(_ ! INPUT_RECONNECTED(d.transport, d.localInit, remoteInit)) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
|
||||
goto(CONNECTED) using ConnectedData(d.address_opt, d.transport, d.localInit, remoteInit, d.channels.map { case (k: ChannelId, v) => (k, v) }) forMax (30 seconds) // forMax will trigger a StateTimeout
|
||||
// we will delay all rebroadcasts with this value in order to prevent herd effects (each peer has a different delay)
|
||||
val rebroadcastDelay = Random.nextInt(nodeParams.routerConf.routerBroadcastInterval.toSeconds.toInt).seconds
|
||||
log.info(s"rebroadcast will be delayed by $rebroadcastDelay")
|
||||
goto(CONNECTED) using ConnectedData(d.address_opt, d.transport, d.localInit, remoteInit, d.channels.map { case (k: ChannelId, v) => (k, v) }, rebroadcastDelay) forMax (30 seconds) // forMax will trigger a StateTimeout
|
||||
} else {
|
||||
log.warning(s"incompatible features, disconnecting")
|
||||
d.origin_opt.foreach(origin => origin ! Status.Failure(new RuntimeException("incompatible features")))
|
||||
|
@ -246,7 +250,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
|
|||
log.debug(s"received pong with latency=$latency")
|
||||
cancelTimer(PingTimeout.toString())
|
||||
// pings are sent periodically with some randomization
|
||||
val nextDelay = nodeParams.pingInterval + secureRandom.nextInt(10).seconds
|
||||
val nextDelay = nodeParams.pingInterval + Random.nextInt(10).seconds
|
||||
setTimer(SendPing.toString, SendPing, nextDelay, repeat = false)
|
||||
case None =>
|
||||
log.debug(s"received unexpected pong with size=${data.length}")
|
||||
|
@ -331,6 +335,10 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
|
|||
stay
|
||||
|
||||
case Event(rebroadcast: Rebroadcast, d: ConnectedData) =>
|
||||
context.system.scheduler.scheduleOnce(d.rebroadcastDelay, self, DelayedRebroadcast(rebroadcast))(context.dispatcher)
|
||||
stay
|
||||
|
||||
case Event(DelayedRebroadcast(rebroadcast), d: ConnectedData) =>
|
||||
|
||||
/**
|
||||
* Send and count in a single iteration
|
||||
|
@ -467,6 +475,12 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
|
|||
sender ! PeerInfo(remoteNodeId, stateName.toString, d.address_opt, d.channels.values.toSet.size) // we use toSet to dedup because a channel can have a TemporaryChannelId + a ChannelId
|
||||
stay
|
||||
|
||||
case Event(_: Rebroadcast, _) => stay // ignored
|
||||
|
||||
case Event(_: DelayedRebroadcast, _) => stay // ignored
|
||||
|
||||
case Event(_: RoutingState, _) => stay // ignored
|
||||
|
||||
case Event(_: TransportHandler.ReadAck, _) => stay // ignored
|
||||
|
||||
case Event(Peer.Reconnect, _) => stay // we got connected in the meantime
|
||||
|
@ -476,6 +490,8 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
|
|||
case Event(_: Pong, _) => stay // we got disconnected before receiving the pong
|
||||
|
||||
case Event(_: PingTimeout, _) => stay // we got disconnected after sending a ping
|
||||
|
||||
case Event(_: BadMessage, _) => stay // we got disconnected while syncing
|
||||
}
|
||||
|
||||
onTransition {
|
||||
|
@ -499,7 +515,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
|
|||
|
||||
def stopPeer() = {
|
||||
log.info("removing peer from db")
|
||||
nodeParams.peersDb.removePeer(remoteNodeId)
|
||||
nodeParams.db.peers.removePeer(remoteNodeId)
|
||||
stop(FSM.Normal)
|
||||
}
|
||||
|
||||
|
@ -540,7 +556,7 @@ object Peer {
|
|||
case class Nothing() extends Data { override def address_opt = None; override def channels = Map.empty }
|
||||
case class DisconnectedData(address_opt: Option[InetSocketAddress], channels: Map[FinalChannelId, ActorRef], attempts: Int = 0) extends Data
|
||||
case class InitializingData(address_opt: Option[InetSocketAddress], transport: ActorRef, channels: Map[FinalChannelId, ActorRef], origin_opt: Option[ActorRef], localInit: wire.Init) extends Data
|
||||
case class ConnectedData(address_opt: Option[InetSocketAddress], transport: ActorRef, localInit: wire.Init, remoteInit: wire.Init, channels: Map[ChannelId, ActorRef], gossipTimestampFilter: Option[GossipTimestampFilter] = None, behavior: Behavior = Behavior(), expectedPong_opt: Option[ExpectedPong] = None) extends Data
|
||||
case class ConnectedData(address_opt: Option[InetSocketAddress], transport: ActorRef, localInit: wire.Init, remoteInit: wire.Init, channels: Map[ChannelId, ActorRef], rebroadcastDelay: FiniteDuration, gossipTimestampFilter: Option[GossipTimestampFilter] = None, behavior: Behavior = Behavior(), expectedPong_opt: Option[ExpectedPong] = None) extends Data
|
||||
case class ExpectedPong(ping: Ping, timestamp: Long = Platform.currentTime)
|
||||
case class PingTimeout(ping: Ping)
|
||||
|
||||
|
@ -568,6 +584,8 @@ object Peer {
|
|||
|
||||
case class PeerRoutingMessage(transport: ActorRef, remoteNodeId: PublicKey, message: RoutingMessage)
|
||||
|
||||
case class DelayedRebroadcast(rebroadcast: Rebroadcast)
|
||||
|
||||
sealed trait BadMessage
|
||||
case class InvalidSignature(r: RoutingMessage) extends BadMessage
|
||||
case class InvalidAnnouncement(c: ChannelAnnouncement) extends BadMessage
|
||||
|
|
|
@ -43,8 +43,8 @@ class Switchboard(nodeParams: NodeParams, authenticator: ActorRef, watcher: Acto
|
|||
|
||||
// we load peers and channels from database
|
||||
{
|
||||
val channels = nodeParams.channelsDb.listChannels()
|
||||
val peers = nodeParams.peersDb.listPeers()
|
||||
val channels = nodeParams.db.channels.listLocalChannels()
|
||||
val peers = nodeParams.db.peers.listPeers()
|
||||
|
||||
checkBrokenHtlcsLink(channels, nodeParams.privateKey) match {
|
||||
case Nil => ()
|
||||
|
|
|
@ -27,7 +27,7 @@ import scala.concurrent.duration._
|
|||
|
||||
class Auditor(nodeParams: NodeParams) extends Actor with ActorLogging {
|
||||
|
||||
val db = nodeParams.auditDb
|
||||
val db = nodeParams.db.audit
|
||||
|
||||
context.system.eventStream.subscribe(self, classOf[PaymentEvent])
|
||||
context.system.eventStream.subscribe(self, classOf[NetworkFeePaid])
|
||||
|
|
|
@ -24,7 +24,7 @@ import fr.acinq.eclair.channel._
|
|||
class CommandBuffer(nodeParams: NodeParams, register: ActorRef) extends Actor with ActorLogging {
|
||||
|
||||
import CommandBuffer._
|
||||
import nodeParams.pendingRelayDb
|
||||
import nodeParams.db._
|
||||
|
||||
context.system.eventStream.subscribe(self, classOf[ChannelStateChanged])
|
||||
|
||||
|
@ -34,17 +34,17 @@ class CommandBuffer(nodeParams: NodeParams, register: ActorRef) extends Actor wi
|
|||
// save command in db
|
||||
register forward Register.Forward(channelId, cmd)
|
||||
// we also store the preimage in a db (note that this happens *after* forwarding the fulfill to the channel, so we don't add latency)
|
||||
pendingRelayDb.addPendingRelay(channelId, htlcId, cmd)
|
||||
pendingRelay.addPendingRelay(channelId, htlcId, cmd)
|
||||
|
||||
case CommandAck(channelId, htlcId) =>
|
||||
//delete from db
|
||||
log.debug(s"fulfill/fail acked for channelId=$channelId htlcId=$htlcId")
|
||||
pendingRelayDb.removePendingRelay(channelId, htlcId)
|
||||
pendingRelay.removePendingRelay(channelId, htlcId)
|
||||
|
||||
case ChannelStateChanged(channel, _, _, WAIT_FOR_INIT_INTERNAL | OFFLINE | SYNCING, NORMAL | SHUTDOWN | CLOSING, d: HasCommitments) =>
|
||||
import d.channelId
|
||||
// if channel is in a state where it can have pending htlcs, we send them the fulfills we know of
|
||||
pendingRelayDb.listPendingRelay(channelId) match {
|
||||
pendingRelay.listPendingRelay(channelId) match {
|
||||
case Nil => ()
|
||||
case msgs =>
|
||||
log.info(s"re-sending ${msgs.size} unacked fulfills/fails to channel $channelId")
|
||||
|
|
|
@ -65,7 +65,7 @@ class LocalPaymentHandler(nodeParams: NodeParams) extends Actor with ActorLoggin
|
|||
} recover { case t => sender ! Status.Failure(t) }
|
||||
|
||||
case CheckPayment(paymentHash) =>
|
||||
nodeParams.paymentsDb.findByPaymentHash(paymentHash) match {
|
||||
nodeParams.db.payments.findByPaymentHash(paymentHash) match {
|
||||
case Some(_) => sender ! true
|
||||
case _ => sender ! false
|
||||
}
|
||||
|
@ -92,7 +92,7 @@ class LocalPaymentHandler(nodeParams: NodeParams) extends Actor with ActorLoggin
|
|||
case _ =>
|
||||
log.info(s"received payment for paymentHash=${htlc.paymentHash} amountMsat=${htlc.amountMsat}")
|
||||
// amount is correct or was not specified in the payment request
|
||||
nodeParams.paymentsDb.addPayment(Payment(htlc.paymentHash, htlc.amountMsat, Platform.currentTime / 1000))
|
||||
nodeParams.db.payments.addPayment(Payment(htlc.paymentHash, htlc.amountMsat, Platform.currentTime / 1000))
|
||||
sender ! CMD_FULFILL_HTLC(htlc.id, paymentPreimage, commit = true)
|
||||
context.system.eventStream.publish(PaymentReceived(MilliSatoshi(htlc.amountMsat), htlc.paymentHash, htlc.channelId))
|
||||
context.become(run(hash2preimage - htlc.paymentHash))
|
||||
|
|
|
@ -310,13 +310,21 @@ object Graph {
|
|||
}
|
||||
|
||||
/**
|
||||
* This forces channel_update(s) with fees=0 to have a minimum of 1msat for the baseFee. Note that
|
||||
* the update is not being modified and the result of the route computation will still have the update
|
||||
* with fees=0 which is what will be used to build the onion.
|
||||
*
|
||||
* @param edge the edge for which we want to compute the weight
|
||||
* @param amountWithFees the value that this edge will have to carry along
|
||||
* @return the new amount updated with the necessary fees for this edge
|
||||
*/
|
||||
private def edgeFeeCost(edge: GraphEdge, amountWithFees: Long): Long = {
|
||||
amountWithFees + nodeFee(edge.update.feeBaseMsat, edge.update.feeProportionalMillionths, amountWithFees)
|
||||
if(edgeHasZeroFee(edge)) amountWithFees + nodeFee(baseMsat = 1, proportional = 0, amountWithFees)
|
||||
else amountWithFees + nodeFee(edge.update.feeBaseMsat, edge.update.feeProportionalMillionths, amountWithFees)
|
||||
}
|
||||
|
||||
private def edgeHasZeroFee(edge: GraphEdge): Boolean = {
|
||||
edge.update.feeBaseMsat == 0 && edge.update.feeProportionalMillionths == 0
|
||||
}
|
||||
|
||||
// Calculates the total cost of a path (amount + fees), direct channels with the source will have a cost of 0 (pay no fees)
|
||||
|
|
|
@ -137,7 +137,7 @@ class Router(nodeParams: NodeParams, watcher: ActorRef, initialized: Option[Prom
|
|||
}
|
||||
)
|
||||
|
||||
val db = nodeParams.networkDb
|
||||
val db = nodeParams.db.network
|
||||
|
||||
{
|
||||
log.info("loading network announcements from db...")
|
||||
|
|
|
@ -16,15 +16,10 @@
|
|||
|
||||
package fr.acinq.eclair
|
||||
|
||||
import java.io.File
|
||||
import java.nio.file._
|
||||
import java.nio.file.attribute.BasicFileAttributes
|
||||
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import fr.acinq.bitcoin.Block
|
||||
import fr.acinq.eclair.crypto.LocalKeyManager
|
||||
import org.scalatest.FunSuite
|
||||
|
||||
import scala.util.Try
|
||||
|
||||
class StartupSpec extends FunSuite {
|
||||
|
@ -38,31 +33,18 @@ class StartupSpec extends FunSuite {
|
|||
assert(baseUkraineAlias.getBytes.length === 27)
|
||||
|
||||
// we add 2 UTF-8 chars, each is 3-bytes long -> total new length 33 bytes!
|
||||
val goUkraineGo = threeBytesUTFChar+"BitcoinLightningNodeUkraine"+threeBytesUTFChar
|
||||
val goUkraineGo = threeBytesUTFChar + "BitcoinLightningNodeUkraine" + threeBytesUTFChar
|
||||
|
||||
assert(goUkraineGo.length === 29)
|
||||
assert(goUkraineGo.getBytes.length === 33) // too long for the alias, should be truncated
|
||||
|
||||
val illegalAliasConf = ConfigFactory.parseString(s"node-alias = $goUkraineGo")
|
||||
val conf = illegalAliasConf.withFallback(ConfigFactory.parseResources("reference.conf").getConfig("eclair"))
|
||||
val tempConfParentDir = new File("temp-test.conf")
|
||||
|
||||
val keyManager = new LocalKeyManager(seed = randomKey.toBin, chainHash = Block.TestnetGenesisBlock.hash)
|
||||
|
||||
// try to create a NodeParams instance with a conf that contains an illegal alias
|
||||
val nodeParamsAttempt = Try(NodeParams.makeNodeParams(tempConfParentDir, conf, keyManager, None))
|
||||
val nodeParamsAttempt = Try(NodeParams.makeNodeParams(conf, keyManager, None, TestConstants.inMemoryDb()))
|
||||
assert(nodeParamsAttempt.isFailure && nodeParamsAttempt.failed.get.getMessage.contains("alias, too long"))
|
||||
|
||||
// destroy conf files after the test
|
||||
Files.walkFileTree(tempConfParentDir.toPath, new SimpleFileVisitor[Path]() {
|
||||
override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = {
|
||||
Files.deleteIfExists(file)
|
||||
FileVisitResult.CONTINUE
|
||||
}
|
||||
})
|
||||
|
||||
tempConfParentDir.listFiles.foreach(_.delete())
|
||||
tempConfParentDir.deleteOnExit()
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -16,18 +16,18 @@
|
|||
|
||||
package fr.acinq.eclair
|
||||
|
||||
import java.sql.DriverManager
|
||||
import java.sql.{Connection, DriverManager}
|
||||
|
||||
import fr.acinq.bitcoin.Crypto.PrivateKey
|
||||
import fr.acinq.bitcoin.{Block, ByteVector32, Script}
|
||||
import fr.acinq.eclair.NodeParams.BITCOIND
|
||||
import fr.acinq.eclair.crypto.LocalKeyManager
|
||||
import fr.acinq.eclair.db._
|
||||
import fr.acinq.eclair.db.sqlite._
|
||||
import fr.acinq.eclair.io.Peer
|
||||
import fr.acinq.eclair.router.RouterConf
|
||||
import fr.acinq.eclair.wire.{Color, NodeAddress}
|
||||
import scodec.bits.ByteVector
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
/**
|
||||
|
@ -38,12 +38,15 @@ object TestConstants {
|
|||
val pushMsat = 200000000L
|
||||
val feeratePerKw = 10000L
|
||||
|
||||
def sqliteInMemory() = DriverManager.getConnection("jdbc:sqlite::memory:")
|
||||
|
||||
def inMemoryDb(connection: Connection = sqliteInMemory()): Databases = Databases.databaseByConnections(connection, connection, connection)
|
||||
|
||||
|
||||
object Alice {
|
||||
val seed = ByteVector32(ByteVector.fill(32)(1))
|
||||
val keyManager = new LocalKeyManager(seed, Block.RegtestGenesisBlock.hash)
|
||||
|
||||
def sqlite = DriverManager.getConnection("jdbc:sqlite::memory:")
|
||||
|
||||
// This is a function, and not a val! When called will return a new NodeParams
|
||||
def nodeParams = NodeParams(
|
||||
keyManager = keyManager,
|
||||
|
@ -66,12 +69,7 @@ object TestConstants {
|
|||
feeProportionalMillionth = 10,
|
||||
reserveToFundingRatio = 0.01, // note: not used (overridden below)
|
||||
maxReserveToFundingRatio = 0.05,
|
||||
channelsDb = new SqliteChannelsDb(sqlite),
|
||||
peersDb = new SqlitePeersDb(sqlite),
|
||||
networkDb = new SqliteNetworkDb(sqlite),
|
||||
pendingRelayDb = new SqlitePendingRelayDb(sqlite),
|
||||
paymentsDb = new SqlitePaymentsDb(sqlite),
|
||||
auditDb = new SqliteAuditDb(sqlite),
|
||||
db = inMemoryDb(sqliteInMemory),
|
||||
revocationTimeout = 20 seconds,
|
||||
pingInterval = 30 seconds,
|
||||
pingTimeout = 10 seconds,
|
||||
|
@ -114,8 +112,6 @@ object TestConstants {
|
|||
val seed = ByteVector32(ByteVector.fill(32)(2))
|
||||
val keyManager = new LocalKeyManager(seed, Block.RegtestGenesisBlock.hash)
|
||||
|
||||
def sqlite = DriverManager.getConnection("jdbc:sqlite::memory:")
|
||||
|
||||
def nodeParams = NodeParams(
|
||||
keyManager = keyManager,
|
||||
alias = "bob",
|
||||
|
@ -137,12 +133,7 @@ object TestConstants {
|
|||
feeProportionalMillionth = 10,
|
||||
reserveToFundingRatio = 0.01, // note: not used (overridden below)
|
||||
maxReserveToFundingRatio = 0.05,
|
||||
channelsDb = new SqliteChannelsDb(sqlite),
|
||||
peersDb = new SqlitePeersDb(sqlite),
|
||||
networkDb = new SqliteNetworkDb(sqlite),
|
||||
pendingRelayDb = new SqlitePendingRelayDb(sqlite),
|
||||
paymentsDb = new SqlitePaymentsDb(sqlite),
|
||||
auditDb = new SqliteAuditDb(sqlite),
|
||||
db = inMemoryDb(sqliteInMemory),
|
||||
revocationTimeout = 20 seconds,
|
||||
pingInterval = 30 seconds,
|
||||
pingTimeout = 10 seconds,
|
||||
|
|
|
@ -27,6 +27,8 @@ import scala.util.Try
|
|||
*/
|
||||
class TestWallet extends EclairWallet {
|
||||
|
||||
var rolledback = Set.empty[Transaction]
|
||||
|
||||
override def getBalance: Future[Satoshi] = ???
|
||||
|
||||
override def getFinalAddress: Future[String] = Future.successful("2MsRZ1asG6k94m6GYUufDGaZJMoJ4EV5JKs")
|
||||
|
@ -36,7 +38,10 @@ class TestWallet extends EclairWallet {
|
|||
|
||||
override def commit(tx: Transaction): Future[Boolean] = Future.successful(true)
|
||||
|
||||
override def rollback(tx: Transaction): Future[Boolean] = Future.successful(true)
|
||||
override def rollback(tx: Transaction): Future[Boolean] = {
|
||||
rolledback = rolledback + tx
|
||||
Future.successful(true)
|
||||
}
|
||||
|
||||
override def doubleSpent(tx: Transaction): Future[Boolean] = Future.successful(false)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
package fr.acinq.eclair.blockchain.bitcoind
|
||||
|
||||
import fr.acinq.bitcoin.OutPoint
|
||||
import fr.acinq.eclair.blockchain.{Watch, WatchConfirmed, WatchSpent, WatchSpentBasic}
|
||||
import fr.acinq.eclair.channel.BITCOIN_FUNDING_SPENT
|
||||
import fr.acinq.eclair.randomBytes32
|
||||
import org.scalatest.FunSuite
|
||||
|
||||
class ZmqWatcherSpec extends FunSuite {
|
||||
|
||||
test("add/remove watches from/to utxo map") {
|
||||
import ZmqWatcher._
|
||||
|
||||
val m0 = Map.empty[OutPoint, Set[Watch]]
|
||||
|
||||
val txid = randomBytes32
|
||||
val outputIndex = 42
|
||||
|
||||
val utxo = OutPoint(txid.reverse, outputIndex)
|
||||
|
||||
val w1 = WatchSpent(null, txid, outputIndex, randomBytes32, BITCOIN_FUNDING_SPENT)
|
||||
val w2 = WatchSpent(null, txid, outputIndex, randomBytes32, BITCOIN_FUNDING_SPENT)
|
||||
val w3 = WatchSpentBasic(null, txid, outputIndex, randomBytes32, BITCOIN_FUNDING_SPENT)
|
||||
val w4 = WatchSpentBasic(null, randomBytes32, 5, randomBytes32, BITCOIN_FUNDING_SPENT)
|
||||
val w5 = WatchConfirmed(null, txid, randomBytes32, 3, BITCOIN_FUNDING_SPENT)
|
||||
|
||||
// we test as if the collection was immutable
|
||||
val m1 = addWatchedUtxos(m0, w1)
|
||||
assert(m1.keySet == Set(utxo) && m1.size == 1)
|
||||
val m2 = addWatchedUtxos(m1, w2)
|
||||
assert(m2.keySet == Set(utxo) && m2(utxo).size == 2)
|
||||
val m3 = addWatchedUtxos(m2, w3)
|
||||
assert(m3.keySet == Set(utxo) && m3(utxo).size == 3)
|
||||
val m4 = addWatchedUtxos(m3, w4)
|
||||
assert(m4.keySet == Set(utxo, OutPoint(w4.txId.reverse, w4.outputIndex)) && m3(utxo).size == 3)
|
||||
val m5 = addWatchedUtxos(m4, w5)
|
||||
assert(m5.keySet == Set(utxo, OutPoint(w4.txId.reverse, w4.outputIndex)) && m5(utxo).size == 3)
|
||||
val m6 = removeWatchedUtxos(m5, w3)
|
||||
assert(m6.keySet == Set(utxo, OutPoint(w4.txId.reverse, w4.outputIndex)) && m6(utxo).size == 2)
|
||||
val m7 = removeWatchedUtxos(m6, w3)
|
||||
assert(m7.keySet == Set(utxo, OutPoint(w4.txId.reverse, w4.outputIndex)) && m7(utxo).size == 2)
|
||||
val m8 = removeWatchedUtxos(m7, w2)
|
||||
assert(m8.keySet == Set(utxo, OutPoint(w4.txId.reverse, w4.outputIndex)) && m8(utxo).size == 1)
|
||||
val m9 = removeWatchedUtxos(m8, w1)
|
||||
assert(m9.keySet == Set(OutPoint(w4.txId.reverse, w4.outputIndex)))
|
||||
val m10 = removeWatchedUtxos(m9, w4)
|
||||
assert(m10.isEmpty)
|
||||
}
|
||||
|
||||
|
||||
}
|
|
@ -20,6 +20,7 @@ import java.net.InetSocketAddress
|
|||
|
||||
import akka.actor.{ActorRef, ActorSystem, Props}
|
||||
import akka.testkit.{TestKit, TestProbe}
|
||||
import akka.util.Timeout
|
||||
import fr.acinq.bitcoin.{ByteVector32, Crypto, Transaction}
|
||||
import fr.acinq.eclair.blockchain.electrum.ElectrumClient._
|
||||
import grizzled.slf4j.Logging
|
||||
|
@ -27,6 +28,7 @@ import org.scalatest.{BeforeAndAfterAll, FunSuiteLike}
|
|||
import scodec.bits._
|
||||
|
||||
import scala.concurrent.duration._
|
||||
import scala.util.Random
|
||||
|
||||
|
||||
class ElectrumClientPoolSpec extends TestKit(ActorSystem("test")) with FunSuiteLike with Logging with BeforeAndAfterAll {
|
||||
|
@ -35,17 +37,35 @@ class ElectrumClientPoolSpec extends TestKit(ActorSystem("test")) with FunSuiteL
|
|||
// this is tx #2690 of block #500000
|
||||
val referenceTx = Transaction.read("0200000001983c5b32ced1de5ae97d3ce9b7436f8bb0487d15bf81e5cae97b1e238dc395c6000000006a47304402205957c75766e391350eba2c7b752f0056cb34b353648ecd0992a8a81fc9bcfe980220629c286592842d152cdde71177cd83086619744a533f262473298cacf60193500121021b8b51f74dbf0ac1e766d162c8707b5e8d89fc59da0796f3b4505e7c0fb4cf31feffffff0276bd0101000000001976a914219de672ba773aa0bc2e15cdd9d2e69b734138fa88ac3e692001000000001976a914301706dede031e9fb4b60836e073a4761855f6b188ac09a10700")
|
||||
val scriptHash = Crypto.sha256(referenceTx.txOut(0).publicKeyScript).reverse
|
||||
import scala.concurrent.ExecutionContext.Implicits.global
|
||||
val serverAddresses = {
|
||||
val stream = classOf[ElectrumClientSpec].getResourceAsStream("/electrum/servers_mainnet.json")
|
||||
val addresses = ElectrumClientPool.readServerAddresses(stream, sslEnabled = false)
|
||||
stream.close()
|
||||
addresses
|
||||
}
|
||||
|
||||
implicit val timeout = 20 seconds
|
||||
|
||||
import concurrent.ExecutionContext.Implicits.global
|
||||
|
||||
override protected def afterAll(): Unit = {
|
||||
TestKit.shutdownActorSystem(system)
|
||||
}
|
||||
|
||||
test("pick a random, unused server address") {
|
||||
val usedAddresses = Random.shuffle(serverAddresses.toSeq).take(serverAddresses.size / 2).map(_.adress).toSet
|
||||
for(_ <- 1 to 10) {
|
||||
val Some(pick) = ElectrumClientPool.pickAddress(serverAddresses, usedAddresses)
|
||||
assert(!usedAddresses.contains(pick.adress))
|
||||
}
|
||||
}
|
||||
|
||||
test("init an electrumx connection pool") {
|
||||
val random = new Random()
|
||||
val stream = classOf[ElectrumClientSpec].getResourceAsStream("/electrum/servers_mainnet.json")
|
||||
val addresses = ElectrumClientPool.readServerAddresses(stream, sslEnabled = false).take(2) + ElectrumClientPool.ElectrumServerAddress(new InetSocketAddress("electrum.acinq.co", 50002), SSL.STRICT)
|
||||
assert(addresses.nonEmpty)
|
||||
val addresses = random.shuffle(serverAddresses.toSeq).take(2).toSet + ElectrumClientPool.ElectrumServerAddress(new InetSocketAddress("electrum.acinq.co", 50002), SSL.STRICT)
|
||||
stream.close()
|
||||
assert(addresses.nonEmpty)
|
||||
pool = system.actorOf(Props(new ElectrumClientPool(addresses)), "electrum-client")
|
||||
}
|
||||
|
||||
|
@ -54,19 +74,19 @@ class ElectrumClientPoolSpec extends TestKit(ActorSystem("test")) with FunSuiteL
|
|||
// make sure our master is stable, if the first master that we select is behind the other servers we will switch
|
||||
// during the first few seconds
|
||||
awaitCond({
|
||||
probe.expectMsgType[ElectrumReady]
|
||||
probe.expectMsgType[ElectrumReady](30 seconds)
|
||||
probe.receiveOne(5 seconds) == null
|
||||
}, max = 15 seconds, interval = 1000 millis) }
|
||||
}, max = 60 seconds, interval = 1000 millis) }
|
||||
|
||||
test("get transaction") {
|
||||
probe.send(pool, GetTransaction(referenceTx.txid))
|
||||
val GetTransactionResponse(tx) = probe.expectMsgType[GetTransactionResponse]
|
||||
val GetTransactionResponse(tx) = probe.expectMsgType[GetTransactionResponse](timeout)
|
||||
assert(tx == referenceTx)
|
||||
}
|
||||
|
||||
test("get merkle tree") {
|
||||
probe.send(pool, GetMerkle(referenceTx.txid, 500000))
|
||||
val response = probe.expectMsgType[GetMerkleResponse]
|
||||
val response = probe.expectMsgType[GetMerkleResponse](timeout)
|
||||
assert(response.txid == referenceTx.txid)
|
||||
assert(response.block_height == 500000)
|
||||
assert(response.pos == 2690)
|
||||
|
@ -76,26 +96,26 @@ class ElectrumClientPoolSpec extends TestKit(ActorSystem("test")) with FunSuiteL
|
|||
test("header subscription") {
|
||||
val probe1 = TestProbe()
|
||||
probe1.send(pool, HeaderSubscription(probe1.ref))
|
||||
val HeaderSubscriptionResponse(_, header) = probe1.expectMsgType[HeaderSubscriptionResponse]
|
||||
val HeaderSubscriptionResponse(_, header) = probe1.expectMsgType[HeaderSubscriptionResponse](timeout)
|
||||
logger.info(s"received header for block ${header.blockId}")
|
||||
}
|
||||
|
||||
test("scripthash subscription") {
|
||||
val probe1 = TestProbe()
|
||||
probe1.send(pool, ScriptHashSubscription(scriptHash, probe1.ref))
|
||||
val ScriptHashSubscriptionResponse(scriptHash1, status) = probe1.expectMsgType[ScriptHashSubscriptionResponse]
|
||||
val ScriptHashSubscriptionResponse(scriptHash1, status) = probe1.expectMsgType[ScriptHashSubscriptionResponse](timeout)
|
||||
assert(status != "")
|
||||
}
|
||||
|
||||
test("get scripthash history") {
|
||||
probe.send(pool, GetScriptHashHistory(scriptHash))
|
||||
val GetScriptHashHistoryResponse(scriptHash1, history) = probe.expectMsgType[GetScriptHashHistoryResponse]
|
||||
val GetScriptHashHistoryResponse(scriptHash1, history) = probe.expectMsgType[GetScriptHashHistoryResponse](timeout)
|
||||
assert(history.contains((TransactionHistoryItem(500000, referenceTx.txid))))
|
||||
}
|
||||
|
||||
test("list script unspents") {
|
||||
probe.send(pool, ScriptHashListUnspent(scriptHash))
|
||||
val ScriptHashListUnspentResponse(scriptHash1, unspents) = probe.expectMsgType[ScriptHashListUnspentResponse]
|
||||
val ScriptHashListUnspentResponse(scriptHash1, unspents) = probe.expectMsgType[ScriptHashListUnspentResponse](timeout)
|
||||
assert(unspents.isEmpty)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -85,4 +85,13 @@ class WaitForFundingSignedStateSpec extends TestkitBaseClass with StateTestsHelp
|
|||
awaitCond(alice.stateName == CLOSED)
|
||||
}
|
||||
|
||||
test("recv INPUT_DISCONNECTED") { f =>
|
||||
import f._
|
||||
val fundingTx = alice.stateData.asInstanceOf[DATA_WAIT_FOR_FUNDING_SIGNED].fundingTx
|
||||
assert(alice.underlyingActor.wallet.asInstanceOf[TestWallet].rolledback.isEmpty)
|
||||
alice ! INPUT_DISCONNECTED
|
||||
awaitCond(alice.stateName == CLOSED)
|
||||
assert(alice.underlyingActor.wallet.asInstanceOf[TestWallet].rolledback.contains(fundingTx))
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -484,11 +484,11 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
|
|||
// depending on who starts signing first, there will be one or two commitments because both sides have changes
|
||||
assert(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.localCommit.index === 1)
|
||||
assert(bob.stateData.asInstanceOf[DATA_NORMAL].commitments.localCommit.index === 2)
|
||||
assert(alice.underlyingActor.nodeParams.channelsDb.listHtlcInfos(alice.stateData.asInstanceOf[DATA_NORMAL].channelId, 0).size == 0)
|
||||
assert(alice.underlyingActor.nodeParams.channelsDb.listHtlcInfos(alice.stateData.asInstanceOf[DATA_NORMAL].channelId, 1).size == 2)
|
||||
assert(alice.underlyingActor.nodeParams.channelsDb.listHtlcInfos(alice.stateData.asInstanceOf[DATA_NORMAL].channelId, 2).size == 4)
|
||||
assert(bob.underlyingActor.nodeParams.channelsDb.listHtlcInfos(bob.stateData.asInstanceOf[DATA_NORMAL].channelId, 0).size == 0)
|
||||
assert(bob.underlyingActor.nodeParams.channelsDb.listHtlcInfos(bob.stateData.asInstanceOf[DATA_NORMAL].channelId, 1).size == 3)
|
||||
assert(alice.underlyingActor.nodeParams.db.channels.listHtlcInfos(alice.stateData.asInstanceOf[DATA_NORMAL].channelId, 0).size == 0)
|
||||
assert(alice.underlyingActor.nodeParams.db.channels.listHtlcInfos(alice.stateData.asInstanceOf[DATA_NORMAL].channelId, 1).size == 2)
|
||||
assert(alice.underlyingActor.nodeParams.db.channels.listHtlcInfos(alice.stateData.asInstanceOf[DATA_NORMAL].channelId, 2).size == 4)
|
||||
assert(bob.underlyingActor.nodeParams.db.channels.listHtlcInfos(bob.stateData.asInstanceOf[DATA_NORMAL].channelId, 0).size == 0)
|
||||
assert(bob.underlyingActor.nodeParams.db.channels.listHtlcInfos(bob.stateData.asInstanceOf[DATA_NORMAL].channelId, 1).size == 3)
|
||||
}
|
||||
|
||||
test("recv CMD_SIGN (htlcs with same pubkeyScript but different amounts)") { f =>
|
||||
|
|
|
@ -50,10 +50,10 @@ class SqliteChannelsDbSpec extends FunSuite {
|
|||
|
||||
intercept[SQLiteException](db.addOrUpdateHtlcInfo(channel.channelId, commitNumber, paymentHash1, cltvExpiry1)) // no related channel
|
||||
|
||||
assert(db.listChannels().toSet === Set.empty)
|
||||
assert(db.listLocalChannels().toSet === Set.empty)
|
||||
db.addOrUpdateChannel(channel)
|
||||
db.addOrUpdateChannel(channel)
|
||||
assert(db.listChannels() === List(channel))
|
||||
assert(db.listLocalChannels() === List(channel))
|
||||
|
||||
assert(db.listHtlcInfos(channel.channelId, commitNumber).toList == Nil)
|
||||
db.addOrUpdateHtlcInfo(channel.channelId, commitNumber, paymentHash1, cltvExpiry1)
|
||||
|
@ -62,7 +62,7 @@ class SqliteChannelsDbSpec extends FunSuite {
|
|||
assert(db.listHtlcInfos(channel.channelId, 43).toList == Nil)
|
||||
|
||||
db.removeChannel(channel.channelId)
|
||||
assert(db.listChannels() === Nil)
|
||||
assert(db.listLocalChannels() === Nil)
|
||||
assert(db.listHtlcInfos(channel.channelId, commitNumber).toList == Nil)
|
||||
}
|
||||
|
||||
|
|
|
@ -19,17 +19,21 @@ package fr.acinq.eclair.payment
|
|||
import akka.actor.FSM.{CurrentState, SubscribeTransitionCallBack, Transition}
|
||||
import akka.actor.Status
|
||||
import akka.testkit.{TestFSMRef, TestProbe}
|
||||
import fr.acinq.bitcoin.{Block, ByteVector32, MilliSatoshi}
|
||||
import fr.acinq.bitcoin.Script.{pay2wsh, write}
|
||||
import fr.acinq.bitcoin.{Block, ByteVector32, MilliSatoshi, Satoshi, Transaction, TxOut}
|
||||
import fr.acinq.eclair.blockchain.{UtxoStatus, ValidateRequest, ValidateResult, WatchSpentBasic}
|
||||
import fr.acinq.eclair.blockchain.WatchEventSpentBasic
|
||||
import fr.acinq.eclair.channel.Register.ForwardShortId
|
||||
import fr.acinq.eclair.channel.{AddHtlcFailed, BITCOIN_FUNDING_EXTERNAL_CHANNEL_SPENT, ChannelUnavailable}
|
||||
import fr.acinq.eclair.crypto.Sphinx
|
||||
import fr.acinq.eclair.crypto.Sphinx.ErrorPacket
|
||||
import fr.acinq.eclair.io.Peer.PeerRoutingMessage
|
||||
import fr.acinq.eclair.payment.PaymentLifecycle._
|
||||
import fr.acinq.eclair.router.Announcements.makeChannelUpdate
|
||||
import fr.acinq.eclair.router.Announcements.{makeChannelUpdate, makeNodeAnnouncement}
|
||||
import fr.acinq.eclair.router._
|
||||
import fr.acinq.eclair.transactions.Scripts
|
||||
import fr.acinq.eclair.wire._
|
||||
import fr.acinq.eclair.{Globals, randomBytes32}
|
||||
import fr.acinq.eclair.{Globals, ShortChannelId, randomBytes32}
|
||||
|
||||
/**
|
||||
* Created by PM on 29/08/2016.
|
||||
|
@ -322,6 +326,57 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
|
|||
assert(fee === MilliSatoshi(paymentOK.amountMsat - request.amountMsat))
|
||||
}
|
||||
|
||||
test("payment succeeded to a channel with fees=0") { fixture =>
|
||||
import fixture._
|
||||
import fr.acinq.eclair.randomKey
|
||||
|
||||
// the network will be a --(1)--> b ---(2)--> c --(3)--> d and e --(4)--> f (we are a) and b -> g has fees=0
|
||||
// \
|
||||
// \--(5)--> g
|
||||
|
||||
val (priv_g, priv_funding_g) = (randomKey, randomKey)
|
||||
val (g, funding_g) = (priv_g.publicKey, priv_funding_g.publicKey)
|
||||
val ann_g = makeNodeAnnouncement(priv_g, "node-G", Color(-30, 10, -50), Nil)
|
||||
val channelId_bg = ShortChannelId(420000, 5, 0)
|
||||
val chan_bg = channelAnnouncement(channelId_bg, priv_b, priv_g, priv_funding_b, priv_funding_g)
|
||||
val channelUpdate_bg = makeChannelUpdate(Block.RegtestGenesisBlock.hash, priv_b, g, channelId_bg, cltvExpiryDelta = 9, htlcMinimumMsat = 0, feeBaseMsat = 0, feeProportionalMillionths = 0, htlcMaximumMsat = 500000000L)
|
||||
val channelUpdate_gb = makeChannelUpdate(Block.RegtestGenesisBlock.hash, priv_g, b, channelId_bg, cltvExpiryDelta = 9, htlcMinimumMsat = 0, feeBaseMsat = 10, feeProportionalMillionths = 8, htlcMaximumMsat = 500000000L)
|
||||
assert(Router.getDesc(channelUpdate_bg, chan_bg) === ChannelDesc(chan_bg.shortChannelId, priv_b.publicKey, priv_g.publicKey))
|
||||
router ! PeerRoutingMessage(null, remoteNodeId, chan_bg)
|
||||
router ! PeerRoutingMessage(null, remoteNodeId, ann_g)
|
||||
router ! PeerRoutingMessage(null, remoteNodeId, channelUpdate_bg)
|
||||
router ! PeerRoutingMessage(null, remoteNodeId, channelUpdate_gb)
|
||||
|
||||
// actual test begins
|
||||
val paymentFSM = system.actorOf(PaymentLifecycle.props(a, router, TestProbe().ref))
|
||||
val monitor = TestProbe()
|
||||
val sender = TestProbe()
|
||||
val eventListener = TestProbe()
|
||||
system.eventStream.subscribe(eventListener.ref, classOf[PaymentEvent])
|
||||
|
||||
paymentFSM ! SubscribeTransitionCallBack(monitor.ref)
|
||||
val CurrentState(_, WAITING_FOR_REQUEST) = monitor.expectMsgClass(classOf[CurrentState[_]])
|
||||
|
||||
// we send a payment to G which is just after the
|
||||
val request = SendPayment(defaultAmountMsat, defaultPaymentHash, g)
|
||||
sender.send(paymentFSM, request)
|
||||
|
||||
// the route will be A -> B -> G where B -> G has a channel_update with fees=0
|
||||
val Transition(_, WAITING_FOR_REQUEST, WAITING_FOR_ROUTE) = monitor.expectMsgClass(classOf[Transition[_]])
|
||||
val Transition(_, WAITING_FOR_ROUTE, WAITING_FOR_PAYMENT_COMPLETE) = monitor.expectMsgClass(classOf[Transition[_]])
|
||||
|
||||
sender.send(paymentFSM, UpdateFulfillHtlc(ByteVector32.Zeroes, 0, defaultPaymentHash))
|
||||
|
||||
val paymentOK = sender.expectMsgType[PaymentSucceeded]
|
||||
val PaymentSent(MilliSatoshi(request.amountMsat), fee, request.paymentHash, paymentOK.paymentPreimage, _, _) = eventListener.expectMsgType[PaymentSent]
|
||||
|
||||
// during the route computation the fees were treated as if they were 1msat but when sending the onion we actually put zero
|
||||
// NB: A -> B doesn't pay fees because it's our direct neighbor
|
||||
// NB: B -> G doesn't asks for fees at all
|
||||
assert(fee === MilliSatoshi(0))
|
||||
assert(fee === MilliSatoshi(paymentOK.amountMsat - request.amountMsat))
|
||||
}
|
||||
|
||||
test("filter errors properly") { fixture =>
|
||||
val failures = LocalFailure(RouteNotFound) :: RemoteFailure(Hop(a, b, channelUpdate_ab) :: Nil, ErrorPacket(a, TemporaryNodeFailure)) :: LocalFailure(AddHtlcFailed(ByteVector32.Zeroes, ByteVector32.Zeroes, ChannelUnavailable(ByteVector32.Zeroes), Local(None), None, None)) :: LocalFailure(RouteNotFound) :: Nil
|
||||
val filtered = PaymentLifecycle.transformForUser(failures)
|
||||
|
|
|
@ -57,11 +57,11 @@ class PruningSpec extends TestkitBaseClass with BeforeAndAfterAll {
|
|||
val routingInfoA = fakeRoutingInfo
|
||||
routingInfoA.map {
|
||||
case (a, u1, u2, n1, n2) =>
|
||||
paramsA.networkDb.addChannel(a, txid, Satoshi(100000))
|
||||
paramsA.networkDb.addChannelUpdate(u1)
|
||||
paramsA.networkDb.addChannelUpdate(u2)
|
||||
paramsA.networkDb.addNode(n1)
|
||||
paramsA.networkDb.addNode(n2)
|
||||
paramsA.db.network.addChannel(a, txid, Satoshi(100000))
|
||||
paramsA.db.network.addChannelUpdate(u1)
|
||||
paramsA.db.network.addChannelUpdate(u2)
|
||||
paramsA.db.network.addNode(n1)
|
||||
paramsA.db.network.addNode(n2)
|
||||
}
|
||||
val probe = TestProbe()
|
||||
val switchboard = system.actorOf(Props(new Actor {
|
||||
|
|
|
@ -590,6 +590,24 @@ class RouteCalculationSpec extends FunSuite {
|
|||
assert(route1.map(hops2Ids) === Success(1 :: 2 :: 4 :: 5 :: Nil))
|
||||
}
|
||||
|
||||
test("ensure the route calculation terminates correctly when selecting 0-fees edges") {
|
||||
|
||||
// the graph contains a possible 0-cost path that goes back on its steps ( e -> f, f -> e )
|
||||
val updates = List(
|
||||
makeUpdate(1L, a, b, 10, 10), // a -> b
|
||||
makeUpdate(2L, b, c, 10, 10),
|
||||
makeUpdate(4L, c, d, 10, 10),
|
||||
makeUpdate(3L, b, e, 0, 0), // b -> e
|
||||
makeUpdate(6L, e, f, 0, 0), // e -> f
|
||||
makeUpdate(6L, f, e, 0, 0), // e <- f
|
||||
makeUpdate(5L, e, d, 0, 0) // e -> d
|
||||
).toMap
|
||||
|
||||
val g = makeGraph(updates)
|
||||
|
||||
val route1 = Router.findRoute(g, a, d, DEFAULT_AMOUNT_MSAT, numRoutes = 1, routeParams = DEFAULT_ROUTE_PARAMS)
|
||||
assert(route1.map(hops2Ids) === Success(1 :: 3 :: 5 :: Nil))
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
<parent>
|
||||
<groupId>fr.acinq.eclair</groupId>
|
||||
<artifactId>eclair_2.11</artifactId>
|
||||
<version>0.2-android-SNAPSHOT</version>
|
||||
<version>0.3-android-SNAPSHOT</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>eclair-node_2.11</artifactId>
|
||||
|
|
3
pom.xml
3
pom.xml
|
@ -20,7 +20,7 @@
|
|||
|
||||
<groupId>fr.acinq.eclair</groupId>
|
||||
<artifactId>eclair_2.11</artifactId>
|
||||
<version>0.2-android-SNAPSHOT</version>
|
||||
<version>0.3-android-SNAPSHOT</version>
|
||||
<packaging>pom</packaging>
|
||||
|
||||
<modules>
|
||||
|
@ -215,6 +215,7 @@
|
|||
<buildDirectory>${project.build.directory}</buildDirectory>
|
||||
</systemProperties>
|
||||
<argLine>-Xmx1024m</argLine>
|
||||
<argLine>-Dfile.encoding=UTF-8</argLine>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
|
|
Loading…
Add table
Reference in a new issue