mirror of https://github.com/ACINQ/eclair.git (synced 2025-03-13 11:35:47 +01:00)

Merge branch 'master' into wip-android

commit 2f43e3bff9
82 changed files with 1263 additions and 1403 deletions
LICENSE (2 changed lines)

@@ -186,7 +186,7 @@ Apache License
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

-   Copyrigh 2014 ACINQ SAS
+   Copyright 2014 ACINQ SAS

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
README.md (30 changed lines)

@@ -27,7 +27,7 @@ Please see the latest [release note](https://github.com/ACINQ/eclair/releases) f

## Installation

-:warning: **Those are valid for the most up-to-date, unreleased, version of eclair. Here are the [instructions for Eclair 0.2-alpha8](https://github.com/ACINQ/eclair/blob/v0.2-alpha8/README.md#installation)**.
+:warning: **Those are valid for the most up-to-date, unreleased, version of eclair. Here are the [instructions for Eclair 0.2-alpha10](https://github.com/ACINQ/eclair/blob/v0.2-alpha10/README.md#installation)**.

### Configuring Bitcoin Core

@@ -37,17 +37,19 @@ Run bitcoind with the following minimal `bitcoin.conf`:
```
testnet=1
server=1
-rpcuser=XXX
-rpcpassword=XXX
+rpcuser=foo
+rpcpassword=bar
txindex=1
zmqpubrawblock=tcp://127.0.0.1:29000
zmqpubrawtx=tcp://127.0.0.1:29000
```

Eclair will use any BTC it finds in the Bitcoin Core wallet to fund any channels you choose to open. Eclair will return BTC from closed channels to this wallet.

On **__testnet__**, you also need to make sure that all your UTXOs are `p2sh-of-p2wpkh`.
To do this, use the debug console, create a new address with `getnewaddress`, import it as a witness address with `addwitnessaddress`, and
send all your balance to this witness address.
-If you need to create and send funds manually, don't forget to create and specify a witness address for the change output (this option is avaliable on the GUI once you set the `Enable coin control features` wallet option).
+If you need to create and send funds manually, don't forget to create and specify a witness address for the change output (this option is available on the GUI once you set the `Enable coin control features` wallet option).

### Installing Eclair
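A minimal sketch of the consuming side of the two `zmqpubraw*` endpoints configured above — it uses the same jeromq calls (`ZMQ.SUB`, `subscribe`, `ZMsg.recvMsg`) as the `ZMQActor` that appears later in this diff; the address is the one from `bitcoin.conf`, everything else is illustrative:

```scala
import org.zeromq.{ZContext, ZMQ, ZMsg}

object ZmqSubscribeSketch extends App {
  val ctx = new ZContext
  val subscriber = ctx.createSocket(ZMQ.SUB)
  subscriber.connect("tcp://127.0.0.1:29000")            // address from bitcoin.conf above
  subscriber.subscribe("rawblock".getBytes(ZMQ.CHARSET)) // raw serialized blocks
  subscriber.subscribe("rawtx".getBytes(ZMQ.CHARSET))    // raw serialized transactions
  while (true) {
    val msg = ZMsg.recvMsg(subscriber)                   // blocking: topic frame + payload frame
    val topic = msg.popString()
    val payload = msg.pop().getData
    println(s"topic=$topic bytes=${payload.length}")
  }
}
```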
@@ -90,16 +92,16 @@ eclair.node-color=49daaa

Here are some of the most common options:

-name                         | description               | default value
------------------------------|---------------------------|--------------
-eclair.server.port           | Lightning TCP port        | 9735
-eclair.api.enabled           | Enable/disable the API    | false. By default the API is disabled. If you want to enable it, you must set a user/password.
-eclair.api.port              | API HTTP port             | 8080
-eclair.api.user              | API user (BASIC)          | "" (must be set if the API is enabled)
-eclair.api.password          | API password (BASIC)      | "" (must be set if the API is enabled)
-eclair.bitcoind.rpcuser      | Bitcoin Core RPC user     | foo
-eclair.bitcoind.rpcpassword  | Bitcoin Core RPC password | bar
-eclair.bitcoind.zmq          | Bitcoin Core ZMQ address  | tcp://127.0.0.1:29000
+name                         | description                                                                             | default value
+-----------------------------|-----------------------------------------------------------------------------------------|--------------
+eclair.server.port           | Lightning TCP port                                                                      | 9735
+eclair.api.enabled           | Enable/disable the API                                                                  | false. By default the API is disabled. If you want to enable it, you must set a password.
+eclair.api.port              | API HTTP port                                                                           | 8080
+eclair.api.password          | API password (BASIC)                                                                    | "" (must be set if the API is enabled)
+eclair.bitcoind.rpcuser      | Bitcoin Core RPC user                                                                   | foo
+eclair.bitcoind.rpcpassword  | Bitcoin Core RPC password                                                               | bar
+eclair.bitcoind.zmq          | Bitcoin Core ZMQ address                                                                | "tcp://127.0.0.1:29000"
+eclair.gui.unit              | Unit in which amounts are displayed (possible values: msat, sat, mbtc, btc)             | btc
+
+Quotes are not required unless the value contains special characters. Full syntax guide [here](https://github.com/lightbend/config/blob/master/HOCON.md).
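A hedged sketch of how these keys are consumed: eclair reads them with the Typesafe Config (HOCON) library, via the same `ConfigFactory`/`getConfig("eclair")` calls that appear in the `NodeParams` hunks below. The values here are just the defaults from the table:

```scala
import com.typesafe.config.ConfigFactory

object ConfigSketch extends App {
  // inline HOCON standing in for an eclair.conf file
  val config = ConfigFactory.parseString(
    """
      |eclair.server.port = 9735
      |eclair.api.enabled = false
      |eclair.bitcoind.zmq = "tcp://127.0.0.1:29000"
    """.stripMargin).getConfig("eclair")

  println(config.getInt("server.port"))     // 9735
  println(config.getBoolean("api.enabled")) // false
  println(config.getString("bitcoind.zmq")) // tcp://127.0.0.1:29000
}
```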
eclair-core/eclair-cli (0 changed lines, mode: normal file → executable file)
@@ -54,7 +54,7 @@ eclair {
  default-feerate-per-kb = 20000 // default bitcoin core value

  max-htlc-value-in-flight-msat = 100000000000 // 1 BTC ~= unlimited
-  htlc-minimum-msat = 10000
+  htlc-minimum-msat = 1
  max-accepted-htlcs = 30

  reserve-to-funding-ratio = 0.01 // recommended by BOLT #2
@@ -65,7 +65,7 @@ eclair {
  expiry-delta-blocks = 144

  fee-base-msat = 10000
-  fee-proportional-millionths = 100 // fee charged per transferred satoshi in millionths of a satoshi (100 = 0.1%)
+  fee-proportional-millionths = 100 // fee charged per transferred satoshi in millionths of a satoshi (100 = 0.01%)

  // maximum local vs remote feerate mismatch; 1.0 means 100%
  // actual check is abs((local feerate - remote fee rate) / (local fee rate + remote fee rate)/2) > fee rate mismatch
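The corrected comment is easy to verify: `fee-proportional-millionths` is a fraction with denominator 10^6, so 100 millionths is 100/1,000,000 = 0.0001 = 0.01%, not 0.1%. A small worked example using the usual relay-fee formula (fee = base + amount × proportional / 10^6); the payment amount is illustrative:

```scala
object FeeSketch extends App {
  val feeBaseMsat = 10000L             // fee-base-msat above
  val feeProportionalMillionths = 100L // fee-proportional-millionths above
  val amountMsat = 50000000L           // a 50 000 sat payment (assumed example)

  val fee = feeBaseMsat + amountMsat * feeProportionalMillionths / 1000000L
  println(fee)                                   // 15000 msat: 10000 base + 5000 proportional
  println(feeProportionalMillionths / 1e6 * 100) // 0.01 (percent), matching the fixed comment
}
```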
@@ -83,4 +83,6 @@ eclair {
  auto-reconnect = true

  payment-handler = "local"
+  payment-request-expiry = 1 hour // default expiry for payment requests generated by this node
+  max-pending-payment-requests = 10000000
}
@@ -16,6 +16,15 @@ object DBCompatChecker extends Logging {
      case Success(_) => {}
      case Failure(_) => throw IncompatibleDBException
    }
  }
-
-  case object IncompatibleDBException extends RuntimeException("DB files are not compatible with this version of eclair.")
+
+  /**
+    * Tests if the network database is readable.
+    *
+    * @param nodeParams
+    */
+  def checkNetworkDBCompatibility(nodeParams: NodeParams): Unit =
+    Try(nodeParams.networkDb.listChannels(), nodeParams.networkDb.listNodes(), nodeParams.networkDb.listChannelUpdates()) match {
+      case Success(_) => {}
+      case Failure(_) => throw IncompatibleNetworkDBException
+    }
}
@@ -44,7 +44,7 @@ case class NodeParams(extendedPrivateKey: ExtendedPrivateKey,
                      channelsDb: ChannelsDb,
                      peersDb: PeersDb,
                      networkDb: NetworkDb,
                      preimagesDb: PreimagesDb,
                      pendingRelayDb: PendingRelayDb,
                      paymentsDb: PaymentsDb,
                      routerBroadcastInterval: FiniteDuration,
                      routerValidateInterval: FiniteDuration,

@@ -55,7 +55,9 @@ case class NodeParams(extendedPrivateKey: ExtendedPrivateKey,
                      chainHash: BinaryData,
                      channelFlags: Byte,
                      channelExcludeDuration: FiniteDuration,
-                     watcherType: WatcherType) {
+                     watcherType: WatcherType,
+                     paymentRequestExpiry: FiniteDuration,
+                     maxPendingPaymentRequests: Int) {
  val nodeId = privateKey.publicKey
}
@@ -82,17 +84,21 @@ object NodeParams {
    .withFallback(overrideDefaults)
    .withFallback(ConfigFactory.load()).getConfig("eclair")

-  def makeNodeParams(datadir: File, config: Config): NodeParams = {
+  def makeNodeParams(datadir: File, config: Config, seed_opt: Option[BinaryData] = None): NodeParams = {

    datadir.mkdirs()

-    val seedPath = new File(datadir, "seed.dat")
-    val seed: BinaryData = seedPath.exists() match {
-      case true => Files.toByteArray(seedPath)
-      case false =>
-        val seed = randomKey.toBin
-        Files.write(seed, seedPath)
-        seed
+    val seed: BinaryData = seed_opt match {
+      case Some(s) => s
+      case None =>
+        val seedPath = new File(datadir, "seed.dat")
+        seedPath.exists() match {
+          case true => Files.toByteArray(seedPath)
+          case false =>
+            val seed = randomKey.toBin
+            Files.write(seed, seedPath)
+            seed
+        }
    }
    val master = DeterministicWallet.generate(seed)
    val extendedPrivateKey = DeterministicWallet.derivePrivateKey(master, DeterministicWallet.hardened(46) :: DeterministicWallet.hardened(0) :: Nil)
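Isolated as a sketch, the new seed-selection logic above reduces to: an explicit `seed_opt` wins, otherwise `seed.dat` is read or created. The calls mirror the diff (`Files` is Guava's; `BinaryData` and `randomKey` come from fr.acinq.bitcoin / fr.acinq.eclair); this is an illustration, not the actual eclair helper:

```scala
import java.io.File
import com.google.common.io.Files
import fr.acinq.bitcoin.BinaryData
import fr.acinq.eclair.randomKey

object SeedSketch {
  def resolveSeed(datadir: File, seed_opt: Option[BinaryData]): BinaryData =
    seed_opt.getOrElse {
      val seedPath = new File(datadir, "seed.dat")
      if (seedPath.exists()) BinaryData(Files.toByteArray(seedPath)) // reuse the persisted seed
      else {
        val seed: BinaryData = randomKey.toBin                       // fresh 32-byte seed
        Files.write(seed, seedPath)                                  // persist for next start
        seed
      }
    }
}
```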
@@ -107,10 +113,12 @@ object NodeParams {
    val sqlite = DriverManager.getConnection(s"jdbc:sqlite:${new File(datadir, "eclair.sqlite")}")
    val channelsDb = new SqliteChannelsDb(sqlite)
    val peersDb = new SqlitePeersDb(sqlite)
-    val networkDb = new SqliteNetworkDb(sqlite)
    val preimagesDb = new SqlitePreimagesDb(sqlite)
    val pendingRelayDb = new SqlitePendingRelayDb(sqlite)
    val paymentsDb = new SqlitePaymentsDb(sqlite)

+    val sqliteNetwork = DriverManager.getConnection(s"jdbc:sqlite:${new File(datadir, "network.sqlite")}")
+    val networkDb = new SqliteNetworkDb(sqliteNetwork)
+
    val color = BinaryData(config.getString("node-color"))
    require(color.size == 3, "color should be a 3-bytes hex buffer")
@@ -151,7 +159,7 @@ object NodeParams {
      channelsDb = channelsDb,
      peersDb = peersDb,
      networkDb = networkDb,
      preimagesDb = preimagesDb,
      pendingRelayDb = pendingRelayDb,
      paymentsDb = paymentsDb,
      routerBroadcastInterval = FiniteDuration(config.getDuration("router-broadcast-interval", TimeUnit.SECONDS), TimeUnit.SECONDS),
      routerValidateInterval = FiniteDuration(config.getDuration("router-validate-interval", TimeUnit.SECONDS), TimeUnit.SECONDS),

@@ -162,6 +170,8 @@ object NodeParams {
      chainHash = chainHash,
      channelFlags = config.getInt("channel-flags").toByte,
      channelExcludeDuration = FiniteDuration(config.getDuration("channel-exclude-duration", TimeUnit.SECONDS), TimeUnit.SECONDS),
-      watcherType = watcherType)
+      watcherType = watcherType,
+      paymentRequestExpiry = FiniteDuration(config.getDuration("payment-request-expiry", TimeUnit.SECONDS), TimeUnit.SECONDS),
+      maxPendingPaymentRequests = config.getInt("max-pending-payment-requests"))
  }
}
@@ -23,4 +23,4 @@ object PortChecker {

}

-case class TCPBindException(port: Int) extends RuntimeException
+case class TCPBindException(port: Int) extends RuntimeException(s"could not bind to port $port")
@@ -6,7 +6,7 @@ import java.net.InetSocketAddress
import akka.actor.{ActorRef, ActorSystem, Props, SupervisorStrategy}
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
-import fr.acinq.bitcoin.Block
+import fr.acinq.bitcoin.{BinaryData, Block}
import fr.acinq.eclair.NodeParams.{BITCOINJ, ELECTRUM}
import fr.acinq.eclair.blockchain.bitcoinj.{BitcoinjKit, BitcoinjWallet, BitcoinjWatcher}
import fr.acinq.eclair.blockchain.electrum.{ElectrumClient, ElectrumEclairWallet, ElectrumWallet, ElectrumWatcher}
@@ -24,15 +24,22 @@ import scala.concurrent.{Await, ExecutionContext, Future}


/**
+  * Setup eclair from a datadir.
+  * <p>
  * Created by PM on 25/01/2016.
+  *
+  * @param datadir directory where eclair-core will write/read its data
+  * @param overrideDefaults
+  * @param actorSystem
+  * @param seed_opt optional seed, if set eclair will use it instead of generating one and won't create a seed.dat file.
  */
-class Setup(datadir: File, wallet_opt: Option[EclairWallet] = None, overrideDefaults: Config = ConfigFactory.empty(), actorSystem: ActorSystem = ActorSystem()) extends Logging {
+class Setup(datadir: File, wallet_opt: Option[EclairWallet] = None, overrideDefaults: Config = ConfigFactory.empty(), actorSystem: ActorSystem = ActorSystem(), seed_opt: Option[BinaryData] = None) extends Logging {

  logger.info(s"hello!")
  logger.info(s"version=${getClass.getPackage.getImplementationVersion} commit=${getClass.getPackage.getSpecificationVersion}")

  val config: Config = NodeParams.loadConfiguration(datadir, overrideDefaults)
-  val nodeParams: NodeParams = NodeParams.makeNodeParams(datadir, config)
+  val nodeParams: NodeParams = NodeParams.makeNodeParams(datadir, config, seed_opt)
  val chain: String = config.getString("chain")

  logger.info(s"nodeid=${nodeParams.privateKey.publicKey.toBin} alias=${nodeParams.alias}")
@@ -94,10 +101,11 @@ class Setup(datadir: File, wallet_opt: Option[EclairWallet] = None, overrideDefa
    val wallet = bitcoin match {
      case _ if wallet_opt.isDefined => wallet_opt.get
      case Bitcoinj(bitcoinj) => new BitcoinjWallet(bitcoinj.initialized.map(_ => bitcoinj.wallet()))
-      case Electrum(electrumClient) =>
-        val electrumSeedPath = new File(datadir, "electrum_seed.dat")
-        val electrumWallet = system.actorOf(ElectrumWallet.props(electrumSeedPath, electrumClient, ElectrumWallet.WalletParameters(Block.RegtestGenesisBlock.hash, allowSpendUnconfirmed = true)), "electrum-wallet")
-        new ElectrumEclairWallet(electrumWallet)
+      case Electrum(electrumClient) => seed_opt match {
+        case Some(seed) => val electrumWallet = system.actorOf(ElectrumWallet.props(seed, electrumClient, ElectrumWallet.WalletParameters(Block.TestnetGenesisBlock.hash)), "electrum-wallet")
+          new ElectrumEclairWallet(electrumWallet)
+        case _ => throw new RuntimeException("electrum wallet requires a seed to set up")
+      }
      case _ => ???
    }
@@ -152,4 +160,10 @@ case class Kit(nodeParams: NodeParams,



-case object EmptyAPIPasswordException extends RuntimeException("must set a user/password for the json-rpc api")
case object BitcoinWalletDisabledException extends RuntimeException("bitcoind must have wallet support enabled")
+
+case object EmptyAPIPasswordException extends RuntimeException("must set a password for the json-rpc api")
+
+case object IncompatibleDBException extends RuntimeException("database is not compatible with this version of eclair")
+
+case object IncompatibleNetworkDBException extends RuntimeException("network database is not compatible with this version of eclair")
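A hedged usage sketch for the extended `Setup` constructor above: booting eclair with an externally supplied seed (the wip-android use case), so that no `seed.dat` is ever written. The path and seed bytes are placeholders:

```scala
import java.io.File
import fr.acinq.bitcoin.BinaryData
import fr.acinq.eclair.Setup

object BootSketch extends App {
  val seed: BinaryData = BinaryData("42" * 32)          // hypothetical fixed 32-byte seed
  val datadir = new File("/tmp/eclair-datadir")         // placeholder datadir
  val setup = new Setup(datadir, seed_opt = Some(seed)) // wallet_opt/overrideDefaults keep their defaults
  // starting the node's actors from here is outside this diff
}
```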
@@ -60,8 +60,7 @@ final case class WatchEventLost(event: BitcoinEvent) extends WatchEvent
  * Publish the provided tx as soon as possible depending on locktime and csv
  */
final case class PublishAsap(tx: Transaction)
-final case class ParallelGetRequest(ann: Seq[ChannelAnnouncement])
-final case class IndividualResult(c: ChannelAnnouncement, tx: Option[Transaction], unspent: Boolean)
-final case class ParallelGetResponse(r: Seq[IndividualResult])
+final case class ValidateRequest(ann: ChannelAnnouncement)
+final case class ValidateResult(c: ChannelAnnouncement, tx: Option[Transaction], unspent: Boolean, t: Option[Throwable])

// @formatter:on
@@ -76,7 +76,7 @@ class BitcoinCoreWallet(rpcClient: BitcoinJsonRPCClient)(implicit system: ActorS
  } yield MakeFundingTxResponse(fundingTx, outputIndex)

  override def commit(tx: Transaction): Future[Boolean] = publishTransaction(tx)
-    .map(_ => true) // if bitcoind says OK, then we consider the tx succesfully published
+    .map(_ => true) // if bitcoind says OK, then we consider the tx successfully published
    .recoverWith { case JsonRPCError(e) =>
      logger.warn(s"txid=${tx.txid} error=$e")
      getTransaction(tx.txid).map(_ => true).recover { case _ => false } // if we get a parseable error from bitcoind AND the tx is NOT in the mempool/blockchain, then we consider that the tx was not published
@@ -1,200 +0,0 @@ (file deleted)
package fr.acinq.eclair.blockchain.bitcoind

import java.util.concurrent.Executors

import akka.actor.{Actor, ActorLogging, Cancellable, Props, Terminated}
import akka.pattern.pipe
import fr.acinq.bitcoin._
import fr.acinq.eclair.Globals
import fr.acinq.eclair.blockchain._
import fr.acinq.eclair.blockchain.bitcoind.rpc.ExtendedBitcoinClient
import fr.acinq.eclair.channel.BITCOIN_PARENT_TX_CONFIRMED
import fr.acinq.eclair.transactions.Scripts

import scala.collection.SortedMap
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

/**
  * A blockchain watcher that:
  * - receives bitcoin events (new blocks and new txes) directly from the bitcoin network
  * - also uses bitcoin-core rpc api, most notably for tx confirmation count and blockcount (because reorgs)
  * Created by PM on 21/02/2016.
  */
class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext = ExecutionContext.global) extends Actor with ActorLogging {

  import ZmqWatcher.TickNewBlock

  context.system.eventStream.subscribe(self, classOf[BlockchainEvent])

  // this is to initialize block count
  self ! TickNewBlock

  case class TriggerEvent(w: Watch, e: WatchEvent)

  def receive: Receive = watching(Set(), SortedMap(), None)

  def watching(watches: Set[Watch], block2tx: SortedMap[Long, Seq[Transaction]], nextTick: Option[Cancellable]): Receive = {

    case NewTransaction(tx) =>
      //log.debug(s"analyzing txid=${tx.txid} tx=$tx")
      watches.collect {
        case w@WatchSpentBasic(_, txid, outputIndex, _, event) if tx.txIn.exists(i => i.outPoint.txid == txid && i.outPoint.index == outputIndex) =>
          self ! TriggerEvent(w, WatchEventSpentBasic(event))
        case w@WatchSpent(_, txid, outputIndex, _, event) if tx.txIn.exists(i => i.outPoint.txid == txid && i.outPoint.index == outputIndex) =>
          self ! TriggerEvent(w, WatchEventSpent(event, tx))
      }

    case NewBlock(block) =>
      // using a Try because in tests we generate fake blocks
      log.debug(s"received blockid=${Try(block.blockId).getOrElse(BinaryData(""))}")
      nextTick.map(_.cancel()) // this may fail or succeed, worst case scenario we will have two ticks in a row (no big deal)
      log.debug(s"scheduling a new task to check on tx confirmations")
      // we do this to avoid herd effects in testing when generating a lot of blocks in a row
      val task = context.system.scheduler.scheduleOnce(2 seconds, self, TickNewBlock)
      context become watching(watches, block2tx, Some(task))

    case TickNewBlock =>
      client.getBlockCount.map {
        case count =>
          log.debug(s"setting blockCount=$count")
          Globals.blockCount.set(count)
          context.system.eventStream.publish(CurrentBlockCount(count))
      }
      // TODO: beware of the herd effect
      watches.collect { case w: WatchConfirmed => checkConfirmed(w) }
      context become (watching(watches, block2tx, None))

    case TriggerEvent(w, e) if watches.contains(w) =>
      log.info(s"triggering $w")
      w.channel ! e
      // NB: WatchSpent are permanent because we need to detect multiple spending of the funding tx
      // They are never cleaned up but it is not a big deal for now (1 channel == 1 watch)
      if (!w.isInstanceOf[WatchSpent]) context.become(watching(watches - w, block2tx, None))

    case CurrentBlockCount(count) => {
      val toPublish = block2tx.filterKeys(_ <= count)
      toPublish.values.flatten.map(tx => publish(tx))
      context.become(watching(watches, block2tx -- toPublish.keys, None))
    }

    case w: Watch if !watches.contains(w) =>
      w match {
        case WatchSpentBasic(_, txid, outputIndex, _, _) =>
          // note: we assume parent tx was published, we just need to make sure this particular output has not been spent
          client.isTransactionOuputSpendable(txid.toString(), outputIndex, true).collect {
            case false =>
              log.warning(s"output=$outputIndex of txid=$txid has already been spent")
              self ! TriggerEvent(w, WatchEventSpentBasic(w.event))
          }

        case WatchSpent(_, txid, outputIndex, _, _) =>
          // first let's see if the parent tx was published or not
          client.getTxConfirmations(txid.toString()).collect {
            case Some(_) =>
              // parent tx was published, we need to make sure this particular output has not been spent
              client.isTransactionOuputSpendable(txid.toString(), outputIndex, true).collect {
                case false =>
                  log.warning(s"output=$outputIndex of txid=$txid has already been spent")
                  log.warning(s"looking first in the mempool")
                  client.getMempool().map { mempoolTxs =>
                    mempoolTxs.filter(tx => tx.txIn.exists(i => i.outPoint.txid == txid && i.outPoint.index == outputIndex)) match {
                      case Nil =>
                        log.warning(s"couldn't find spending tx in the mempool, looking into blocks...")
                        client.lookForSpendingTx(None, txid.toString(), outputIndex).map { tx =>
                          log.warning(s"found the spending tx in the blockchain: txid=${tx.txid}")
                          self ! NewTransaction(tx)
                        }
                      case txs =>
                        log.warning(s"found ${txs.size} spending txs in the mempool: txids=${txs.map(_.txid).mkString(",")}")
                        txs.foreach(tx => self ! NewTransaction(tx))
                    }
                  }
              }
          }

        case w: WatchConfirmed => checkConfirmed(w) // maybe the tx is already confirmed, in that case the watch will be triggered and removed immediately

        case _: WatchLost => () // TODO: not implemented

        case w => log.warning(s"ignoring $w")
      }

      log.debug(s"adding watch $w for $sender")
      context.watch(w.channel)
      context.become(watching(watches + w, block2tx, nextTick))

    case PublishAsap(tx) =>
      val blockCount = Globals.blockCount.get()
      val cltvTimeout = Scripts.cltvTimeout(tx)
      val csvTimeout = Scripts.csvTimeout(tx)
      if (csvTimeout > 0) {
        require(tx.txIn.size == 1, s"watcher only supports tx with 1 input, this tx has ${tx.txIn.size} inputs")
        val parentTxid = tx.txIn(0).outPoint.txid
        log.info(s"txid=${tx.txid} has a relative timeout of $csvTimeout blocks, watching parenttxid=$parentTxid tx=$tx")
        val parentPublicKey = fr.acinq.bitcoin.Script.write(fr.acinq.bitcoin.Script.pay2wsh(tx.txIn.head.witness.stack.last))
        self ! WatchConfirmed(self, parentTxid, parentPublicKey, minDepth = 1, BITCOIN_PARENT_TX_CONFIRMED(tx))
      } else if (cltvTimeout > blockCount) {
        log.info(s"delaying publication of txid=${tx.txid} until block=$cltvTimeout (curblock=$blockCount)")
        val block2tx1 = block2tx.updated(cltvTimeout, block2tx.getOrElse(cltvTimeout, Seq.empty[Transaction]) :+ tx)
        context.become(watching(watches, block2tx1, None))
      } else publish(tx)

    case WatchEventConfirmed(BITCOIN_PARENT_TX_CONFIRMED(tx), blockHeight, _) =>
      log.info(s"parent tx of txid=${tx.txid} has been confirmed")
      val blockCount = Globals.blockCount.get()
      val csvTimeout = Scripts.csvTimeout(tx)
      val absTimeout = blockHeight + csvTimeout
      if (absTimeout > blockCount) {
        log.info(s"delaying publication of txid=${tx.txid} until block=$absTimeout (curblock=$blockCount)")
        val block2tx1 = block2tx.updated(absTimeout, block2tx.getOrElse(absTimeout, Seq.empty[Transaction]) :+ tx)
        context.become(watching(watches, block2tx1, None))
      } else publish(tx)

    case ParallelGetRequest(ann) => client.getParallel(ann).pipeTo(sender)

    case Terminated(channel) =>
      // we remove watches associated to dead actor
      val deprecatedWatches = watches.filter(_.channel == channel)
      context.become(watching(watches -- deprecatedWatches, block2tx, None))

    case 'watches => sender ! watches

  }

  // NOTE: we use a single thread to publish transactions so that it preserves order.
  // CHANGING THIS WILL RESULT IN CONCURRENCY ISSUES WHILE PUBLISHING PARENT AND CHILD TXS
  val singleThreadExecutionContext = ExecutionContext.fromExecutor(Executors.newSingleThreadExecutor())

  def publish(tx: Transaction, isRetry: Boolean = false): Unit = {
    log.info(s"publishing tx (isRetry=$isRetry): txid=${tx.txid} tx=$tx")
    client.publishTransaction(tx)(singleThreadExecutionContext).recover {
      case t: Throwable if t.getMessage.contains("-25") && !isRetry => // we retry only once
        import akka.pattern.after

        import scala.concurrent.duration._
        after(3 seconds, context.system.scheduler)(Future.successful({})).map(x => publish(tx, isRetry = true))
      case t: Throwable => log.error(s"cannot publish tx: reason=${t.getMessage} txid=${tx.txid} tx=$tx")
    }
  }

  def checkConfirmed(w: WatchConfirmed) = {
    log.debug(s"checking confirmations of txid=${w.txId}")
    client.getTxConfirmations(w.txId.toString).map {
      case Some(confirmations) if confirmations >= w.minDepth =>
        client.getTransactionShortId(w.txId.toString).map {
          case (height, index) => self ! TriggerEvent(w, WatchEventConfirmed(w.event, height, index))
        }
    }
  }

}

object ZmqWatcher {

  def props(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext = ExecutionContext.global) = Props(new ZmqWatcher(client)(ec))

  case object TickNewBlock

}
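One design point worth noting from the deleted watcher: transactions are published on a single-threaded `ExecutionContext` so that a parent tx always reaches bitcoind before its child. The trick in isolation, as a sketch (not eclair API):

```scala
import java.util.concurrent.Executors
import scala.concurrent.{ExecutionContext, Future}

object OrderedPublishSketch extends App {
  // one thread => tasks run strictly in submission order
  val singleThreadExecutionContext = ExecutionContext.fromExecutor(Executors.newSingleThreadExecutor())

  def publish(name: String): Future[Unit] =
    Future(println(s"publishing $name"))(singleThreadExecutionContext)

  publish("parent-tx")
  publish("child-tx") // guaranteed to print after parent-tx
}
```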
@@ -0,0 +1,67 @@ (new file)
package fr.acinq.eclair.blockchain.bitcoind.rpc

import akka.actor.ActorSystem
import com.ning.http.client._
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JInt, JNull, JString, JValue}
import org.json4s.jackson.JsonMethods.parse
import org.json4s.jackson.Serialization._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ExecutionContext, Future, Promise}

class BasicBitcoinJsonRPCClient(config: AsyncHttpClientConfig, host: String, port: Int, ssl: Boolean)(implicit system: ActorSystem) extends BitcoinJsonRPCClient {

  def this(user: String, password: String, host: String = "127.0.0.1", port: Int = 8332, ssl: Boolean = false)(implicit system: ActorSystem) = this(
    new AsyncHttpClientConfig.Builder()
      .setRealm(new Realm.RealmBuilder().setPrincipal(user).setPassword(password).setUsePreemptiveAuth(true).setScheme(Realm.AuthScheme.BASIC).build)
      .build,
    host,
    port,
    ssl
  )

  val client: AsyncHttpClient = new AsyncHttpClient(config)

  implicit val formats = DefaultFormats

  override def invoke(method: String, params: Any*)(implicit ec: ExecutionContext): Future[JValue] =
    invoke(JsonRPCRequest(method = method, params = params))

  def jsonResponse2Exception(jsonRPCResponse: JsonRPCResponse): JsonRPCResponse = jsonRPCResponse match {
    case JsonRPCResponse(_, Some(error), _) => throw JsonRPCError(error)
    case o => o
  }

  def invoke(request: JsonRPCRequest)(implicit ec: ExecutionContext): Future[JValue] = {
    val promise = Promise[JValue]()
    client
      .preparePost((if (ssl) "https" else "http") + s"://$host:$port/")
      .addHeader("Content-Type", "application/json")
      .setBody(write(request))
      .execute(new AsyncCompletionHandler[Unit] {
        override def onCompleted(response: Response): Unit =
          try {
            val jvalue = parse(response.getResponseBody)
            val jerror = jvalue \ "error"
            val result = jvalue \ "result"
            if (jerror != JNull) {
              for {
                JInt(code) <- jerror \ "code"
                JString(message) <- jerror \ "message"
              } yield promise.failure(new JsonRPCError(Error(code.toInt, message)))
            } else {
              promise.success(result)
            }
          } catch {
            case t: Throwable => promise.failure(t)
          }

        override def onThrowable(t: Throwable): Unit = promise.failure(t)
      })
    promise.future
  }

  def invoke(request: Seq[(String, Seq[Any])])(implicit ec: ExecutionContext): Future[Seq[JValue]] = ???

}
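A hedged usage sketch for the new `BasicBitcoinJsonRPCClient` above (credentials and host are the placeholder values from the README; `invoke` returns a `Future[JValue]` as declared in the trait):

```scala
import akka.actor.ActorSystem
import scala.concurrent.ExecutionContext.Implicits.global

object RpcSketch extends App {
  implicit val system: ActorSystem = ActorSystem("rpc-sketch")
  val client = new BasicBitcoinJsonRPCClient(user = "foo", password = "bar", host = "127.0.0.1", port = 8332)
  client.invoke("getblockcount").foreach { count =>
    println(s"blockcount=$count") // a JInt wrapped in a json4s JValue
    system.terminate()
  }
}
```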
@@ -2,66 +2,19 @@ package fr.acinq.eclair.blockchain.bitcoind.rpc

import java.io.IOException

-import akka.actor.ActorSystem
-import com.ning.http.client._
-import org.json4s.{DefaultFormats, DefaultReaders}
-import org.json4s.JsonAST.{JInt, JNull, JString, JValue}
-import org.json4s.jackson.JsonMethods.parse
-import org.json4s.jackson.Serialization._
+import org.json4s.JsonAST.JValue

-import scala.concurrent.{ExecutionContext, Future, Promise}
+import scala.concurrent.{ExecutionContext, Future}

+trait BitcoinJsonRPCClient {
+
+  def invoke(method: String, params: Any*)(implicit ec: ExecutionContext): Future[JValue]
+
+}

// @formatter:off
case class JsonRPCRequest(jsonrpc: String = "1.0", id: String = "scala-client", method: String, params: Seq[Any])
case class Error(code: Int, message: String)
case class JsonRPCResponse(result: JValue, error: Option[Error], id: String)
case class JsonRPCError(error: Error) extends IOException(s"${error.message} (code: ${error.code})")
+// @formatter:on
-
-class BitcoinJsonRPCClient(config: AsyncHttpClientConfig, host: String, port: Int, ssl: Boolean)(implicit system: ActorSystem) {
-
-  def this(user: String, password: String, host: String = "127.0.0.1", port: Int = 8332, ssl: Boolean = false)(implicit system: ActorSystem) = this(
-    new AsyncHttpClientConfig.Builder()
-      .setRealm(new Realm.RealmBuilder().setPrincipal(user).setPassword(password).setUsePreemptiveAuth(true).setScheme(Realm.AuthScheme.BASIC).build)
-      .build,
-    host,
-    port,
-    ssl
-  )
-
-  val client: AsyncHttpClient = new AsyncHttpClient(config)
-
-  implicit val formats = DefaultFormats
-
-  def invoke(method: String, params: Any*)(implicit ec: ExecutionContext): Future[JValue] = {
-    val promise = Promise[JValue]()
-    client
-      .preparePost((if (ssl) "https" else "http") + s"://$host:$port/")
-      .addHeader("Content-Type", "application/json")
-      .setBody(write(JsonRPCRequest(method = method, params = params)))
-      .execute(new AsyncCompletionHandler[Unit] {
-        override def onCompleted(response: Response): Unit =
-          try {
-            val jvalue = parse(response.getResponseBody)
-            val jerror = jvalue \ "error"
-            val result = jvalue \ "result"
-            if (jerror != JNull) {
-              for {
-                JInt(code) <- jerror \ "code"
-                JString(message) <- jerror \ "message"
-              } yield promise.failure(new JsonRPCError(Error(code.toInt, message)))
-            } else {
-              promise.success(result)
-            }
-          } catch {
-            case t: Throwable => promise.failure(t)
-          }
-
-        override def onThrowable(t: Throwable): Unit = promise.failure(t)
-      })
-    promise.future
-  }
-
-  def invoke(request: Seq[(String, Seq[Any])])(implicit ec: ExecutionContext): Future[Seq[JValue]] = ???
-
-}
-// @formatter:on
@@ -1,7 +1,7 @@
package fr.acinq.eclair.blockchain.bitcoind.rpc

import fr.acinq.bitcoin._
-import fr.acinq.eclair.blockchain.{IndividualResult, ParallelGetResponse}
+import fr.acinq.eclair.blockchain.ValidateResult
import fr.acinq.eclair.fromShortId
import fr.acinq.eclair.wire.ChannelAnnouncement
import org.json4s.JsonAST._
@@ -94,12 +94,11 @@ class ExtendedBitcoinClient(val rpcClient: BitcoinJsonRPCClient) {
      tx <- getTransaction(txid)
    } yield tx

-  def isTransactionOuputSpendable(txId: String, ouputIndex: Int, includeMempool: Boolean)(implicit ec: ExecutionContext): Future[Boolean] =
+  def isTransactionOutputSpendable(txId: String, outputIndex: Int, includeMempool: Boolean)(implicit ec: ExecutionContext): Future[Boolean] =
    for {
-      json <- rpcClient.invoke("gettxout", txId, ouputIndex, includeMempool)
+      json <- rpcClient.invoke("gettxout", txId, outputIndex, includeMempool)
    } yield json != JNull


  /**
    *
    * @param txId transaction id
@@ -140,35 +139,25 @@ class ExtendedBitcoinClient(val rpcClient: BitcoinJsonRPCClient) {
      case JInt(count) => count.toLong
    }

-  def getParallel(awaiting: Seq[ChannelAnnouncement]): Future[ParallelGetResponse] = {
+  def validate(c: ChannelAnnouncement)(implicit ec: ExecutionContext): Future[ValidateResult] = {

    case class TxCoordinate(blockHeight: Int, txIndex: Int, outputIndex: Int)

-    val coordinates = awaiting.map {
-      case c =>
-        val (blockHeight, txIndex, outputIndex) = fromShortId(c.shortChannelId)
-        TxCoordinate(blockHeight, txIndex, outputIndex)
-    }.zipWithIndex
-
-    import ExecutionContext.Implicits.global
    implicit val formats = org.json4s.DefaultFormats
+    val (blockHeight, txIndex, outputIndex) = fromShortId(c.shortChannelId)
+    val coordinates = TxCoordinate(blockHeight, txIndex, outputIndex)

    for {
-      blockHashes: Seq[String] <- rpcClient.invoke(coordinates.map(coord => ("getblockhash", coord._1.blockHeight :: Nil))).map(_.map(_.extractOrElse[String]("00" * 32)))
-      txids: Seq[String] <- rpcClient.invoke(blockHashes.map(h => ("getblock", h :: Nil)))
-        .map(_.zipWithIndex)
-        .map(_.map {
-          case (json, idx) => Try {
-            val JArray(txs) = json \ "tx"
-            txs(coordinates(idx)._1.txIndex).extract[String]
-          } getOrElse ("00" * 32)
-        })
-      txs <- rpcClient.invoke(txids.map(txid => ("getrawtransaction", txid :: Nil))).map(_.map {
-        case JString(raw) => Some(Transaction.read(raw))
-        case _ => None
-      })
-      unspent <- rpcClient.invoke(txids.zipWithIndex.map(txid => ("gettxout", txid._1 :: coordinates(txid._2)._1.outputIndex :: true :: Nil))).map(_.map(_ != JNull))
-    } yield ParallelGetResponse(awaiting.zip(txs.zip(unspent)).map(x => IndividualResult(x._1, x._2._1, x._2._2)))
-  }
+      blockHash: String <- rpcClient.invoke("getblockhash", coordinates.blockHeight).map(_.extractOrElse[String]("00" * 32))
+      txid: String <- rpcClient.invoke("getblock", blockHash).map {
+        case json => Try {
+          val JArray(txs) = json \ "tx"
+          txs(coordinates.txIndex).extract[String]
+        } getOrElse ("00" * 32)
+      }
+      tx <- getRawTransaction(txid)
+      unspent <- isTransactionOutputSpendable(txid, coordinates.outputIndex, includeMempool = true)
+    } yield ValidateResult(c, Some(Transaction.read(tx)), unspent, None)

+  } recover { case t: Throwable => ValidateResult(c, None, false, Some(t)) }

  /**
    *
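`validate` above leans on `fromShortId` to turn a short channel id back into chain coordinates. Per BOLT 7 the id packs block height, transaction index and output index into 8 bytes; a sketch of the packing and unpacking (matching the tuple the diff destructures, though eclair's actual helper lives elsewhere in the codebase):

```scala
object ShortIdSketch {
  // block height (3 bytes) | tx index in block (3 bytes) | output index (2 bytes)
  def fromShortId(shortChannelId: Long): (Int, Int, Int) = (
    ((shortChannelId >> 40) & 0xFFFFFF).toInt,
    ((shortChannelId >> 16) & 0xFFFFFF).toInt,
    (shortChannelId & 0xFFFF).toInt)

  def toShortId(blockHeight: Int, txIndex: Int, outputIndex: Int): Long =
    ((blockHeight & 0xFFFFFFL) << 40) | ((txIndex & 0xFFFFFFL) << 16) | (outputIndex & 0xFFFFL)
}
```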
@@ -1,89 +0,0 @@ (file deleted)
package fr.acinq.eclair.blockchain.bitcoind.zmq

import akka.actor.{Actor, ActorLogging}
import fr.acinq.bitcoin.{Block, Transaction}
import fr.acinq.eclair.blockchain.{NewBlock, NewTransaction}
import org.zeromq.ZMQ.Event
import org.zeromq.{ZContext, ZMQ, ZMsg}

import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.util.Try

/**
  * Created by PM on 04/04/2017.
  */
class ZMQActor(address: String, connected: Option[Promise[Boolean]] = None) extends Actor with ActorLogging {

  import ZMQActor._

  val ctx = new ZContext

  val subscriber = ctx.createSocket(ZMQ.SUB)
  subscriber.monitor("inproc://events", ZMQ.EVENT_CONNECTED | ZMQ.EVENT_DISCONNECTED)
  subscriber.connect(address)
  subscriber.subscribe("rawblock".getBytes(ZMQ.CHARSET))
  subscriber.subscribe("rawtx".getBytes(ZMQ.CHARSET))

  val monitor = ctx.createSocket(ZMQ.PAIR)
  monitor.connect("inproc://events")

  import scala.concurrent.ExecutionContext.Implicits.global

  // we check messages in a non-blocking manner with an interval, making sure to retrieve all messages before waiting again
  def checkEvent: Unit = Option(Event.recv(monitor, ZMQ.DONTWAIT)) match {
    case Some(event) =>
      self ! event
      checkEvent
    case None =>
      context.system.scheduler.scheduleOnce(1 second)(checkEvent)
  }

  def checkMsg: Unit = Option(ZMsg.recvMsg(subscriber, ZMQ.DONTWAIT)) match {
    case Some(msg) =>
      self ! msg
      checkMsg
    case None =>
      context.system.scheduler.scheduleOnce(1 second)(checkMsg)
  }

  checkEvent
  checkMsg

  override def receive: Receive = {

    case event: Event => event.getEvent match {
      case ZMQ.EVENT_CONNECTED =>
        log.info(s"connected to ${event.getAddress}")
        Try(connected.map(_.success(true)))
        context.system.eventStream.publish(ZMQConnected)
      case ZMQ.EVENT_DISCONNECTED =>
        log.warning(s"disconnected from ${event.getAddress}")
        context.system.eventStream.publish(ZMQDisconnected)
      case x => log.error(s"unexpected event $x")
    }

    case msg: ZMsg => msg.popString() match {
      case "rawblock" =>
        val block = Block.read(msg.pop().getData)
        log.debug(s"received blockid=${block.blockId}")
        context.system.eventStream.publish(NewBlock(block))
      case "rawtx" =>
        val tx = Transaction.read(msg.pop().getData)
        log.debug(s"received txid=${tx.txid}")
        context.system.eventStream.publish(NewTransaction(tx))
      case topic => log.warning(s"unexpected topic=$topic")
    }
  }

}

object ZMQActor {

  // @formatter:off
  sealed trait ZMQEvent
  case object ZMQConnected extends ZMQEvent
  case object ZMQDisconnected extends ZMQEvent
  // @formatter:on

}
@@ -105,18 +105,16 @@ class BitcoinjWatcher(val kit: WalletAppKit)(implicit ec: ExecutionContext = Exe
      context.become(watching(watches, block2tx1, oldEvents, sent))
    } else publish(tx)

-    case ParallelGetRequest(announcements) => sender ! ParallelGetResponse(announcements.map {
-      case c =>
-        log.info(s"blindly validating channel=$c")
-        val pubkeyScript = write(pay2wsh(Scripts.multiSig2of2(PublicKey(c.bitcoinKey1), PublicKey(c.bitcoinKey2))))
-        val (_, _, outputIndex) = fromShortId(c.shortChannelId)
-        val fakeFundingTx = Transaction(
-          version = 2,
-          txIn = Seq.empty[TxIn],
-          txOut = List.fill(outputIndex + 1)(TxOut(Satoshi(0), pubkeyScript)), // quick and dirty way to be sure that the outputIndex'th output is of the expected format
-          lockTime = 0)
-        IndividualResult(c, Some(fakeFundingTx), true)
-    })
+    case ValidateRequest(c) =>
+      log.info(s"blindly validating channel=$c")
+      val pubkeyScript = write(pay2wsh(Scripts.multiSig2of2(PublicKey(c.bitcoinKey1), PublicKey(c.bitcoinKey2))))
+      val (_, _, outputIndex) = fromShortId(c.shortChannelId)
+      val fakeFundingTx = Transaction(
+        version = 2,
+        txIn = Seq.empty[TxIn],
+        txOut = List.fill(outputIndex + 1)(TxOut(Satoshi(0), pubkeyScript)), // quick and dirty way to be sure that the outputIndex'th output is of the expected format
+        lockTime = 0)
+      sender ! ValidateResult(c, Some(fakeFundingTx), true, None)

    case Terminated(channel) =>
      // we remove watches associated to dead actor
@@ -63,7 +63,5 @@ class ElectrumEclairWallet(val wallet: ActorRef)(implicit system: ActorSystem, e
    }
  }

-  def getMnemonics: Future[Seq[String]] = (wallet ? GetMnemonicCode).mapTo[GetMnemonicCodeResponse].map(_.mnemonics)
-
  override def rollback(tx: Transaction): Future[Boolean] = (wallet ? CancelTransaction(tx)).map(_ => true)
}
@@ -1,15 +1,11 @@
package fr.acinq.eclair.blockchain.electrum

-import java.io.File
-
-import akka.actor.{ActorRef, LoggingFSM, Props}
-import com.google.common.io.Files
+import akka.actor.{ActorRef, FSM, Props}
import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey}
import fr.acinq.bitcoin.DeterministicWallet.{ExtendedPrivateKey, derivePrivateKey, hardened}
-import fr.acinq.bitcoin.{Base58, Base58Check, BinaryData, Block, Crypto, DeterministicWallet, MnemonicCode, OP_PUSHDATA, OutPoint, SIGHASH_ALL, Satoshi, Script, ScriptFlags, ScriptWitness, SigVersion, Transaction, TxIn, TxOut}
+import fr.acinq.bitcoin.{Base58, Base58Check, BinaryData, Block, Crypto, DeterministicWallet, OP_PUSHDATA, OutPoint, SIGHASH_ALL, Satoshi, Script, ScriptWitness, SigVersion, Transaction, TxIn, TxOut}
import fr.acinq.eclair.blockchain.bitcoind.rpc.Error
import fr.acinq.eclair.blockchain.electrum.ElectrumClient.{GetTransaction, GetTransactionResponse, TransactionHistoryItem, computeScriptHash}
-import fr.acinq.eclair.randomBytes
import fr.acinq.eclair.transactions.Transactions
import grizzled.slf4j.Logging
@@ -28,16 +24,15 @@ import scala.util.{Failure, Success, Try}
  * client <--- ask tx ----- wallet
  * client ---- tx ----> wallet
  *
-  * @param mnemonics
+  * @param seed
  * @param client
  * @param params
  */
-class ElectrumWallet(mnemonics: Seq[String], client: ActorRef, params: ElectrumWallet.WalletParameters) extends LoggingFSM[ElectrumWallet.State, ElectrumWallet.Data] {
+class ElectrumWallet(seed: BinaryData, client: ActorRef, params: ElectrumWallet.WalletParameters) extends FSM[ElectrumWallet.State, ElectrumWallet.Data] {

  import ElectrumWallet._
  import params._

-  val seed = MnemonicCode.toSeed(mnemonics, "")
  val master = DeterministicWallet.generate(seed)

  val accountMaster = accountKey(master)
@@ -241,7 +236,6 @@ class ElectrumWallet(seed: BinaryData, client: ActorRef, params: ElectrumWallet.
  }

  whenUnhandled {
-    case Event(GetMnemonicCode, _) => stay replying GetMnemonicCodeResponse(mnemonics)

    case Event(GetCurrentReceiveAddress, data) => stay replying GetCurrentReceiveAddressResponse(data.currentReceiveAddress)

@@ -263,20 +257,7 @@ object ElectrumWallet {
  // use 32 bytes seed, which will generate a 24 words mnemonic code
  val SEED_BYTES_LENGTH = 32

-  def props(mnemonics: Seq[String], client: ActorRef, params: WalletParameters): Props = Props(new ElectrumWallet(mnemonics, client, params))
-
-  def props(file: File, client: ActorRef, params: WalletParameters): Props = {
-    val entropy: BinaryData = (file.exists(), file.canRead(), file.isFile) match {
-      case (true, true, true) => Files.toByteArray(file)
-      case (false, _, _) =>
-        val buffer = randomBytes(SEED_BYTES_LENGTH)
-        Files.write(buffer, file)
-        buffer
-      case _ => throw new IllegalArgumentException(s"cannot create wallet:$file exist but cannot read from")
-    }
-    val mnemonics = MnemonicCode.toMnemonics(entropy)
-    Props(new ElectrumWallet(mnemonics, client, params))
-  }
+  def props(seed: BinaryData, client: ActorRef, params: WalletParameters): Props = Props(new ElectrumWallet(seed, client, params))

  case class WalletParameters(chainHash: BinaryData, minimumFee: Satoshi = Satoshi(2000), dustLimit: Satoshi = Satoshi(546), swipeRange: Int = 10, allowSpendUnconfirmed: Boolean = true)
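What the deleted `props(file, ...)` overload did, kept as a sketch: 32 bytes of entropy become a 24-word BIP39 mnemonic, and the mnemonic becomes the seed that the new constructor now takes directly. All three calls appear in this diff; printing the mnemonic is just for illustration:

```scala
import fr.acinq.bitcoin.{BinaryData, MnemonicCode}
import fr.acinq.eclair.randomBytes

object MnemonicSketch extends App {
  val entropy: BinaryData = randomBytes(32)         // SEED_BYTES_LENGTH above
  val mnemonics = MnemonicCode.toMnemonics(entropy) // 24 words
  val seed = MnemonicCode.toSeed(mnemonics, "")     // what ElectrumWallet now receives
  println(mnemonics.mkString(" "))
}
```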
@@ -289,9 +270,6 @@ object ElectrumWallet {
  sealed trait Request
  sealed trait Response

-  case object GetMnemonicCode extends RuntimeException
-  case class GetMnemonicCodeResponse(mnemonics: Seq[String]) extends Response
-
  case object GetBalance extends Request
  case class GetBalanceResponse(confirmed: Satoshi, unconfirmed: Satoshi) extends Response

@@ -418,12 +396,12 @@ object ElectrumWallet {
  }

  /**
-    * Wallet state, which stores data returned by EletrumX servers.
+    * Wallet state, which stores data returned by ElectrumX servers.
    * Most items are indexed by script hash (i.e. by pubkey script sha256 hash).
    * Height follow ElectrumX's conventions:
    * - h > 0 means that the tx was confirmed at block #h
    * - 0 means unconfirmed, but all input are confirmed
-    * < 0 means unconfirmed, and sonme inputs are unconfirmed as well
+    * < 0 means unconfirmed, and some inputs are unconfirmed as well
    *
    * @param tip current blockchain tip
    * @param accountKeys account keys
@@ -19,8 +19,7 @@ class ElectrumWatcher(client: ActorRef) extends Actor with Stash with ActorLoggi
  client ! ElectrumClient.AddStatusListener(self)

  override def unhandled(message: Any): Unit = message match {
-    case ParallelGetRequest(announcements) => sender ! ParallelGetResponse(announcements.map {
-      case c =>
+    case ValidateRequest(c) =>
      log.info(s"blindly validating channel=$c")
      val pubkeyScript = Script.write(Script.pay2wsh(Scripts.multiSig2of2(PublicKey(c.bitcoinKey1), PublicKey(c.bitcoinKey2))))
      val (_, _, outputIndex) = fromShortId(c.shortChannelId)

@@ -29,8 +28,8 @@ class ElectrumWatcher(client: ActorRef) extends Actor with Stash with ActorLoggi
        txIn = Seq.empty[TxIn],
        txOut = List.fill(outputIndex + 1)(TxOut(Satoshi(0), pubkeyScript)), // quick and dirty way to be sure that the outputIndex'th output is of the expected format
        lockTime = 0)
-      IndividualResult(c, Some(fakeFundingTx), true)
-    })
+      sender ! ValidateResult(c, Some(fakeFundingTx), true, None)

    case _ => log.warning(s"unhandled message $message")
  }
@@ -184,7 +183,7 @@ class ElectrumWatcher(client: ActorRef) extends Actor with Stash with ActorLoggi

    case ElectrumClient.ElectrumDisconnected =>
      // we remember watches and keep track of tx that have not yet been published
-      // we also re-send the txes that we previsouly sent but hadn't yet received the confirmation
+      // we also re-send the txes that we previously sent but hadn't yet received the confirmation
      context become disconnected(watches, sent.map(PublishAsap(_)), block2tx)
  }
@@ -2,7 +2,7 @@ package fr.acinq.eclair.channel

import java.nio.charset.StandardCharsets

-import akka.actor.{ActorRef, FSM, LoggingFSM, OneForOneStrategy, Props, Status, SupervisorStrategy}
+import akka.actor.{ActorRef, FSM, OneForOneStrategy, Props, Status, SupervisorStrategy}
import akka.event.Logging.MDC
import akka.pattern.pipe
import fr.acinq.bitcoin.Crypto.{PublicKey, ripemd160, sha256}
@@ -35,7 +35,7 @@ object Channel {
  val ANNOUNCEMENTS_MINCONF = 6

  // https://github.com/lightningnetwork/lightning-rfc/blob/master/02-peer-protocol.md#requirements
-  val MAX_FUNDING_SATOSHIS = 16777216 // = 2^24
+  val MAX_FUNDING_SATOSHIS = 16777216L // = 2^24
  val MIN_FUNDING_SATOSHIS = 1000
  val MAX_ACCEPTED_HTLCS = 483

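The only change here is the `L` suffix. 16777216 fits an `Int`, but amounts of this size get multiplied downstream (e.g. satoshi to millisatoshi), and that arithmetic overflows 32 bits; typing the constant as `Long` avoids it. A quick check (the overflow shown is an editor's illustration, not code from the diff):

```scala
object FundingSketch extends App {
  assert(16777216L == (1L << 24)) // the constant really is 2^24
  println(16777216 * 1000)        // Int arithmetic overflows: prints -402653184
  println(16777216L * 1000)       // 16777216000, correct as Long (msat scale)
}
```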
@@ -46,7 +46,7 @@ object Channel {

}

-class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: PublicKey, blockchain: ActorRef, router: ActorRef, relayer: ActorRef)(implicit ec: ExecutionContext = ExecutionContext.Implicits.global) extends LoggingFSM[State, Data] with FSMDiagnosticActorLogging[State, Data] {
+class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: PublicKey, blockchain: ActorRef, router: ActorRef, relayer: ActorRef)(implicit ec: ExecutionContext = ExecutionContext.Implicits.global) extends FSM[State, Data] with FSMDiagnosticActorLogging[State, Data] {

  import Channel._
@@ -484,15 +484,15 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
      // note: spec would allow us to keep sending new htlcs after having received their shutdown (and not sent ours)
      // but we want to converge as fast as possible and they would probably not route them anyway
      val error = NoMoreHtlcsClosingInProgress(d.channelId)
-      handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), Some(d.channelUpdate)), c)
+      handleCommandError(AddHtlcFailed(d.channelId, c.paymentHash, error, origin(c), Some(d.channelUpdate)), c)

    case Event(c: CMD_ADD_HTLC, d: DATA_NORMAL) =>
      Try(Commitments.sendAdd(d.commitments, c, origin(c))) match {
        case Success(Right((commitments1, add))) =>
          if (c.commit) self ! CMD_SIGN
          handleCommandSuccess(sender, d.copy(commitments = commitments1)) sending add
-        case Success(Left(error)) => handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), Some(d.channelUpdate)), c)
-        case Failure(cause) => handleCommandError(AddHtlcFailed(d.channelId, cause, origin(c), Some(d.channelUpdate)), c)
+        case Success(Left(error)) => handleCommandError(AddHtlcFailed(d.channelId, c.paymentHash, error, origin(c), Some(d.channelUpdate)), c)
+        case Failure(cause) => handleCommandError(AddHtlcFailed(d.channelId, c.paymentHash, cause, origin(c), Some(d.channelUpdate)), c)
      }

    case Event(add: UpdateAddHtlc, d: DATA_NORMAL) =>
@@ -575,7 +575,11 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
      Try(Commitments.sendCommit(d.commitments)) match {
        case Success((commitments1, commit)) =>
          log.debug(s"sending a new sig, spec:\n${Commitments.specs2String(commitments1)}")
-          commitments1.localChanges.signed.collect { case u: UpdateFulfillHtlc => relayer ! AckFulfillCmd(u.channelId, u.id) }
+          commitments1.localChanges.signed.collect {
+            case u: UpdateFulfillHtlc => relayer ! CommandBuffer.CommandAck(u.channelId, u.id)
+            case u: UpdateFailHtlc => relayer ! CommandBuffer.CommandAck(u.channelId, u.id)
+            case u: UpdateFailMalformedHtlc => relayer ! CommandBuffer.CommandAck(u.channelId, u.id)
+          }
          handleCommandSuccess(sender, store(d.copy(commitments = commitments1))) sending commit
        case Failure(cause) => handleCommandError(cause, c)
      }
@@ -691,7 +695,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
            val closingSigned = Closing.makeFirstClosingTx(d.commitments, localShutdown.scriptPubKey, remoteShutdown.scriptPubKey)
            goto(NEGOTIATING) using store(DATA_NEGOTIATING(d.commitments, localShutdown, remoteShutdown, closingSigned :: Nil)) sending sendList :+ closingSigned
          } else {
-            // there are some pending signed htlcs, we need to fail/fullfill them
+            // there are some pending signed htlcs, we need to fail/fulfill them
            goto(SHUTDOWN) using store(DATA_SHUTDOWN(d.commitments, localShutdown, remoteShutdown)) sending sendList
          }
        }
@@ -781,7 +785,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
      log.debug(s"sending channel_update announcement (disable)")
      val channelUpdate = Announcements.makeChannelUpdate(nodeParams.chainHash, nodeParams.privateKey, remoteNodeId, d.shortChannelId, d.channelUpdate.cltvExpiryDelta, d.channelUpdate.htlcMinimumMsat, d.channelUpdate.feeBaseMsat, d.channelUpdate.feeProportionalMillionths, enable = false)
      d.commitments.localChanges.proposed.collect {
-        case add: UpdateAddHtlc => relayer ! Status.Failure(AddHtlcFailed(d.channelId, ChannelUnavailable(d.channelId), d.commitments.originChannels(add.id), Some(channelUpdate)))
+        case add: UpdateAddHtlc => relayer ! Status.Failure(AddHtlcFailed(d.channelId, add.paymentHash, ChannelUnavailable(d.channelId), d.commitments.originChannels(add.id), Some(channelUpdate)))
      }
      goto(OFFLINE) using d.copy(channelUpdate = channelUpdate)
@@ -877,7 +881,11 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
      Try(Commitments.sendCommit(d.commitments)) match {
        case Success((commitments1, commit)) =>
          log.debug(s"sending a new sig, spec:\n${Commitments.specs2String(commitments1)}")
-          commitments1.localChanges.signed.collect { case u: UpdateFulfillHtlc => relayer ! AckFulfillCmd(u.channelId, u.id) }
+          commitments1.localChanges.signed.collect {
+            case u: UpdateFulfillHtlc => relayer ! CommandBuffer.CommandAck(u.channelId, u.id)
+            case u: UpdateFailHtlc => relayer ! CommandBuffer.CommandAck(u.channelId, u.id)
+            case u: UpdateFailMalformedHtlc => relayer ! CommandBuffer.CommandAck(u.channelId, u.id)
+          }
          handleCommandSuccess(sender, store(d.copy(commitments = commitments1))) sending commit
        case Failure(cause) => handleCommandError(cause, c)
      }
@ -1046,51 +1054,15 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
|
|||
// one of the outputs of the local/remote/revoked commit was spent
|
||||
// we just put a watch to be notified when it is confirmed
|
||||
blockchain ! WatchConfirmed(self, tx, nodeParams.minDepthBlocks, BITCOIN_TX_CONFIRMED(tx))
|
||||
|
||||
// when a remote or local commitment tx containing outgoing htlcs is published on the network,
|
||||
// we watch it in order to extract payment preimage if funds are pulled by the counterparty
|
||||
// we can then use these preimages to fulfill origin htlcs
|
||||
log.warning(s"processing BITCOIN_OUTPUT_SPENT with txid=${tx.txid} tx=$tx")
|
||||
require(tx.txIn.size == 1, s"htlc tx should only have 1 input")
|
||||
val witness = tx.txIn(0).witness
|
||||
val extracted_opt = witness match {
|
||||
case ScriptWitness(Seq(localSig, paymentPreimage, htlcOfferedScript)) if paymentPreimage.size == 32 =>
|
||||
log.info(s"extracted preimage=$paymentPreimage from tx=$tx (claim-htlc-success)")
|
||||
Some(paymentPreimage)
|
||||
case ScriptWitness(Seq(BinaryData.empty, remoteSig, localSig, paymentPreimage, htlcReceivedScript)) if paymentPreimage.size == 32 =>
|
||||
log.info(s"extracted preimage=$paymentPreimage from tx=$tx (htlc-success)")
|
||||
Some(paymentPreimage)
|
||||
case ScriptWitness(Seq(BinaryData.empty, remoteSig, localSig, BinaryData.empty, htlcOfferedScript)) =>
|
||||
val paymentHash160 = BinaryData(htlcOfferedScript.slice(109, 109 + 20))
|
||||
log.info(s"extracted paymentHash160=$paymentHash160 from tx=$tx (htlc-timeout)")
|
||||
Some(paymentHash160)
|
||||
case ScriptWitness(Seq(remoteSig, BinaryData.empty, htlcReceivedScript)) =>
|
||||
val paymentHash160 = BinaryData(htlcReceivedScript.slice(69, 69 + 20))
|
||||
log.info(s"extracted paymentHash160=$paymentHash160 from tx=$tx (claim-htlc-timeout)")
|
||||
Some(paymentHash160)
|
||||
case _ =>
|
||||
// this is not an htlc witness (we don't watch only htlc outputs)
|
||||
None
|
||||
}
|
||||
extracted_opt map { extracted =>
|
||||
// we only consider htlcs in our local commitment, because we only care about outgoing htlcs, which disappear first in the remote commitment
|
||||
// if an outgoing htlc is in the remote commitment, then:
|
||||
// - either it is in the local commitment (it was never fulfilled)
|
||||
// - or we have already received the fulfill and forwarded it upstream
|
||||
val outgoingHtlcs = d.commitments.localCommit.spec.htlcs.filter(_.direction == OUT).map(_.add)
|
||||
outgoingHtlcs.collect {
|
||||
case add if add.paymentHash == sha256(extracted) =>
|
||||
val origin = d.commitments.originChannels(add.id)
|
||||
log.warning(s"found a match between preimage=$extracted and origin=$origin: htlc was fulfilled")
|
||||
// let's just pretend we received the preimage from the counterparty
|
||||
relayer ! ForwardFulfill(UpdateFulfillHtlc(add.channelId, add.id, extracted), origin)
|
||||
case add if ripemd160(add.paymentHash) == extracted =>
|
||||
val origin = d.commitments.originChannels(add.id)
|
||||
log.warning(s"found a match between paymentHash160=$extracted and origin=$origin: htlc timed out")
|
||||
relayer ! Status.Failure(AddHtlcFailed(d.channelId, HtlcTimedout(d.channelId), origin, None))
|
||||
}
|
||||
// TODO: should we handle local htlcs here as well? currently timed out htlcs that we sent will never have an answer
|
||||
// TODO: we do not handle the case where htlcs transactions end up being unconfirmed this can happen if an htlc-success tx is published right before a htlc timed out
|
||||
val fulfills = Closing.extractPreimages(d.commitments.localCommit, tx)
|
||||
fulfills map { fulfill =>
|
||||
val origin = d.commitments.originChannels(fulfill.id)
|
||||
log.warning(s"fulfilling htlc #${fulfill.id} paymentHash=${sha256(fulfill.paymentPreimage)} origin=$origin")
|
||||
relayer ! ForwardFulfill(fulfill, origin)
|
||||
}
|
||||
stay
|
||||
|
||||
|
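The witness patterns matched above are worth seeing in isolation: an htlc-success-style witness carries the payment preimage as one of its stack elements, and the 32-byte size guard is what distinguishes a preimage from a signature. A minimal sketch, assuming bitcoin-lib's `ScriptWitness`, `BinaryData` and `Crypto.sha256` (the same imports this diff adds to Helpers.scala below):

```
import fr.acinq.bitcoin.{BinaryData, ScriptWitness}
import fr.acinq.bitcoin.Crypto.sha256

// mirrors the two success cases above: the preimage is the only 32-byte stack element
def preimageOf(witness: ScriptWitness): Option[BinaryData] = witness match {
  case ScriptWitness(Seq(_, paymentPreimage, _)) if paymentPreimage.size == 32 => Some(paymentPreimage) // claim-htlc-success
  case ScriptWitness(Seq(BinaryData.empty, _, _, paymentPreimage, _)) if paymentPreimage.size == 32 => Some(paymentPreimage) // htlc-success
  case _ => None // not an htlc witness (we don't watch only htlc outputs)
}

// a recovered preimage settles an outgoing htlc when its sha256 equals the htlc's payment hash
def settles(preimage: BinaryData, paymentHash: BinaryData): Boolean = sha256(preimage) == paymentHash
```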
@ -1101,6 +1073,16 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
|
|||
val remoteCommitPublished1 = d.remoteCommitPublished.map(Closing.updateRemoteCommitPublished(_, tx))
|
||||
val nextRemoteCommitPublished1 = d.nextRemoteCommitPublished.map(Closing.updateRemoteCommitPublished(_, tx))
|
||||
val revokedCommitPublished1 = d.revokedCommitPublished.map(Closing.updateRevokedCommitPublished(_, tx))
|
||||
// we may need to fail some htlcs in case a commitment tx was published and they have reached the timeout threshold
|
||||
val timedoutHtlcs =
|
||||
Closing.timedoutHtlcs(d.commitments.localCommit, Satoshi(d.commitments.localParams.dustLimitSatoshis), tx) ++
|
||||
Closing.timedoutHtlcs(d.commitments.remoteCommit, Satoshi(d.commitments.remoteParams.dustLimitSatoshis), tx) ++
|
||||
d.commitments.remoteNextCommitInfo.left.toSeq.flatMap(r => Closing.timedoutHtlcs(r.nextRemoteCommit, Satoshi(d.commitments.remoteParams.dustLimitSatoshis), tx))
|
||||
timedoutHtlcs.foreach { add =>
|
||||
val origin = d.commitments.originChannels(add.id)
|
||||
log.warning(s"failing htlc #${add.id} paymentHash=${add.paymentHash} origin=$origin: htlc timed out")
|
||||
relayer ! Status.Failure(AddHtlcFailed(d.channelId, add.paymentHash, HtlcTimedout(d.channelId), origin, None))
|
||||
}
|
||||
// then let's see if any of the possible close scenarios can be considered done
|
||||
val mutualCloseDone = d.mutualClosePublished.exists(_.txid == tx.txid) // this case is trivial, in a mutual close scenario we only need to make sure that one of the closing txes is confirmed
|
||||
val localCommitDone = localCommitPublished1.map(Closing.isLocalCommitDone(_)).getOrElse(false)
|
||||
|
@ -1160,7 +1142,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
|
|||
wallet.rollback(fundingTx)
|
||||
stay
|
||||
|
||||
case Event(INPUT_DISCONNECTED, _) => stay // we are disconnected, but it doesn't matter anymoer
|
||||
case Event(INPUT_DISCONNECTED, _) => stay // we are disconnected, but it doesn't matter anymore
|
||||
})
|
||||
|
||||
when(OFFLINE)(handleExceptions {
|
||||
|
@ -1307,8 +1289,8 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
|
|||
log.info(s"rejecting htlc request in state=$stateName")
|
||||
val error = ChannelUnavailable(d.channelId)
|
||||
d match {
|
||||
case normal: DATA_NORMAL => handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), Some(normal.channelUpdate)), c) // can happen if we are in OFFLINE or SYNCING state (channelUpdate will have enable=false)
|
||||
case _ => handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), None), c) // we don't provide a channel_update: this will be a permanent channel failure
|
||||
case normal: DATA_NORMAL => handleCommandError(AddHtlcFailed(d.channelId, c.paymentHash, error, origin(c), Some(normal.channelUpdate)), c) // can happen if we are in OFFLINE or SYNCING state (channelUpdate will have enable=false)
|
||||
case _ => handleCommandError(AddHtlcFailed(d.channelId, c.paymentHash, error, origin(c), None), c) // we don't provide a channel_update: this will be a permanent channel failure
|
||||
}
|
||||
|
||||
// we only care about this event in NORMAL and SHUTDOWN state, and we never unregister to the event stream
|
||||
|
@ -1667,7 +1649,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
|
|||
|
||||
if (revWasSentLast) resendRevocation
|
||||
case Right(_) if commitments1.remoteCommit.index + 1 == channelReestablish.nextLocalCommitmentNumber =>
|
||||
// there wasn't any sig in-flight when the disconnection occured
|
||||
// there wasn't any sig in-flight when the disconnection occurred
|
||||
resendRevocation
|
||||
case _ => throw CommitmentSyncError(d.channelId)
|
||||
}
|
||||
|
|
|
@ -54,5 +54,5 @@ case class InvalidRevocation (override val channelId: BinaryDa
|
|||
case class CommitmentSyncError (override val channelId: BinaryData) extends ChannelException(channelId, "commitment sync error")
|
||||
case class RevocationSyncError (override val channelId: BinaryData) extends ChannelException(channelId, "revocation sync error")
|
||||
case class InvalidFailureCode (override val channelId: BinaryData) extends ChannelException(channelId, "UpdateFailMalformedHtlc message doesn't have BADONION bit set")
|
||||
case class AddHtlcFailed (override val channelId: BinaryData, t: Throwable, origin: Origin, channelUpdate: Option[ChannelUpdate]) extends ChannelException(channelId, s"cannot add htlc with origin=$origin reason=${t.getMessage}")
|
||||
case class AddHtlcFailed (override val channelId: BinaryData, paymentHash: BinaryData, t: Throwable, origin: Origin, channelUpdate: Option[ChannelUpdate]) extends ChannelException(channelId, s"cannot add htlc with origin=$origin reason=${t.getMessage}")
|
||||
// @formatter:on
|
|
@ -47,7 +47,8 @@ case class Commitments(localParams: LocalParams, remoteParams: RemoteParams,
|
|||
|
||||
def hasTimedoutOutgoingHtlcs(blockheight: Long): Boolean =
|
||||
localCommit.spec.htlcs.exists(htlc => htlc.direction == OUT && blockheight >= htlc.add.expiry) ||
|
||||
remoteCommit.spec.htlcs.exists(htlc => htlc.direction == IN && blockheight >= htlc.add.expiry)
|
||||
remoteCommit.spec.htlcs.exists(htlc => htlc.direction == IN && blockheight >= htlc.add.expiry) ||
|
||||
remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit.spec.htlcs.exists(htlc => htlc.direction == IN && blockheight >= htlc.add.expiry)).getOrElse(false)
|
||||
|
||||
def addLocalProposal(proposal: UpdateMessage): Commitments = Commitments.addLocalProposal(this, proposal)
|
||||
|
||||
|
|
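The direction flip in the predicate above is easy to miss: an htlc that is OUT in our local commitment is the same htlc seen as IN from the remote (and next-remote) commitment, so all three views must be checked against the current block height. A hedged sketch of the core test, with `Commitments`, `OUT` and `IN` as defined in this file:

```
// an htlc has timed out once the chain has reached its absolute expiry
def isExpired(add: UpdateAddHtlc, blockheight: Long): Boolean = blockheight >= add.expiry

// same htlc, two vantage points: OUT locally, IN remotely
def hasTimedout(commitments: Commitments, blockheight: Long): Boolean =
  commitments.localCommit.spec.htlcs.exists(h => h.direction == OUT && isExpired(h.add, blockheight)) ||
  commitments.remoteCommit.spec.htlcs.exists(h => h.direction == IN && isExpired(h.add, blockheight))
```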
|
@ -1,7 +1,7 @@
|
|||
package fr.acinq.eclair.channel
|
||||
|
||||
import akka.event.LoggingAdapter
|
||||
import fr.acinq.bitcoin.Crypto.{Point, PublicKey, Scalar, sha256}
|
||||
import fr.acinq.bitcoin.Crypto.{Point, PublicKey, Scalar, ripemd160, sha256}
|
||||
import fr.acinq.bitcoin.Script._
|
||||
import fr.acinq.bitcoin.{OutPoint, _}
|
||||
import fr.acinq.eclair.blockchain.EclairWallet
|
||||
|
@ -47,7 +47,7 @@ object Helpers {
|
|||
if (open.pushMsat > 1000 * open.fundingSatoshis) throw new InvalidPushAmount(open.temporaryChannelId, open.pushMsat, 1000 * open.fundingSatoshis)
|
||||
val localFeeratePerKw = Globals.feeratesPerKw.get.block_1
|
||||
if (isFeeDiffTooHigh(open.feeratePerKw, localFeeratePerKw, nodeParams.maxFeerateMismatch)) throw new FeerateTooDifferent(open.temporaryChannelId, localFeeratePerKw, open.feeratePerKw)
|
||||
// only enfore dust limit check on mainnet
|
||||
// only enforce dust limit check on mainnet
|
||||
if (nodeParams.chainHash == Block.LivenetGenesisBlock.hash) {
|
||||
if (open.dustLimitSatoshis < Channel.MIN_DUSTLIMIT) throw new InvalidDustLimit(open.temporaryChannelId, open.dustLimitSatoshis, Channel.MIN_DUSTLIMIT)
|
||||
}
|
||||
|
@ -60,7 +60,7 @@ object Helpers {
|
|||
*/
|
||||
def validateParamsFunder(nodeParams: NodeParams, open: OpenChannel, accept: AcceptChannel): Unit = {
|
||||
if (accept.maxAcceptedHtlcs > Channel.MAX_ACCEPTED_HTLCS) throw new InvalidMaxAcceptedHtlcs(accept.temporaryChannelId, accept.maxAcceptedHtlcs, Channel.MAX_ACCEPTED_HTLCS)
|
||||
// only enfore dust limit check on mainnet
|
||||
// only enforce dust limit check on mainnet
|
||||
if (nodeParams.chainHash == Block.LivenetGenesisBlock.hash) {
|
||||
if (accept.dustLimitSatoshis < Channel.MIN_DUSTLIMIT) throw new InvalidDustLimit(accept.temporaryChannelId, accept.dustLimitSatoshis, Channel.MIN_DUSTLIMIT)
|
||||
}
|
||||
|
@ -429,6 +429,90 @@ object Helpers {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* In CLOSING state, any time we see a new transaction, we try to extract a preimage from it in order to fulfill the
|
||||
* corresponding incoming htlc in an upstream channel.
|
||||
*
|
||||
* Not doing that would result in us losing money, because the downstream node would pull money from one side, and
|
||||
* the upstream node would get refunded after a timeout.
|
||||
*
|
||||
* @param localCommit
|
||||
* @param tx
|
||||
* @return a set of fulfills that need to be sent upstream if extraction was successful
|
||||
*/
|
||||
def extractPreimages(localCommit: LocalCommit, tx: Transaction)(implicit log: LoggingAdapter): Set[UpdateFulfillHtlc] = {
|
||||
val paymentPreimages = tx.txIn.map(_.witness match {
|
||||
case ScriptWitness(Seq(localSig, paymentPreimage, htlcOfferedScript)) if paymentPreimage.size == 32 =>
|
||||
log.info(s"extracted paymentPreimage=$paymentPreimage from tx=$tx (claim-htlc-success)")
|
||||
Some(paymentPreimage)
|
||||
case ScriptWitness(Seq(BinaryData.empty, remoteSig, localSig, paymentPreimage, htlcReceivedScript)) if paymentPreimage.size == 32 =>
|
||||
log.info(s"extracted paymentPreimage=$paymentPreimage from tx=$tx (htlc-success)")
|
||||
Some(paymentPreimage)
|
||||
case _ => None
|
||||
}).toSet.flatten
|
||||
paymentPreimages flatMap { paymentPreimage =>
|
||||
// we only consider htlcs in our local commitment, because we only care about outgoing htlcs, which disappear first in the remote commitment
|
||||
// if an outgoing htlc is in the remote commitment, then:
|
||||
// - either it is in the local commitment (it was never fulfilled)
|
||||
// - or we have already received the fulfill and forwarded it upstream
|
||||
val outgoingHtlcs = localCommit.spec.htlcs.filter(_.direction == OUT).map(_.add)
|
||||
outgoingHtlcs.collect {
|
||||
case add if add.paymentHash == sha256(paymentPreimage) =>
|
||||
// let's just pretend we received the preimage from the counterparty and build a fulfill message
|
||||
UpdateFulfillHtlc(add.channelId, add.id, paymentPreimage)
|
||||
}
|
||||
// TODO: should we handle local htlcs here as well? currently timed out htlcs that we sent will never have an answer
|
||||
}
|
||||
}
|
||||
|
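A usage sketch, mirroring the CLOSING handler earlier in this diff: every fulfill returned by `extractPreimages` is forwarded upstream as if the counterparty had sent it over the wire (`d`, `tx` and `relayer` as in the Channel actor):

```
// pretend each recovered preimage was a fulfill received from the counterparty
Closing.extractPreimages(d.commitments.localCommit, tx).foreach { fulfill =>
  val origin = d.commitments.originChannels(fulfill.id)
  relayer ! ForwardFulfill(fulfill, origin)
}
```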
||||
/**
|
||||
* In CLOSING state, when we are notified that a transaction has been confirmed, we analyze it to find out if one or
|
||||
* more htlcs have timed out and need to be failed in an upstream channel.
|
||||
*
|
||||
* @param localCommit
|
||||
* @param localDustLimit
|
||||
* @param tx a tx that has reached mindepth
|
||||
* @return a set of htlcs that need to be failed upstream
|
||||
*/
|
||||
def timedoutHtlcs(localCommit: LocalCommit, localDustLimit: Satoshi, tx: Transaction)(implicit log: LoggingAdapter): Set[UpdateAddHtlc] =
|
||||
if (tx.txid == localCommit.publishableTxs.commitTx.tx.txid) {
|
||||
// the tx is a commitment tx, we can immediately fail all dust htlcs (they don't have an output in the tx)
|
||||
(localCommit.spec.htlcs.filter(_.direction == OUT) -- Transactions.trimOfferedHtlcs(localDustLimit, localCommit.spec)).map(_.add)
|
||||
} else {
|
||||
// maybe this is a timeout tx, in that case we can resolve and fail the corresponding htlc
|
||||
tx.txIn.map(_.witness match {
|
||||
case ScriptWitness(Seq(BinaryData.empty, remoteSig, localSig, BinaryData.empty, htlcOfferedScript)) =>
|
||||
val paymentHash160 = BinaryData(htlcOfferedScript.slice(109, 109 + 20))
|
||||
log.info(s"extracted paymentHash160=$paymentHash160 from tx=$tx (htlc-timeout)")
|
||||
localCommit.spec.htlcs.filter(_.direction == OUT).map(_.add).filter(add => ripemd160(add.paymentHash) == paymentHash160)
|
||||
case _ => Set.empty
|
||||
}).toSet.flatten
|
||||
}
|
||||
|
||||
/**
|
||||
* In CLOSING state, when we are notified that a transaction has been confirmed, we analyze it to find out if one or
|
||||
* more htlcs have timed out and need to be failed in an upstream channel.
|
||||
*
|
||||
* @param remoteCommit
|
||||
* @param remoteDustLimit
|
||||
* @param tx a tx that has reached mindepth
|
||||
* @return a set of htlcs that need to be failed upstream
|
||||
*/
|
||||
def timedoutHtlcs(remoteCommit: RemoteCommit, remoteDustLimit: Satoshi, tx: Transaction)(implicit log: LoggingAdapter): Set[UpdateAddHtlc] =
|
||||
if (tx.txid == remoteCommit.txid) {
|
||||
// the tx is a commitment tx, we can immediately fail all dust htlcs (they don't have an output in the tx)
|
||||
(remoteCommit.spec.htlcs.filter(_.direction == IN) -- Transactions.trimReceivedHtlcs(remoteDustLimit, remoteCommit.spec)).map(_.add)
|
||||
} else {
|
||||
// maybe this is a timeout tx, in that case we can resolve and fail the corresponding htlc
|
||||
tx.txIn.map(_.witness match {
|
||||
case ScriptWitness(Seq(remoteSig, BinaryData.empty, htlcReceivedScript)) =>
|
||||
val paymentHash160 = BinaryData(htlcReceivedScript.slice(69, 69 + 20))
|
||||
log.info(s"extracted paymentHash160=$paymentHash160 from tx=$tx (claim-htlc-timeout)")
|
||||
remoteCommit.spec.htlcs.filter(_.direction == IN).map(_.add).filter(add => ripemd160(add.paymentHash) == paymentHash160)
|
||||
case _ => Set.empty
|
||||
}).toSet.flatten
|
||||
}
|
||||
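Taken together, the two overloads are applied to every confirmed transaction, as the Channel code earlier in this diff does. A hedged sketch of that call site (both helpers require an implicit `LoggingAdapter` in scope):

```
// collect htlcs that can now be failed upstream, across local, remote and next-remote commitments
def timedoutHtlcs(commitments: Commitments, confirmedTx: Transaction)(implicit log: LoggingAdapter): Set[UpdateAddHtlc] =
  Closing.timedoutHtlcs(commitments.localCommit, Satoshi(commitments.localParams.dustLimitSatoshis), confirmedTx) ++
  Closing.timedoutHtlcs(commitments.remoteCommit, Satoshi(commitments.remoteParams.dustLimitSatoshis), confirmedTx) ++
  commitments.remoteNextCommitInfo.left.toSeq.flatMap(r => Closing.timedoutHtlcs(r.nextRemoteCommit, Satoshi(commitments.remoteParams.dustLimitSatoshis), confirmedTx))
```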
|
||||
/**
|
||||
* In CLOSING state, when we are notified that a transaction has been confirmed, we check if this tx belongs in the
|
||||
* local commit scenario and keep track of it.
|
||||
|
@ -476,7 +560,6 @@ object Helpers {
|
|||
val isCommitTx = remoteCommitPublished.commitTx.txid == tx.txid
|
||||
// does the tx spend an output of the local commitment tx?
|
||||
val spendsTheCommitTx = remoteCommitPublished.commitTx.txid == outPoint.txid
|
||||
// TODO: we don't currently spend htlc transactions
|
||||
isCommitTx || spendsTheCommitTx
|
||||
}
|
||||
// then we add the relevant outpoints to the map keeping track of which txid spends which outpoint
|
||||
|
@ -502,6 +585,7 @@ object Helpers {
|
|||
val isCommitTx = revokedCommitPublished.commitTx.txid == tx.txid
|
||||
// does the tx spend an output of the local commitment tx?
|
||||
val spendsTheCommitTx = revokedCommitPublished.commitTx.txid == outPoint.txid
|
||||
// TODO: we don't currently spend/steal htlc transactions
|
||||
isCommitTx || spendsTheCommitTx
|
||||
}
|
||||
// then we add the relevant outpoints to the map keeping track of which txid spends which outpoint
|
||||
|
@ -519,7 +603,7 @@ object Helpers {
|
|||
def isLocalCommitDone(localCommitPublished: LocalCommitPublished) = {
|
||||
// is the commitment tx buried? (we need to check this because we may not have any outputs)
|
||||
val isCommitTxConfirmed = localCommitPublished.irrevocablySpent.values.toSet.contains(localCommitPublished.commitTx.txid)
|
||||
// are there remaining spendable outputs from the commitment tx? we just substract all known spent outputs from the ones we control
|
||||
// are there remaining spendable outputs from the commitment tx? we just subtract all known spent outputs from the ones we control
|
||||
val commitOutputsSpendableByUs = (localCommitPublished.claimMainDelayedOutputTx.toSeq ++ localCommitPublished.htlcSuccessTxs ++ localCommitPublished.htlcTimeoutTxs)
|
||||
.flatMap(_.txIn.map(_.outPoint)).toSet -- localCommitPublished.irrevocablySpent.keys
|
||||
// which htlc delayed txes can we expect to be confirmed?
|
||||
|
@ -570,7 +654,7 @@ object Helpers {
|
|||
* - not watch for confirmations if we know the tx is already confirmed
|
||||
* - not watch the corresponding utxo when we already know the final spending tx
|
||||
*
|
||||
* @param tx a tx with only one input
|
||||
* @param tx a tx with only one input
|
||||
* @param irrevocablySpent a map of known spent outpoints
|
||||
* @return true if we know for sure that the utxos consumed by the tx have already irrevocably been spent, false otherwise
|
||||
*/
|
||||
|
|
|
@ -40,12 +40,12 @@ case class BitStream(bytes: Vector[Byte], offstart: Int, offend: Int) {
|
|||
* append bytes to a bitstream
|
||||
*
|
||||
* @param input bytes to append
|
||||
* @return an udpdate bitstream
|
||||
* @return an updated bitstream
|
||||
*/
|
||||
def writeBytes(input: Seq[Byte]): BitStream = input.foldLeft(this) { case (bs, b) => bs.writeByte(b) }
|
||||
|
||||
/**
|
||||
* append a bit to a bistream
|
||||
* append a bit to a bitstream
|
||||
*
|
||||
* @param bit bit to append
|
||||
* @return an updated bitstream
|
||||
|
@ -63,7 +63,7 @@ case class BitStream(bytes: Vector[Byte], offstart: Int, offend: Int) {
|
|||
}
|
||||
|
||||
/**
|
||||
* append bits to a bistream
|
||||
* append bits to a bitstream
|
||||
*
|
||||
* @param input bits to append
|
||||
* @return an updated bitstream
|
||||
|
|
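A small usage sketch for the append operations documented above. `BitStream.empty` is assumed here as a zero-length starting stream and the single-bit append as `writeBit`; treat both names as hypothetical if the file spells them differently:

```
val bits = BitStream.empty
  .writeBytes(Seq(0xca.toByte, 0xfe.toByte)) // 16 bits, appended byte by byte via foldLeft
  .writeBit(true)                            // plus one trailing bit: 17 bits total
```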
|
@ -168,7 +168,7 @@ object Noise {
|
|||
*
|
||||
* @param cipher cipher functions
|
||||
*/
|
||||
case class UnitializedCipherState(cipher: CipherFunctions) extends CipherState {
|
||||
case class UninitializedCipherState(cipher: CipherFunctions) extends CipherState {
|
||||
override val hasKey = false
|
||||
|
||||
override def encryptWithAd(ad: BinaryData, plaintext: BinaryData): (CipherState, BinaryData) = (this, plaintext)
|
||||
|
@ -197,11 +197,11 @@ object Noise {
|
|||
|
||||
object CipherState {
|
||||
def apply(k: BinaryData, cipher: CipherFunctions): CipherState = k.length match {
|
||||
case 0 => UnitializedCipherState(cipher)
|
||||
case 0 => UninitializedCipherState(cipher)
|
||||
case 32 => InitializedCipherState(k, 0, cipher)
|
||||
}
|
||||
|
||||
def apply(cipher: CipherFunctions): CipherState = UnitializedCipherState(cipher)
|
||||
def apply(cipher: CipherFunctions): CipherState = UninitializedCipherState(cipher)
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
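A sketch of the factory dispatch above: a zero-length key selects the pass-through uninitialized state, whose encrypt/decrypt return the input unchanged, while a 32-byte key selects an initialized state. Hypothetical values; `Chacha20Poly1305CipherFunctions` is assumed to be the cipher object defined elsewhere in this file:

```
val cs = Noise.CipherState(k = BinaryData.empty, cipher = Noise.Chacha20Poly1305CipherFunctions) // k.length == 0 => UninitializedCipherState
val (cs1, ciphertext) = cs.encryptWithAd(BinaryData.empty, BinaryData("deadbeef"))
assert(cs1 == cs && ciphertext == BinaryData("deadbeef")) // no key: plaintext passes through unchanged
```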
|
@ -70,28 +70,28 @@ object Sphinx extends Logging {
|
|||
def blind(pub: PublicKey, blindingFactors: Seq[BinaryData]): PublicKey = blindingFactors.foldLeft(pub)(blind)
|
||||
|
||||
/**
|
||||
* computes the ephemereal public keys and shared secrets for all nodes on the route.
|
||||
* computes the ephemeral public keys and shared secrets for all nodes on the route.
|
||||
*
|
||||
* @param sessionKey this node's session key
|
||||
* @param publicKeys public keys of each node on the route
|
||||
* @return a tuple (ephemereal public keys, shared secrets)
|
||||
* @return a tuple (ephemeral public keys, shared secrets)
|
||||
*/
|
||||
def computeEphemerealPublicKeysAndSharedSecrets(sessionKey: PrivateKey, publicKeys: Seq[PublicKey]): (Seq[PublicKey], Seq[BinaryData]) = {
|
||||
val ephemerealPublicKey0 = blind(PublicKey(Crypto.curve.getG, compressed = true), sessionKey.value)
|
||||
def computeEphemeralPublicKeysAndSharedSecrets(sessionKey: PrivateKey, publicKeys: Seq[PublicKey]): (Seq[PublicKey], Seq[BinaryData]) = {
|
||||
val ephemeralPublicKey0 = blind(PublicKey(Crypto.curve.getG, compressed = true), sessionKey.value)
|
||||
val secret0 = computeSharedSecret(publicKeys(0), sessionKey)
|
||||
val blindingFactor0 = computeblindingFactor(ephemerealPublicKey0, secret0)
|
||||
computeEphemerealPublicKeysAndSharedSecrets(sessionKey, publicKeys.tail, Seq(ephemerealPublicKey0), Seq(blindingFactor0), Seq(secret0))
|
||||
val blindingFactor0 = computeblindingFactor(ephemeralPublicKey0, secret0)
|
||||
computeEphemeralPublicKeysAndSharedSecrets(sessionKey, publicKeys.tail, Seq(ephemeralPublicKey0), Seq(blindingFactor0), Seq(secret0))
|
||||
}
|
||||
|
||||
@tailrec
|
||||
def computeEphemerealPublicKeysAndSharedSecrets(sessionKey: PrivateKey, publicKeys: Seq[PublicKey], ephemerealPublicKeys: Seq[PublicKey], blindingFactors: Seq[BinaryData], sharedSecrets: Seq[BinaryData]): (Seq[PublicKey], Seq[BinaryData]) = {
|
||||
def computeEphemeralPublicKeysAndSharedSecrets(sessionKey: PrivateKey, publicKeys: Seq[PublicKey], ephemeralPublicKeys: Seq[PublicKey], blindingFactors: Seq[BinaryData], sharedSecrets: Seq[BinaryData]): (Seq[PublicKey], Seq[BinaryData]) = {
|
||||
if (publicKeys.isEmpty)
|
||||
(ephemerealPublicKeys, sharedSecrets)
|
||||
(ephemeralPublicKeys, sharedSecrets)
|
||||
else {
|
||||
val ephemerealPublicKey = blind(ephemerealPublicKeys.last, blindingFactors.last)
|
||||
val ephemeralPublicKey = blind(ephemeralPublicKeys.last, blindingFactors.last)
|
||||
val secret = computeSharedSecret(blind(publicKeys.head, blindingFactors), sessionKey)
|
||||
val blindingFactor = computeblindingFactor(ephemerealPublicKey, secret)
|
||||
computeEphemerealPublicKeysAndSharedSecrets(sessionKey, publicKeys.tail, ephemerealPublicKeys :+ ephemerealPublicKey, blindingFactors :+ blindingFactor, sharedSecrets :+ secret)
|
||||
val blindingFactor = computeblindingFactor(ephemeralPublicKey, secret)
|
||||
computeEphemeralPublicKeysAndSharedSecrets(sessionKey, publicKeys.tail, ephemeralPublicKeys :+ ephemeralPublicKey, blindingFactors :+ blindingFactor, sharedSecrets :+ secret)
|
||||
}
|
||||
}
|
||||
|
||||
|
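A usage sketch for the derivation above (hypothetical session key; `hopKeys` stands for the public keys of the nodes on the route, assumed in scope):

```
import fr.acinq.bitcoin.BinaryData
import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey}

val sessionKey = PrivateKey(BinaryData("41" * 32), compressed = true) // hypothetical session key
val hopKeys: Seq[PublicKey] = ??? // public keys of each node on the route
val (ephemeralKeys, sharedSecrets) = Sphinx.computeEphemeralPublicKeysAndSharedSecrets(sessionKey, hopKeys)
// one ephemeral key and one shared secret per hop, in route order
assert(ephemeralKeys.size == hopKeys.size && sharedSecrets.size == hopKeys.size)
```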
@ -176,11 +176,11 @@ object Sphinx extends Logging {
|
|||
val bin = xor(packet.routingInfo ++ zeroes(PayloadLength + MacLength), generateStream(rho, PayloadLength + MacLength + MaxHops * (PayloadLength + MacLength)))
|
||||
val payload = bin.take(PayloadLength)
|
||||
val hmac = bin.slice(PayloadLength, PayloadLength + MacLength)
|
||||
val nextRoutinfo = bin.drop(PayloadLength + MacLength)
|
||||
val nextRouteInfo = bin.drop(PayloadLength + MacLength)
|
||||
|
||||
val nextPubKey = blind(PublicKey(packet.publicKey), computeblindingFactor(PublicKey(packet.publicKey), sharedSecret))
|
||||
|
||||
ParsedPacket(payload, Packet(Version, nextPubKey, hmac, nextRoutinfo), sharedSecret)
|
||||
ParsedPacket(payload, Packet(Version, nextPubKey, hmac, nextRouteInfo), sharedSecret)
|
||||
}
|
||||
|
||||
@tailrec
|
||||
|
@ -201,13 +201,13 @@ object Sphinx extends Logging {
|
|||
*
|
||||
* @param payload payload for this packet
|
||||
* @param associatedData associated data
|
||||
* @param ephemerealPublicKey ephemereal key for this packed
|
||||
* @param ephemeralPublicKey ephemeral key for this packet
|
||||
* @param sharedSecret shared secret
|
||||
* @param packet current packet (1 + all zeroes if this is the last packet)
|
||||
* @param routingInfoFiller optional routing info filler, needed only when you're constructing the last packet
|
||||
* @return the next packet
|
||||
*/
|
||||
private def makeNextPacket(payload: BinaryData, associatedData: BinaryData, ephemerealPublicKey: BinaryData, sharedSecret: BinaryData, packet: Packet, routingInfoFiller: BinaryData = BinaryData.empty): Packet = {
|
||||
private def makeNextPacket(payload: BinaryData, associatedData: BinaryData, ephemeralPublicKey: BinaryData, sharedSecret: BinaryData, packet: Packet, routingInfoFiller: BinaryData = BinaryData.empty): Packet = {
|
||||
require(payload.length == PayloadLength)
|
||||
|
||||
val nextRoutingInfo = {
|
||||
|
@ -217,7 +217,7 @@ object Sphinx extends Logging {
|
|||
}
|
||||
|
||||
val nextHmac: BinaryData = mac(generateKey("mu", sharedSecret), nextRoutingInfo ++ associatedData)
|
||||
val nextPacket = Packet(Version, ephemerealPublicKey, nextHmac, nextRoutingInfo)
|
||||
val nextPacket = Packet(Version, ephemeralPublicKey, nextHmac, nextRoutingInfo)
|
||||
nextPacket
|
||||
}
|
||||
|
||||
|
@ -249,10 +249,10 @@ object Sphinx extends Logging {
|
|||
* shared secrets (one per node) can be used to parse returned error messages if needed
|
||||
*/
|
||||
def makePacket(sessionKey: PrivateKey, publicKeys: Seq[PublicKey], payloads: Seq[BinaryData], associatedData: BinaryData): PacketAndSecrets = {
|
||||
val (ephemerealPublicKeys, sharedsecrets) = computeEphemerealPublicKeysAndSharedSecrets(sessionKey, publicKeys)
|
||||
val (ephemeralPublicKeys, sharedsecrets) = computeEphemeralPublicKeysAndSharedSecrets(sessionKey, publicKeys)
|
||||
val filler = generateFiller("rho", sharedsecrets.dropRight(1), PayloadLength + MacLength, MaxHops)
|
||||
|
||||
val lastPacket = makeNextPacket(payloads.last, associatedData, ephemerealPublicKeys.last, sharedsecrets.last, LAST_PACKET, filler)
|
||||
val lastPacket = makeNextPacket(payloads.last, associatedData, ephemeralPublicKeys.last, sharedsecrets.last, LAST_PACKET, filler)
|
||||
|
||||
@tailrec
|
||||
def loop(hoppayloads: Seq[BinaryData], ephkeys: Seq[PublicKey], sharedSecrets: Seq[BinaryData], packet: Packet): Packet = {
|
||||
|
@ -262,7 +262,7 @@ object Sphinx extends Logging {
|
|||
}
|
||||
}
|
||||
|
||||
val packet = loop(payloads.dropRight(1), ephemerealPublicKeys.dropRight(1), sharedsecrets.dropRight(1), lastPacket)
|
||||
val packet = loop(payloads.dropRight(1), ephemeralPublicKeys.dropRight(1), sharedsecrets.dropRight(1), lastPacket)
|
||||
PacketAndSecrets(packet, sharedsecrets.zip(publicKeys))
|
||||
}
|
||||
|
||||
|
|
|
@ -32,7 +32,8 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
|
|||
|
||||
import TransportHandler._
|
||||
|
||||
connection ! akka.io.Tcp.Register(self)
|
||||
connection ! Tcp.Register(self)
|
||||
connection ! Tcp.ResumeReading
|
||||
|
||||
val out = context.actorOf(Props(new WriteAckSender(connection)))
|
||||
|
||||
|
@ -53,11 +54,15 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
|
|||
makeReader(keyPair)
|
||||
}
|
||||
|
||||
def sendToListener(listener: ActorRef, plaintextMessages: Seq[BinaryData]) = {
|
||||
plaintextMessages.map(plaintext => {
|
||||
def sendToListener(listener: ActorRef, plaintextMessages: Seq[BinaryData]): Seq[T] = {
|
||||
plaintextMessages.flatMap(plaintext => {
|
||||
codec.decode(BitVector(plaintext.data)) match {
|
||||
case Attempt.Successful(DecodeResult(message, _)) => listener ! message
|
||||
case Attempt.Failure(err) => log.error(s"cannot deserialize $plaintext: $err")
|
||||
case Attempt.Successful(DecodeResult(message, _)) =>
|
||||
listener ! message
|
||||
Some(message)
|
||||
case Attempt.Failure(err) =>
|
||||
log.error(s"cannot deserialize $plaintext: $err")
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -66,6 +71,7 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
|
|||
|
||||
when(Handshake) {
|
||||
case Event(Tcp.Received(data), HandshakeData(reader, buffer)) =>
|
||||
connection ! Tcp.ResumeReading
|
||||
log.debug("received {}", BinaryData(data))
|
||||
val buffer1 = buffer ++ data
|
||||
if (buffer1.length < expectedLength(reader))
|
||||
|
@ -104,28 +110,57 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
|
|||
}
|
||||
|
||||
when(WaitingForListener) {
|
||||
case Event(Tcp.Received(data), currentStateData@WaitingForListenerData(enc, dec, buffer)) =>
|
||||
stay using currentStateData.copy(buffer = buffer ++ data)
|
||||
case Event(Tcp.Received(data), d: WaitingForListenerData) =>
|
||||
stay using d.copy(buffer = d.buffer ++ data)
|
||||
|
||||
case Event(Listener(listener), WaitingForListenerData(enc, dec, buffer)) =>
|
||||
val (nextStateData, plaintextMessages) = WaitingForCyphertextData(enc, dec, None, buffer, listener).decrypt
|
||||
case Event(Listener(listener), d: WaitingForListenerData) =>
|
||||
context.watch(listener)
|
||||
sendToListener(listener, plaintextMessages)
|
||||
goto(WaitingForCyphertext) using nextStateData
|
||||
|
||||
val (nextData, plaintextMessages) = WaitingForCiphertextData(d.enc, d.dec, None, d.buffer, listener).decrypt
|
||||
if (plaintextMessages.isEmpty) {
|
||||
connection ! Tcp.ResumeReading
|
||||
goto(WaitingForCiphertext) using WaitingForCiphertextData(nextData.enc, nextData.dec, nextData.ciphertextLength, nextData.buffer, nextData.listener)
|
||||
} else {
|
||||
log.debug(s"read ${plaintextMessages.size} messages, waiting for readacks")
|
||||
val unacked = sendToListener(listener, plaintextMessages)
|
||||
goto(WaitingForReadAck) using WaitingForReadAckData(nextData.enc, nextData.dec, nextData.ciphertextLength, nextData.buffer, nextData.listener, unacked)
|
||||
}
|
||||
}
|
||||
|
||||
when(WaitingForCyphertext) {
|
||||
case Event(Tcp.Received(data), currentStateData@WaitingForCyphertextData(enc, dec, length, buffer, listener)) =>
|
||||
val (nextStateData, plaintextMessages) = WaitingForCyphertextData.decrypt(currentStateData.copy(buffer = buffer ++ data))
|
||||
sendToListener(listener, plaintextMessages)
|
||||
stay using nextStateData
|
||||
when(WaitingForCiphertext) {
|
||||
case Event(Tcp.Received(data), d: WaitingForCiphertextData) =>
|
||||
val (nextData, plaintextMessages) = WaitingForCiphertextData.decrypt(d.copy(buffer = d.buffer ++ data))
|
||||
if (plaintextMessages.isEmpty) {
|
||||
connection ! Tcp.ResumeReading
|
||||
stay using nextData
|
||||
} else {
|
||||
log.debug(s"read ${plaintextMessages.size} messages, waiting for readacks")
|
||||
val unacked = sendToListener(d.listener, plaintextMessages)
|
||||
goto(WaitingForReadAck) using WaitingForReadAckData(nextData.enc, nextData.dec, nextData.ciphertextLength, nextData.buffer, nextData.listener, unacked)
|
||||
}
|
||||
|
||||
case Event(t: T, WaitingForCyphertextData(enc, dec, length, buffer, listener)) =>
|
||||
case Event(t: T, d: WaitingForCiphertextData) =>
|
||||
val blob = codec.encode(t).require.toByteArray
|
||||
val (enc1, ciphertext) = TransportHandler.encrypt(enc, blob)
|
||||
val (enc1, ciphertext) = TransportHandler.encrypt(d.enc, blob)
|
||||
out ! buf(ciphertext)
|
||||
stay using WaitingForCyphertextData(enc1, dec, length, buffer, listener)
|
||||
stay using d.copy(enc = enc1)
|
||||
}
|
||||
|
||||
when(WaitingForReadAck) {
|
||||
case Event(ReadAck(msg: T), d: WaitingForReadAckData[T]) =>
|
||||
val unacked1 = d.unackedMessages diff List(msg) // TODO: NOT OPTIMAL!! but can't use a set because there might be duplicate messages
|
||||
if (unacked1.isEmpty) {
|
||||
log.debug("last incoming message was acked, resuming reading")
|
||||
connection ! Tcp.ResumeReading
|
||||
goto(WaitingForCiphertext) using WaitingForCiphertextData(d.enc, d.dec, d.ciphertextLength, d.buffer, d.listener)
|
||||
} else {
|
||||
stay using d.copy(unackedMessages = unacked1)
|
||||
}
|
||||
|
||||
case Event(t: T, d: WaitingForReadAckData[T]) =>
|
||||
val blob = codec.encode(t).require.toByteArray
|
||||
val (enc1, ciphertext) = TransportHandler.encrypt(d.enc, blob)
|
||||
out ! buf(ciphertext)
|
||||
stay using d.copy(enc = enc1)
|
||||
}
|
||||
|
||||
whenUnhandled {
|
||||
|
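The switch to pull mode introduces a back-pressure contract: after decrypting a batch, the transport stops issuing `Tcp.ResumeReading` until the listener has acked every decoded message. A hedged sketch of what a listener must now do:

```
import akka.actor.{Actor, ActorRef}

// hypothetical listener honoring the read-ack protocol
class MyListener(transport: ActorRef) extends Actor {
  def receive = {
    case msg: LightningMessage =>
      // ... handle the message ...
      transport ! TransportHandler.ReadAck(msg) // the transport resumes reading once all messages are acked
  }
}
```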
@ -208,13 +243,16 @@ object TransportHandler {
|
|||
sealed trait State
|
||||
case object Handshake extends State
|
||||
case object WaitingForListener extends State
|
||||
case object WaitingForCyphertext extends State
|
||||
case object WaitingForCiphertext extends State
|
||||
case object WaitingForReadAck extends State
|
||||
// @formatter:on
|
||||
|
||||
case class Listener(listener: ActorRef)
|
||||
|
||||
case class HandshakeCompleted(connection: ActorRef, transport: ActorRef, remoteNodeId: PublicKey)
|
||||
|
||||
case class ReadAck(msg: Any)
|
||||
|
||||
sealed trait Data
|
||||
|
||||
case class HandshakeData(reader: Noise.HandshakeStateReader, buffer: ByteString = ByteString.empty) extends Data
|
||||
|
@ -232,7 +270,7 @@ object TransportHandler {
|
|||
|
||||
override def encryptWithAd(ad: BinaryData, plaintext: BinaryData): (CipherState, BinaryData) = {
|
||||
cs match {
|
||||
case UnitializedCipherState(_) => (this, plaintext)
|
||||
case UninitializedCipherState(_) => (this, plaintext)
|
||||
case InitializedCipherState(k, n, _) if n == 999 => {
|
||||
val (_, ciphertext) = cs.encryptWithAd(ad, plaintext)
|
||||
val (ck1, k1) = SHA256HashFunctions.hkdf(ck, k)
|
||||
|
@ -247,7 +285,7 @@ object TransportHandler {
|
|||
|
||||
override def decryptWithAd(ad: BinaryData, ciphertext: BinaryData): (CipherState, BinaryData) = {
|
||||
cs match {
|
||||
case UnitializedCipherState(_) => (this, ciphertext)
|
||||
case UninitializedCipherState(_) => (this, ciphertext)
|
||||
case InitializedCipherState(k, n, _) if n == 999 => {
|
||||
val (_, plaintext) = cs.decryptWithAd(ad, ciphertext)
|
||||
val (ck1, k1) = SHA256HashFunctions.hkdf(ck, k)
|
||||
|
@ -263,13 +301,15 @@ object TransportHandler {
|
|||
|
||||
case class WaitingForListenerData(enc: CipherState, dec: CipherState, buffer: ByteString) extends Data
|
||||
|
||||
case class WaitingForCyphertextData(enc: CipherState, dec: CipherState, ciphertextLength: Option[Int], buffer: ByteString, listener: ActorRef) extends Data {
|
||||
def decrypt: (WaitingForCyphertextData, Seq[BinaryData]) = WaitingForCyphertextData.decrypt(this)
|
||||
case class WaitingForCiphertextData(enc: CipherState, dec: CipherState, ciphertextLength: Option[Int], buffer: ByteString, listener: ActorRef) extends Data {
|
||||
def decrypt: (WaitingForCiphertextData, Seq[BinaryData]) = WaitingForCiphertextData.decrypt(this)
|
||||
}
|
||||
|
||||
object WaitingForCyphertextData {
|
||||
case class WaitingForReadAckData[T](enc: CipherState, dec: CipherState, ciphertextLength: Option[Int], buffer: ByteString, listener: ActorRef, unackedMessages: Seq[T]) extends Data
|
||||
|
||||
object WaitingForCiphertextData {
|
||||
@tailrec
|
||||
def decrypt(state: WaitingForCyphertextData, acc: Seq[BinaryData] = Nil): (WaitingForCyphertextData, Seq[BinaryData]) = {
|
||||
def decrypt(state: WaitingForCiphertextData, acc: Seq[BinaryData] = Nil): (WaitingForCiphertextData, Seq[BinaryData]) = {
|
||||
(state.ciphertextLength, state.buffer.length) match {
|
||||
case (None, length) if length < 18 => (state, acc)
|
||||
case (None, _) =>
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package fr.acinq.eclair.db
|
||||
|
||||
import fr.acinq.bitcoin.{BinaryData, Satoshi}
|
||||
import fr.acinq.bitcoin.Crypto.PublicKey
|
||||
import fr.acinq.eclair.wire.{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement}
|
||||
|
||||
|
@ -13,7 +14,7 @@ trait NetworkDb {
|
|||
|
||||
def listNodes(): List[NodeAnnouncement]
|
||||
|
||||
def addChannel(c: ChannelAnnouncement)
|
||||
def addChannel(c: ChannelAnnouncement, txid: BinaryData, capacity: Satoshi)
|
||||
|
||||
/**
|
||||
* This method removes 1 channel announcement and 2 channel updates (at both ends of the same channel)
|
||||
|
@ -23,7 +24,7 @@ trait NetworkDb {
|
|||
*/
|
||||
def removeChannel(shortChannelId: Long)
|
||||
|
||||
def listChannels(): List[ChannelAnnouncement]
|
||||
def listChannels(): Map[ChannelAnnouncement, (BinaryData, Satoshi)]
|
||||
|
||||
def addChannelUpdate(u: ChannelUpdate)
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package fr.acinq.eclair.db
|
||||
|
||||
import fr.acinq.bitcoin.BinaryData
|
||||
import fr.acinq.eclair.channel.Command
|
||||
|
||||
/**
|
||||
* This database stores the preimages that we have received from downstream
|
||||
|
@ -14,12 +15,12 @@ import fr.acinq.bitcoin.BinaryData
|
|||
* to handle all corner cases.
|
||||
*
|
||||
*/
|
||||
trait PreimagesDb {
|
||||
trait PendingRelayDb {
|
||||
|
||||
def addPreimage(channelId: BinaryData, htlcId: Long, paymentPreimage: BinaryData)
|
||||
def addPendingRelay(channelId: BinaryData, htlcId: Long, cmd: Command)
|
||||
|
||||
def removePreimage(channelId: BinaryData, htlcId: Long)
|
||||
def removePendingRelay(channelId: BinaryData, htlcId: Long)
|
||||
|
||||
def listPreimages(channelId: BinaryData): List[(BinaryData, Long, BinaryData)]
|
||||
def listPendingRelay(channelId: BinaryData): List[Command]
|
||||
|
||||
}
|
|
@ -31,7 +31,7 @@ class SqliteChannelsDb(sqlite: Connection) extends ChannelsDb {
|
|||
}
|
||||
|
||||
override def removeChannel(channelId: BinaryData): Unit = {
|
||||
using(sqlite.prepareStatement("DELETE FROM preimages WHERE channel_id=?")) { statement =>
|
||||
using(sqlite.prepareStatement("DELETE FROM pending_relay WHERE channel_id=?")) { statement =>
|
||||
statement.setBytes(1, channelId)
|
||||
statement.executeUpdate()
|
||||
}
|
||||
|
|
|
@ -2,11 +2,12 @@ package fr.acinq.eclair.db.sqlite
|
|||
|
||||
import java.sql.Connection
|
||||
|
||||
import fr.acinq.bitcoin.Crypto
|
||||
import fr.acinq.bitcoin.{BinaryData, Crypto, Satoshi}
|
||||
import fr.acinq.eclair.db.NetworkDb
|
||||
import fr.acinq.eclair.router.Announcements
|
||||
import fr.acinq.eclair.wire.LightningMessageCodecs.{channelAnnouncementCodec, channelUpdateCodec, nodeAnnouncementCodec}
|
||||
import fr.acinq.eclair.wire.{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement}
|
||||
import scodec.bits.BitVector
|
||||
|
||||
class SqliteNetworkDb(sqlite: Connection) extends NetworkDb {
|
||||
|
||||
|
@ -15,7 +16,7 @@ class SqliteNetworkDb(sqlite: Connection) extends NetworkDb {
|
|||
using(sqlite.createStatement()) { statement =>
|
||||
statement.execute("PRAGMA foreign_keys = ON")
|
||||
statement.executeUpdate("CREATE TABLE IF NOT EXISTS nodes (node_id BLOB NOT NULL PRIMARY KEY, data BLOB NOT NULL)")
|
||||
statement.executeUpdate("CREATE TABLE IF NOT EXISTS channels (short_channel_id INTEGER NOT NULL PRIMARY KEY, data BLOB NOT NULL)")
|
||||
statement.executeUpdate("CREATE TABLE IF NOT EXISTS channels (short_channel_id INTEGER NOT NULL PRIMARY KEY, txid STRING NOT NULL, data BLOB NOT NULL, capacity_sat INTEGER NOT NULL)")
|
||||
statement.executeUpdate("CREATE TABLE IF NOT EXISTS channel_updates (short_channel_id INTEGER NOT NULL, node_flag INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY(short_channel_id, node_flag), FOREIGN KEY(short_channel_id) REFERENCES channels(short_channel_id))")
|
||||
statement.executeUpdate("CREATE INDEX IF NOT EXISTS channel_updates_idx ON channel_updates(short_channel_id)")
|
||||
}
|
||||
|
@ -50,10 +51,12 @@ class SqliteNetworkDb(sqlite: Connection) extends NetworkDb {
|
|||
}
|
||||
}
|
||||
|
||||
override def addChannel(c: ChannelAnnouncement): Unit = {
|
||||
using(sqlite.prepareStatement("INSERT OR IGNORE INTO channels VALUES (?, ?)")) { statement =>
|
||||
override def addChannel(c: ChannelAnnouncement, txid: BinaryData, capacity: Satoshi): Unit = {
|
||||
using(sqlite.prepareStatement("INSERT OR IGNORE INTO channels VALUES (?, ?, ?, ?)")) { statement =>
|
||||
statement.setLong(1, c.shortChannelId)
|
||||
statement.setBytes(2, channelAnnouncementCodec.encode(c).require.toByteArray)
|
||||
statement.setString(2, txid.toString())
|
||||
statement.setBytes(3, channelAnnouncementCodec.encode(c).require.toByteArray)
|
||||
statement.setLong(4, capacity.amount)
|
||||
statement.executeUpdate()
|
||||
}
|
||||
}
|
||||
|
@ -67,10 +70,15 @@ class SqliteNetworkDb(sqlite: Connection) extends NetworkDb {
|
|||
}
|
||||
}
|
||||
|
||||
override def listChannels(): List[ChannelAnnouncement] = {
|
||||
override def listChannels(): Map[ChannelAnnouncement, (BinaryData, Satoshi)] = {
|
||||
using(sqlite.createStatement()) { statement =>
|
||||
val rs = statement.executeQuery("SELECT data FROM channels")
|
||||
codecList(rs, channelAnnouncementCodec)
|
||||
val rs = statement.executeQuery("SELECT data, txid, capacity_sat FROM channels")
|
||||
var l: Map[ChannelAnnouncement, (BinaryData, Satoshi)] = Map()
|
||||
while (rs.next()) {
|
||||
l = l + (channelAnnouncementCodec.decode(BitVector(rs.getBytes("data"))).require.value ->
|
||||
(BinaryData(rs.getString("txid")), Satoshi(rs.getLong("capacity_sat"))))
|
||||
}
|
||||
l
|
||||
}
|
||||
}
|
||||
|
||||
|
|
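A round-trip sketch for the extended schema (in-memory sqlite; `ann` stands for a `ChannelAnnouncement` assumed in scope):

```
import java.sql.DriverManager
import fr.acinq.bitcoin.{BinaryData, Satoshi}

val db = new SqliteNetworkDb(DriverManager.getConnection("jdbc:sqlite::memory:"))
db.addChannel(ann, txid = BinaryData("00" * 32), capacity = Satoshi(100000L))
val channels = db.listChannels() // Map[ChannelAnnouncement, (BinaryData, Satoshi)]
```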
|
@ -0,0 +1,43 @@
|
|||
package fr.acinq.eclair.db.sqlite
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import fr.acinq.bitcoin.BinaryData
|
||||
import fr.acinq.eclair.channel.Command
|
||||
import fr.acinq.eclair.db.PendingRelayDb
|
||||
import fr.acinq.eclair.db.sqlite.SqliteUtils.{codecList, using}
|
||||
import fr.acinq.eclair.wire.CommandCodecs.cmdCodec
|
||||
|
||||
class SqlitePendingRelayDb(sqlite: Connection) extends PendingRelayDb {
|
||||
|
||||
using(sqlite.createStatement()) { statement =>
|
||||
// note: should we use a foreign key to local_channels table here?
|
||||
statement.executeUpdate("CREATE TABLE IF NOT EXISTS pending_relay (channel_id BLOB NOT NULL, htlc_id INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY(channel_id, htlc_id))")
|
||||
}
|
||||
|
||||
override def addPendingRelay(channelId: BinaryData, htlcId: Long, cmd: Command): Unit = {
|
||||
using(sqlite.prepareStatement("INSERT OR IGNORE INTO pending_relay VALUES (?, ?, ?)")) { statement =>
|
||||
statement.setBytes(1, channelId)
|
||||
statement.setLong(2, htlcId)
|
||||
statement.setBytes(3, cmdCodec.encode(cmd).require.toByteArray)
|
||||
statement.executeUpdate()
|
||||
}
|
||||
}
|
||||
|
||||
override def removePendingRelay(channelId: BinaryData, htlcId: Long): Unit = {
|
||||
using(sqlite.prepareStatement("DELETE FROM pending_relay WHERE channel_id=? AND htlc_id=?")) { statement =>
|
||||
statement.setBytes(1, channelId)
|
||||
statement.setLong(2, htlcId)
|
||||
statement.executeUpdate()
|
||||
}
|
||||
}
|
||||
|
||||
override def listPendingRelay(channelId: BinaryData): List[Command] = {
|
||||
using(sqlite.prepareStatement("SELECT htlc_id, data FROM pending_relay WHERE channel_id=?")) { statement =>
|
||||
statement.setBytes(1, channelId)
|
||||
val rs = statement.executeQuery()
|
||||
codecList(rs, cmdCodec)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
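A round-trip sketch for the new table (in-memory sqlite; `cmd` stands for any relayed `Command`, e.g. a CMD_FULFILL_HTLC, assumed in scope):

```
import java.sql.DriverManager
import fr.acinq.bitcoin.BinaryData

val db = new SqlitePendingRelayDb(DriverManager.getConnection("jdbc:sqlite::memory:"))
val channelId = BinaryData("02" * 32)
db.addPendingRelay(channelId, htlcId = 7L, cmd) // persist before the channel has acked the fulfill/fail
assert(db.listPendingRelay(channelId).size == 1)
db.removePendingRelay(channelId, htlcId = 7L)   // delete once the update is irrevocably committed
```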
|
@ -1,44 +0,0 @@
|
|||
package fr.acinq.eclair.db.sqlite
|
||||
|
||||
import java.sql.Connection
|
||||
|
||||
import fr.acinq.bitcoin.BinaryData
|
||||
import fr.acinq.eclair.db.PreimagesDb
|
||||
import fr.acinq.eclair.db.sqlite.SqliteUtils.using
|
||||
|
||||
class SqlitePreimagesDb(sqlite: Connection) extends PreimagesDb {
|
||||
|
||||
using(sqlite.createStatement()) { statement =>
|
||||
// note: should we use a foreign key to local_channels table here?
|
||||
statement.executeUpdate("CREATE TABLE IF NOT EXISTS preimages (channel_id BLOB NOT NULL, htlc_id INTEGER NOT NULL, preimage BLOB NOT NULL, PRIMARY KEY(channel_id, htlc_id))")
|
||||
}
|
||||
|
||||
override def addPreimage(channelId: BinaryData, htlcId: Long, paymentPreimage: BinaryData): Unit = {
|
||||
using(sqlite.prepareStatement("INSERT OR IGNORE INTO preimages VALUES (?, ?, ?)")) { statement =>
|
||||
statement.setBytes(1, channelId)
|
||||
statement.setLong(2, htlcId)
|
||||
statement.setBytes(3, paymentPreimage)
|
||||
statement.executeUpdate()
|
||||
}
|
||||
}
|
||||
|
||||
override def removePreimage(channelId: BinaryData, htlcId: Long): Unit = {
|
||||
using(sqlite.prepareStatement("DELETE FROM preimages WHERE channel_id=? AND htlc_id=?")) { statement =>
|
||||
statement.setBytes(1, channelId)
|
||||
statement.setLong(2, htlcId)
|
||||
statement.executeUpdate()
|
||||
}
|
||||
}
|
||||
|
||||
override def listPreimages(channelId: BinaryData): List[(BinaryData, Long, BinaryData)] = {
|
||||
using(sqlite.prepareStatement("SELECT htlc_id, preimage FROM preimages WHERE channel_id=?")) { statement =>
|
||||
statement.setBytes(1, channelId)
|
||||
val rs = statement.executeQuery()
|
||||
var l: List[(BinaryData, Long, BinaryData)] = Nil
|
||||
while (rs.next()) {
|
||||
l = l :+ (channelId, rs.getLong("htlc_id"), BinaryData(rs.getBytes("preimage")))
|
||||
}
|
||||
l
|
||||
}
|
||||
}
|
||||
}
|
|
@ -21,7 +21,7 @@ class Client(nodeParams: NodeParams, authenticator: ActorRef, address: InetSocke
|
|||
import context.system
|
||||
|
||||
log.info(s"connecting to pubkey=$remoteNodeId host=${address.getHostString} port=${address.getPort}")
|
||||
IO(Tcp) ! Connect(address, timeout = Some(5 seconds), options = KeepAlive(true) :: Nil)
|
||||
IO(Tcp) ! Connect(address, timeout = Some(5 seconds), options = KeepAlive(true) :: Nil, pullMode = true)
|
||||
|
||||
def receive = {
|
||||
case CommandFailed(_: Connect) =>
|
||||
|
|
|
@ -2,15 +2,17 @@ package fr.acinq.eclair.io
|
|||
|
||||
import java.net.InetSocketAddress
|
||||
|
||||
import akka.actor.{ActorRef, LoggingFSM, OneForOneStrategy, PoisonPill, Props, Status, SupervisorStrategy, Terminated}
|
||||
import akka.actor.{ActorRef, FSM, OneForOneStrategy, PoisonPill, Props, Status, SupervisorStrategy, Terminated}
|
||||
import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey}
|
||||
import fr.acinq.bitcoin.{BinaryData, Crypto, DeterministicWallet, MilliSatoshi, Satoshi}
|
||||
import fr.acinq.eclair._
|
||||
import fr.acinq.eclair.blockchain.EclairWallet
|
||||
import fr.acinq.eclair.channel._
|
||||
import fr.acinq.eclair.crypto.TransportHandler
|
||||
import fr.acinq.eclair.crypto.TransportHandler.Listener
|
||||
import fr.acinq.eclair.router.{Rebroadcast, SendRoutingState}
|
||||
import fr.acinq.eclair.router.SendRoutingState
|
||||
import fr.acinq.eclair.wire
|
||||
import fr.acinq.eclair.wire.LightningMessage
|
||||
|
||||
import scala.concurrent.duration._
|
||||
import scala.util.Random
|
||||
|
@ -18,7 +20,7 @@ import scala.util.Random
|
|||
/**
|
||||
* Created by PM on 26/08/2016.
|
||||
*/
|
||||
class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress: Option[InetSocketAddress], authenticator: ActorRef, watcher: ActorRef, router: ActorRef, relayer: ActorRef, wallet: EclairWallet, storedChannels: Set[HasCommitments]) extends LoggingFSM[Peer.State, Peer.Data] {
|
||||
class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress: Option[InetSocketAddress], authenticator: ActorRef, watcher: ActorRef, router: ActorRef, relayer: ActorRef, wallet: EclairWallet, storedChannels: Set[HasCommitments]) extends FSM[Peer.State, Peer.Data] {
|
||||
|
||||
import Peer._
|
||||
|
||||
|
@ -65,6 +67,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
|
|||
|
||||
when(INITIALIZING) {
|
||||
case Event(remoteInit: wire.Init, InitializingData(address_opt, transport, channels, origin_opt)) =>
|
||||
transport ! TransportHandler.ReadAck(remoteInit)
|
||||
log.info(s"$remoteNodeId has features: initialRoutingSync=${Features.initialRoutingSync(remoteInit.localFeatures)}")
|
||||
if (Features.areSupported(remoteInit.localFeatures)) {
|
||||
origin_opt.map(origin => origin ! "connected")
|
||||
|
@ -72,8 +75,8 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
|
|||
router ! SendRoutingState(transport)
|
||||
}
|
||||
// let's bring existing/requested channels online
|
||||
channels.values.foreach(_ ! INPUT_RECONNECTED(transport))
|
||||
goto(CONNECTED) using ConnectedData(address_opt, transport, remoteInit, channels)
|
||||
channels.values.toSet[ActorRef].foreach(_ ! INPUT_RECONNECTED(transport)) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
|
||||
goto(CONNECTED) using ConnectedData(address_opt, transport, remoteInit, channels.map { case (k: ChannelId, v) => (k, v)})
|
||||
} else {
|
||||
log.warning(s"incompatible features, disconnecting")
|
||||
origin_opt.map(origin => origin ! "incompatible features")
|
||||
|
@ -113,29 +116,33 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
|
|||
transport ! wire.Ping(pongSize, BinaryData("00" * pingSize))
|
||||
stay
|
||||
|
||||
case Event(wire.Ping(pongLength, _), ConnectedData(_, transport, _, _)) =>
|
||||
case Event(ping@wire.Ping(pongLength, _), ConnectedData(_, transport, _, _)) =>
|
||||
transport ! TransportHandler.ReadAck(ping)
|
||||
// TODO: (optional) check against the expected data size that we requested when we sent ping messages
|
||||
if (pongLength > 0) {
|
||||
transport ! wire.Pong(BinaryData("00" * pongLength))
|
||||
}
|
||||
stay
|
||||
|
||||
case Event(wire.Pong(data), ConnectedData(_, _, _, _)) =>
|
||||
case Event(pong@wire.Pong(data), ConnectedData(_, transport, _, _)) =>
|
||||
transport ! TransportHandler.ReadAck(pong)
|
||||
// TODO: compute latency for remote peer ?
|
||||
log.debug(s"received pong with ${data.length} bytes")
|
||||
stay
|
||||
|
||||
case Event(err@wire.Error(channelId, reason), ConnectedData(_, transport, _, channels)) if channelId == CHANNELID_ZERO =>
|
||||
transport ! TransportHandler.ReadAck(err)
|
||||
log.error(s"connection-level error, failing all channels! reason=${new String(reason)}")
|
||||
channels.values.foreach(_ forward err)
|
||||
channels.values.toSet[ActorRef].foreach(_ forward err) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
|
||||
transport ! PoisonPill
|
||||
stay
|
||||
|
||||
case Event(msg: wire.Error, ConnectedData(_, transport, _, channels)) =>
|
||||
case Event(err: wire.Error, ConnectedData(_, transport, _, channels)) =>
|
||||
transport ! TransportHandler.ReadAck(err)
|
||||
// error messages are a bit special because they can contain either temporaryChannelId or channelId (see BOLT 1)
|
||||
channels.get(FinalChannelId(msg.channelId)).orElse(channels.get(TemporaryChannelId(msg.channelId))) match {
|
||||
case Some(channel) => channel forward msg
|
||||
case None => transport ! wire.Error(msg.channelId, UNKNOWN_CHANNEL_MESSAGE)
|
||||
channels.get(FinalChannelId(err.channelId)).orElse(channels.get(TemporaryChannelId(err.channelId))) match {
|
||||
case Some(channel) => channel forward err
|
||||
case None => transport ! wire.Error(err.channelId, UNKNOWN_CHANNEL_MESSAGE)
|
||||
}
|
||||
stay
|
||||
|
||||
|
@ -149,6 +156,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
|
|||
stay using d.copy(channels = channels + (TemporaryChannelId(temporaryChannelId) -> channel))
|
||||
|
||||
case Event(msg: wire.OpenChannel, d@ConnectedData(_, transport, remoteInit, channels)) =>
|
||||
transport ! TransportHandler.ReadAck(msg)
|
||||
channels.get(TemporaryChannelId(msg.temporaryChannelId)) match {
|
||||
case None =>
|
||||
log.info(s"accepting a new channel to $remoteNodeId")
|
||||
|
@ -163,6 +171,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
|
|||
}
|
||||
|
||||
case Event(msg: wire.HasChannelId, ConnectedData(_, transport, _, channels)) =>
|
||||
transport ! TransportHandler.ReadAck(msg)
|
||||
channels.get(FinalChannelId(msg.channelId)) match {
|
||||
case Some(channel) => channel forward msg
|
||||
case None => transport ! wire.Error(msg.channelId, UNKNOWN_CHANNEL_MESSAGE)
|
||||
|
@ -170,6 +179,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
|
|||
stay
|
||||
|
||||
case Event(msg: wire.HasTemporaryChannelId, ConnectedData(_, transport, _, channels)) =>
|
||||
transport ! TransportHandler.ReadAck(msg)
|
||||
channels.get(TemporaryChannelId(msg.temporaryChannelId)) match {
|
||||
case Some(channel) => channel forward msg
|
||||
case None => transport ! wire.Error(msg.temporaryChannelId, UNKNOWN_CHANNEL_MESSAGE)
|
||||
|
@ -182,16 +192,14 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
|
|||
// we won't clean it up, but we won't remember the temporary id on channel termination
|
||||
stay using d.copy(channels = channels + (FinalChannelId(channelId) -> channel))
|
||||
|
||||
case Event(Rebroadcast(announcements), ConnectedData(_, transport, _, _)) =>
|
||||
// we filter out announcements that we received from this node
|
||||
announcements.foreach {
|
||||
case (_, s) if s == self => ()
|
||||
case (ann, _) => transport ! ann
|
||||
}
|
||||
case Event(msg: wire.RoutingMessage, _) =>
|
||||
// Note: we don't ack messages here because we don't want them to be stacked in the router's mailbox
|
||||
router ! msg
|
||||
stay
|
||||
|
||||
case Event(msg: wire.RoutingMessage, _) =>
|
||||
router ! msg
|
||||
case Event(readAck: TransportHandler.ReadAck, ConnectedData(_, transport, _, _)) =>
|
||||
// we just forward acks from router to transport
|
||||
transport forward readAck
|
||||
stay
|
||||
|
||||
case Event(Disconnect, ConnectedData(_, transport, _, _)) =>
|
||||
|
@ -200,8 +208,8 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
|
|||
|
||||
case Event(Terminated(actor), ConnectedData(address_opt, transport, _, channels)) if actor == transport =>
|
||||
log.info(s"lost connection to $remoteNodeId")
|
||||
channels.values.foreach(_ ! INPUT_DISCONNECTED)
|
||||
goto(DISCONNECTED) using DisconnectedData(address_opt, channels)
|
||||
channels.values.toSet[ActorRef].foreach(_ ! INPUT_DISCONNECTED) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
|
||||
goto(DISCONNECTED) using DisconnectedData(address_opt, channels.collect { case (k: FinalChannelId, v) => (k, v) })
|
||||
|
||||
case Event(Terminated(actor), d@ConnectedData(_, transport, _, channels)) if channels.values.toSet.contains(actor) =>
|
||||
// we will have at most 2 ids: a TemporaryChannelId and a FinalChannelId
|
||||
|
@ -217,9 +225,9 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
|
|||
log.info(s"got new transport while already connected, switching to new transport")
|
||||
context unwatch oldTransport
|
||||
oldTransport ! PoisonPill
|
||||
channels.values.foreach(_ ! INPUT_DISCONNECTED)
|
||||
channels.values.toSet[ActorRef].foreach(_ ! INPUT_DISCONNECTED) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
|
||||
self ! h
|
||||
goto(DISCONNECTED) using DisconnectedData(address_opt, channels)
|
||||
goto(DISCONNECTED) using DisconnectedData(address_opt, channels.collect { case (k: FinalChannelId, v) => (k, v) })
|
||||
}
|
||||
|
||||
whenUnhandled {
|
||||
|
@ -235,7 +243,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
|
|||
sender ! PeerInfo(remoteNodeId, stateName.toString, d.address_opt, d.channels.values.toSet.size) // we use toSet to dedup because a channel can have a TemporaryChannelId + a ChannelId
|
||||
stay
|
||||
|
||||
case Event(_: Rebroadcast, _) => stay // ignored
|
||||
case Event(_: TransportHandler.ReadAck, _) => stay // ignored
|
||||
}
|
||||
|
||||
onTransition {
|
||||
|
@ -284,10 +292,10 @@ object Peer {
|
|||
|
||||
sealed trait Data {
|
||||
def address_opt: Option[InetSocketAddress]
|
||||
def channels: Map[ChannelId, ActorRef]
|
||||
def channels: Map[_ <: ChannelId, ActorRef] // will be overridden by Map[FinalChannelId, ActorRef] or Map[ChannelId, ActorRef]
|
||||
}
|
||||
case class DisconnectedData(address_opt: Option[InetSocketAddress], channels: Map[ChannelId, ActorRef], attempts: Int = 0) extends Data
|
||||
case class InitializingData(address_opt: Option[InetSocketAddress], transport: ActorRef, channels: Map[ChannelId, ActorRef], origin_opt: Option[ActorRef]) extends Data
|
||||
case class DisconnectedData(address_opt: Option[InetSocketAddress], channels: Map[FinalChannelId, ActorRef], attempts: Int = 0) extends Data
|
||||
case class InitializingData(address_opt: Option[InetSocketAddress], transport: ActorRef, channels: Map[FinalChannelId, ActorRef], origin_opt: Option[ActorRef]) extends Data
|
||||
case class ConnectedData(address_opt: Option[InetSocketAddress], transport: ActorRef, remoteInit: wire.Init, channels: Map[ChannelId, ActorRef]) extends Data
|
||||
|
||||
sealed trait State
|
||||
|
|
|
@ -21,21 +21,27 @@ class Server(nodeParams: NodeParams, authenticator: ActorRef, address: InetSocke
|
|||
import Tcp._
|
||||
import context.system
|
||||
|
||||
IO(Tcp) ! Bind(self, address, options = KeepAlive(true) :: Nil)
|
||||
IO(Tcp) ! Bind(self, address, options = KeepAlive(true) :: Nil, pullMode = true)
|
||||
|
||||
def receive() = {
|
||||
case Bound(localAddress) =>
|
||||
bound.map(_.success(Unit))
|
||||
log.info(s"bound on $localAddress")
|
||||
// Accept connections one by one
|
||||
sender() ! ResumeAccepting(batchSize = 1)
|
||||
context.become(listening(sender()))
|
||||
|
||||
case CommandFailed(_: Bind) =>
|
||||
bound.map(_.failure(new RuntimeException("TCP bind failed")))
|
||||
context stop self
|
||||
}
|
||||
|
||||
def listening(listener: ActorRef): Receive = {
|
||||
case Connected(remote, _) =>
|
||||
log.info(s"connected to $remote")
|
||||
val connection = sender
|
||||
authenticator ! Authenticator.PendingAuth(connection, remoteNodeId_opt = None, address = remote, origin_opt = None)
|
||||
listener ! ResumeAccepting(batchSize = 1)
|
||||
}
|
||||
|
||||
override def unhandled(message: Any): Unit = log.warning(s"unhandled message=$message")
|
||||
|
|
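The pull-mode accept loop admits exactly one connection at a time: each `Connected` is handed to the authenticator and only then is another `ResumeAccepting(batchSize = 1)` issued. A condensed standalone sketch of the same pattern, using only akka-io:

```
import akka.actor.{Actor, ActorRef}
import akka.io.Tcp._

// hypothetical: accepts connections one by one, as the Server above does
class PullModeAcceptor extends Actor {
  def receive = {
    case Bound(_) =>
      sender() ! ResumeAccepting(batchSize = 1) // ask for the first connection
      context.become(listening(sender()))
  }
  def listening(listener: ActorRef): Receive = {
    case Connected(remote, _) =>
      // ... hand the connection (sender()) off for authentication ...
      listener ! ResumeAccepting(batchSize = 1) // then ask for the next one
  }
}
```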
|
@ -7,7 +7,6 @@ import fr.acinq.bitcoin.Crypto.PublicKey
|
|||
import fr.acinq.eclair.NodeParams
|
||||
import fr.acinq.eclair.blockchain.EclairWallet
|
||||
import fr.acinq.eclair.channel.HasCommitments
|
||||
import fr.acinq.eclair.router.Rebroadcast
|
||||
|
||||
/**
|
||||
* Ties network connections to peers.
|
||||
|
@ -66,8 +65,6 @@ class Switchboard(nodeParams: NodeParams, authenticator: ActorRef, watcher: Acto
|
|||
peer forward auth
|
||||
context become main(peers + (remoteNodeId -> peer))
|
||||
|
||||
case r: Rebroadcast => peers.values.foreach(_ forward r)
|
||||
|
||||
case 'peers => sender ! peers
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,57 @@
package fr.acinq.eclair.payment

import akka.actor.{Actor, ActorLogging, ActorRef}
import fr.acinq.bitcoin.BinaryData
import fr.acinq.eclair.NodeParams
import fr.acinq.eclair.channel._
import fr.acinq.eclair.wire.{UpdateFailHtlc, UpdateFailMalformedHtlc, UpdateFulfillHtlc}

class CommandBuffer(nodeParams: NodeParams, register: ActorRef) extends Actor with ActorLogging {

import CommandBuffer._
import nodeParams.pendingRelayDb

context.system.eventStream.subscribe(self, classOf[ChannelStateChanged])

override def receive: Receive = {

case CommandSend(channelId, htlcId, cmd) =>
// save command in db
register forward Register.Forward(channelId, cmd)
// we also store the preimage in a db (note that this happens *after* forwarding the fulfill to the channel, so we don't add latency)
pendingRelayDb.addPendingRelay(channelId, htlcId, cmd)

case CommandAck(channelId, htlcId) =>
//delete from db
log.debug(s"fulfill/fail acked for channelId=$channelId htlcId=$htlcId")
pendingRelayDb.removePendingRelay(channelId, htlcId)

case ChannelStateChanged(channel, _, _, WAIT_FOR_INIT_INTERNAL | OFFLINE | SYNCING, nextState, d: HasCommitments) =>
import d.channelId
// if channel is in a state where it can have pending htlcs, we send them the fulfills we know of
nextState match {
case NORMAL | SHUTDOWN | CLOSING =>
pendingRelayDb.listPendingRelay(channelId) match {
case Nil => ()
case msgs =>
log.info(s"re-sending ${msgs.size} unacked fulfills/fails to channel $channelId")
msgs.foreach(channel ! _) // they all have commit = false
// better to sign once instead of after each fulfill
channel ! CMD_SIGN
}
case _ => ()
}

case _: ChannelStateChanged => () // ignored

}

}

object CommandBuffer {

case class CommandSend(channelId: BinaryData, htlcId: Long, cmd: Command)

case class CommandAck(channelId: BinaryData, htlcId: Long)

}

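The new `CommandBuffer` above gives fulfill/fail commands at-least-once delivery: every command is persisted in `pendingRelayDb` until the channel acks it, and unacked commands are replayed when the channel comes back to `NORMAL`/`SHUTDOWN`/`CLOSING`. Below is a minimal sketch of that store/forward/replay/ack cycle; `PendingRelayStore` and `Cmd` are hypothetical stand-ins for eclair's `PendingRelayDb` and `Command`, and the real store is backed by a database, not an in-memory map.

```scala
// Minimal sketch of the at-least-once redelivery idea behind CommandBuffer,
// using hypothetical stand-ins (PendingRelayStore, Cmd) for eclair's types.
import scala.collection.mutable

final case class Cmd(htlcId: Long, payload: String)

// In eclair this is a database-backed PendingRelayDb; a mutable map suffices here.
class PendingRelayStore {
  private val pending = mutable.Map.empty[(String, Long), Cmd]
  def add(channelId: String, htlcId: Long, cmd: Cmd): Unit = pending((channelId, htlcId)) = cmd
  def remove(channelId: String, htlcId: Long): Unit = pending.remove((channelId, htlcId))
  def list(channelId: String): Seq[Cmd] = pending.collect { case ((`channelId`, _), cmd) => cmd }.toSeq
}

object RedeliveryDemo extends App {
  val store = new PendingRelayStore
  // 1) send: forward first, then persist (persistence must not add latency to the hot path)
  store.add("chan1", 42L, Cmd(42L, "fulfill"))
  // 2) on reconnection, replay everything that was never acknowledged
  val replayed = store.list("chan1")
  println(s"re-sending ${replayed.size} unacked commands")
  // 3) ack: once the channel has irrevocably signed the change, drop the record
  store.remove("chan1", 42L)
  assert(store.list("chan1").isEmpty)
}
```

Persisting *after* forwarding keeps the hot path latency-free, and replaying with `commit = false` followed by a single `CMD_SIGN` batches all redeliveries into one signature, as the comments in the diff note.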
@ -5,25 +5,38 @@ import fr.acinq.bitcoin.{BinaryData, Crypto, MilliSatoshi}
import fr.acinq.eclair.channel.{CMD_FAIL_HTLC, CMD_FULFILL_HTLC}
import fr.acinq.eclair.db.Payment
import fr.acinq.eclair.wire._
import scala.concurrent.duration._
import fr.acinq.eclair.{NodeParams, randomBytes}

import scala.compat.Platform
import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success, Try}

/**
* Created by PM on 17/06/2016.
*/
class LocalPaymentHandler(nodeParams: NodeParams) extends Actor with ActorLogging {
class LocalPaymentHandler(nodeParams: NodeParams)(implicit ec: ExecutionContext = ExecutionContext.Implicits.global) extends Actor with ActorLogging {

context.system.scheduler.schedule(10 minutes, 10 minutes)(self ! Platform.currentTime / 1000)

override def receive: Receive = run(Map())

def run(h2r: Map[BinaryData, (BinaryData, PaymentRequest)]): Receive = {

case currentSeconds: Long =>
context.become(run(h2r.collect {
case e@(_, (_, pr)) if pr.expiry.isEmpty => e // requests that don't expire are kept forever
case e@(_, (_, pr)) if pr.timestamp + pr.expiry.get > currentSeconds => e // clean up expired requests
}))

case ReceivePayment(amount_opt, desc) =>
Try {
if (h2r.size > nodeParams.maxPendingPaymentRequests) {
throw new RuntimeException(s"too many pending payment requests (max=${nodeParams.maxPendingPaymentRequests})")
}
val paymentPreimage = randomBytes(32)
val paymentHash = Crypto.sha256(paymentPreimage)
(paymentPreimage, paymentHash, PaymentRequest(nodeParams.chainHash, amount_opt, paymentHash, nodeParams.privateKey, desc))
(paymentPreimage, paymentHash, PaymentRequest(nodeParams.chainHash, amount_opt, paymentHash, nodeParams.privateKey, desc, fallbackAddress = None, expirySeconds = Some(nodeParams.paymentRequestExpiry.toSeconds)))
} match {
case Success((r, h, pr)) =>
log.debug(s"generated payment request=${PaymentRequest.write(pr)} from amount=$amount_opt")

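The handler above prunes its pending-request map every 10 minutes: requests without an expiry tag are kept forever, expired ones are dropped. A small self-contained sketch of the same `collect`-based cleanup, with a simplified `Req` standing in for `PaymentRequest`:

```scala
// Sketch of the periodic cleanup in LocalPaymentHandler: requests with no expiry
// are kept forever, expired ones are dropped. `Req` is a simplified stand-in.
final case class Req(timestamp: Long, expiry: Option[Long])

def prune(h2r: Map[String, Req], nowSeconds: Long): Map[String, Req] =
  h2r.collect {
    case e@(_, req) if req.expiry.isEmpty => e                          // no expiry: keep
    case e@(_, req) if req.timestamp + req.expiry.get > nowSeconds => e // not yet expired: keep
  }

// example: one permanent request, one expired, one still valid
val now = 1000L
val reqs = Map("a" -> Req(0, None), "b" -> Req(0, Some(100)), "c" -> Req(950, Some(100)))
assert(prune(reqs, now).keySet == Set("a", "c"))
```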
@ -9,7 +9,7 @@ sealed trait PaymentEvent {
val paymentHash: BinaryData
}

case class PaymentSent(amount: MilliSatoshi, feesPaid: MilliSatoshi, paymentHash: BinaryData) extends PaymentEvent
case class PaymentSent(amount: MilliSatoshi, feesPaid: MilliSatoshi, paymentHash: BinaryData, paymentPreimage: BinaryData) extends PaymentEvent

case class PaymentRelayed(amountIn: MilliSatoshi, amountOut: MilliSatoshi, paymentHash: BinaryData) extends PaymentEvent

@ -1,6 +1,6 @@
package fr.acinq.eclair.payment

import akka.actor.{ActorRef, FSM, LoggingFSM, Props, Status}
import akka.actor.{ActorRef, FSM, Props, Status}
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.bitcoin.{BinaryData, MilliSatoshi}
import fr.acinq.eclair._
@ -42,7 +42,7 @@ case object WAITING_FOR_PAYMENT_COMPLETE extends State
/**
* Created by PM on 26/08/2016.
*/
class PaymentLifecycle(sourceNodeId: PublicKey, router: ActorRef, register: ActorRef) extends LoggingFSM[State, Data] {
class PaymentLifecycle(sourceNodeId: PublicKey, router: ActorRef, register: ActorRef) extends FSM[State, Data] {

import PaymentLifecycle._

@ -73,11 +73,29 @@ class PaymentLifecycle(sourceNodeId: PublicKey, router: ActorRef, register: Acto

case Event(fulfill: UpdateFulfillHtlc, w: WaitingForComplete) =>
w.sender ! PaymentSucceeded(w.hops, fulfill.paymentPreimage)
context.system.eventStream.publish(PaymentSent(MilliSatoshi(w.c.amountMsat), MilliSatoshi(w.cmd.amountMsat - w.c.amountMsat), w.cmd.paymentHash))
context.system.eventStream.publish(PaymentSent(MilliSatoshi(w.c.amountMsat), MilliSatoshi(w.cmd.amountMsat - w.c.amountMsat), w.cmd.paymentHash, fulfill.paymentPreimage))
stop(FSM.Normal)

case Event(fail: UpdateFailHtlc, WaitingForComplete(s, c, _, failures, sharedSecrets, ignoreNodes, ignoreChannels, hops)) =>
Sphinx.parseErrorPacket(fail.reason, sharedSecrets) match {
case Success(e@ErrorPacket(nodeId, failureMessage)) if nodeId == c.targetNodeId =>
// if destination node returns an error, we fail the payment immediately
log.warning(s"received an error message from target nodeId=$nodeId, failing the payment (failure=$failureMessage)")
s ! PaymentFailed(c.paymentHash, failures = failures :+ RemoteFailure(hops, e))
stop(FSM.Normal)
case res if failures.size + 1 >= c.maxAttempts =>
// otherwise we never try more than maxAttempts, no matter the kind of error returned
val failure = res match {
case Success(e@ErrorPacket(nodeId, failureMessage)) =>
log.info(s"received an error message from nodeId=$nodeId (failure=$failureMessage)")
RemoteFailure(hops, e)
case Failure(t) =>
log.warning(s"cannot parse returned error: ${t.getMessage}")
UnreadableRemoteFailure(hops)
}
log.warning(s"too many failed attempts, failing the payment")
s ! PaymentFailed(c.paymentHash, failures = failures :+ failure)
stop(FSM.Normal)
case Failure(t) =>
log.warning(s"cannot parse returned error: ${t.getMessage}")
// in that case we don't know which node is sending garbage, let's try to blacklist all nodes except the one we are directly connected to and the destination node
@ -85,17 +103,8 @@ class PaymentLifecycle(sourceNodeId: PublicKey, router: ActorRef, register: Acto
log.warning(s"blacklisting intermediate nodes=${blacklist.mkString(",")}")
router ! RouteRequest(sourceNodeId, c.targetNodeId, c.assistedRoutes, ignoreNodes ++ blacklist, ignoreChannels)
goto(WAITING_FOR_ROUTE) using WaitingForRoute(s, c, failures :+ UnreadableRemoteFailure(hops))
case Success(e@ErrorPacket(nodeId, failureMessage)) if nodeId == c.targetNodeId =>
log.warning(s"received an error message from target nodeId=$nodeId, failing the payment (failure=$failureMessage)")
s ! PaymentFailed(c.paymentHash, failures = failures :+ RemoteFailure(hops, e))
stop(FSM.Normal)
case Success(e@ErrorPacket(nodeId, failureMessage)) if failures.size + 1 >= c.maxAttempts =>
log.info(s"received an error message from nodeId=$nodeId (failure=$failureMessage)")
log.warning(s"too many failed attempts, failing the payment")
s ! PaymentFailed(c.paymentHash, failures = failures :+ RemoteFailure(hops, e))
stop(FSM.Normal)
case Success(e@ErrorPacket(nodeId, failureMessage: Node)) =>
log.info(s"received an error message from nodeId=$nodeId, trying to route around it (failure=$failureMessage)")
log.info(s"received 'Node' type error message from nodeId=$nodeId, trying to route around it (failure=$failureMessage)")
// let's try to route around this node
router ! RouteRequest(sourceNodeId, c.targetNodeId, c.assistedRoutes, ignoreNodes + nodeId, ignoreChannels)
goto(WAITING_FOR_ROUTE) using WaitingForRoute(s, c, failures :+ RemoteFailure(hops, e))

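The rewritten failure handling above checks the cases in a deliberate order: an error from the destination fails the payment at once, the `maxAttempts` budget is enforced no matter what kind of error came back, a `Node`-level error triggers a retry that routes around the offending node, and an unparseable error blacklists the intermediate nodes. A much-simplified sketch of that decision order, with plain strings standing in for node ids and Sphinx error packets (the real code also has channel-level error branches not shown in this hunk):

```scala
// Heavily simplified decision order of PaymentLifecycle's failure handling.
sealed trait Decision
case object FailNow extends Decision
final case class RetryIgnoringNode(nodeId: String) extends Decision
case object RetryIgnoringIntermediates extends Decision

def decide(parsedError: Option[String], // None = the error packet could not be parsed
           isNodeError: Boolean,
           target: String,
           attempts: Int,
           maxAttempts: Int): Decision = parsedError match {
  case Some(`target`)                   => FailNow                     // destination itself failed the payment
  case _ if attempts + 1 >= maxAttempts => FailNow                     // retry budget exhausted, whatever the error
  case Some(nodeId) if isNodeError      => RetryIgnoringNode(nodeId)   // 'Node' error: route around it
  case None                             => RetryIgnoringIntermediates  // garbage error: blacklist intermediates
  case Some(_)                          => RetryIgnoringIntermediates  // other cases handled further down in the real code
}

assert(decide(Some("dest"), isNodeError = false, target = "dest", attempts = 0, maxAttempts = 5) == FailNow)
assert(decide(Some("hop2"), isNodeError = true, target = "dest", attempts = 0, maxAttempts = 5) == RetryIgnoringNode("hop2"))
assert(decide(None, isNodeError = false, target = "dest", attempts = 4, maxAttempts = 5) == FailNow)
```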
@ -34,13 +34,13 @@ case class PaymentRequest(prefix: String, amount: Option[MilliSatoshi], timestam
*
* @return the payment hash
*/
def paymentHash = tags.collectFirst { case p: PaymentRequest.PaymentHashTag => p }.get.hash
lazy val paymentHash = tags.collectFirst { case p: PaymentRequest.PaymentHashTag => p }.get.hash

/**
*
* @return the description of the payment, or its hash
*/
def description: Either[String, BinaryData] = tags.collectFirst {
lazy val description: Either[String, BinaryData] = tags.collectFirst {
case PaymentRequest.DescriptionTag(d) => Left(d)
case PaymentRequest.DescriptionHashTag(h) => Right(h)
}.get
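Turning `paymentHash`, `description` and the other accessors from `def` into `lazy val` means the `collectFirst` scan over the tags runs once and is cached instead of on every access; since a `PaymentRequest` is immutable, the result can never change. A quick demonstration of the difference:

```scala
// Why def -> lazy val matters here: a def re-runs the collectFirst scan on every
// access, a lazy val runs it once and caches the result. Minimal demonstration:
class Demo(tags: Seq[Int]) {
  var scans = 0
  def viaDef: Option[Int] = { scans += 1; tags.collectFirst { case t if t > 10 => t } }
  lazy val viaLazyVal: Option[Int] = { scans += 1; tags.collectFirst { case t if t > 10 => t } }
}

val d = new Demo(Seq(1, 20, 3))
d.viaDef; d.viaDef
assert(d.scans == 2)   // def: scanned twice
d.viaLazyVal; d.viaLazyVal
assert(d.scans == 3)   // lazy val: scanned once, then cached
```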
@ -58,13 +58,13 @@ case class PaymentRequest(prefix: String, amount: Option[MilliSatoshi], timestam
case PaymentRequest.FallbackAddressTag(version, hash) if prefix == "lntb" => Bech32.encodeWitnessAddress("tb", version, hash)
}

def routingInfo(): Seq[Seq[ExtraHop]] = tags.collect { case t: RoutingInfoTag => t.path }
lazy val routingInfo: Seq[Seq[ExtraHop]] = tags.collect { case t: RoutingInfoTag => t.path }

def expiry: Option[Long] = tags.collectFirst {
lazy val expiry: Option[Long] = tags.collectFirst {
case PaymentRequest.ExpiryTag(seconds) => seconds
}

def minFinalCltvExpiry: Option[Long] = tags.collectFirst {
lazy val minFinalCltvExpiry: Option[Long] = tags.collectFirst {
case PaymentRequest.MinFinalCltvExpiryTag(expiry) => expiry
}

@ -122,7 +122,7 @@ object PaymentRequest {
tags = List(
Some(PaymentHashTag(paymentHash)),
Some(DescriptionTag(description)),
expirySeconds.map(ExpiryTag(_))
expirySeconds.map(ExpiryTag)
).flatten ++ extraHops.map(RoutingInfoTag(_)),
signature = BinaryData.empty)
.sign(privateKey)
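`expirySeconds.map(ExpiryTag(_))` and `expirySeconds.map(ExpiryTag)` compile to the same thing: the auto-generated companion object of a single-field case class already extends `Function1`, so it can be passed to `map` directly. A sketch, with `ExpiryTag` redeclared locally for the demonstration:

```scala
// The change from map(ExpiryTag(_)) to map(ExpiryTag) is purely cosmetic: the
// synthetic companion of a one-parameter case class is already a Long => ExpiryTag.
final case class ExpiryTag(seconds: Long)

val expirySeconds: Option[Long] = Some(3600L)
val a = expirySeconds.map(ExpiryTag(_)) // explicit lambda
val b = expirySeconds.map(ExpiryTag)    // companion used directly as a function
assert(a == b)
```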
@ -365,7 +365,7 @@ object PaymentRequest {
object Signature {
/**
*
* @param signature 65-bytes signatyre: r (32 bytes) | s (32 bytes) | recid (1 bytes)
* @param signature 65-bytes signature: r (32 bytes) | s (32 bytes) | recid (1 bytes)
* @return a (r, s, recoveryId)
*/
def decode(signature: BinaryData): (BigInteger, BigInteger, Byte) = {
@ -392,7 +392,7 @@ object PaymentRequest {
*
* @param stream stream to write to
* @param value a 5bits value
* @return an upated stream
* @return an updated stream
*/
def write5(stream: BitStream, value: Int5): BitStream = stream.writeBits(toBits(value))

@ -10,7 +10,7 @@ import fr.acinq.eclair.{Globals, NodeParams}
import scodec.bits.BitVector
import scodec.{Attempt, DecodeResult}

import scala.util.{Failure, Success, Try}
import scala.util.{Failure, Success}

// @formatter:off

@ -23,8 +23,6 @@ case class ForwardFulfill(fulfill: UpdateFulfillHtlc, to: Origin)
case class ForwardFail(fail: UpdateFailHtlc, to: Origin)
case class ForwardFailMalformed(fail: UpdateFailMalformedHtlc, to: Origin)

case class AckFulfillCmd(channelId: BinaryData, htlcId: Long)

// @formatter:on

@ -33,34 +31,15 @@ case class AckFulfillCmd(channelId: BinaryData, htlcId: Long)
*/
class Relayer(nodeParams: NodeParams, register: ActorRef, paymentHandler: ActorRef) extends Actor with ActorLogging {

import nodeParams.preimagesDb

context.system.eventStream.subscribe(self, classOf[ChannelStateChanged])
context.system.eventStream.subscribe(self, classOf[LocalChannelUpdate])
context.system.eventStream.subscribe(self, classOf[LocalChannelDown])

val commandBuffer = context.actorOf(Props(new CommandBuffer(nodeParams, register)))

override def receive: Receive = main(Map())

def main(channelUpdates: Map[Long, ChannelUpdate]): Receive = {

case ChannelStateChanged(channel, _, _, _, nextState, d: HasCommitments) =>
import d.channelId
// if channel is in a state where it can have pending htlcs, we send them the fulfills we know of
nextState match {
case NORMAL | SHUTDOWN | CLOSING =>
preimagesDb.listPreimages(channelId) match {
case Nil => ()
case preimages =>
log.info(s"re-sending ${preimages.size} unacked fulfills to channel $channelId")
preimages.map(p => CMD_FULFILL_HTLC(p._2, p._3, commit = false)).foreach(channel ! _)
// better to sign once instead of after each fulfill
channel ! CMD_SIGN
}
case _ => ()
}

case _: ChannelStateChanged => ()

case LocalChannelUpdate(_, channelId, shortChannelId, remoteNodeId, _, channelUpdate) =>
log.debug(s"updating channel_update for channelId=$channelId shortChannelId=${shortChannelId.toHexString} remoteNodeId=$remoteNodeId channelUpdate=$channelUpdate ")
context become main(channelUpdates + (channelUpdate.shortChannelId -> channelUpdate))
@ -93,7 +72,7 @@ class Relayer(nodeParams: NodeParams, register: ActorRef, paymentHandler: ActorR
cmd match {
case Left(cmdFail) =>
log.info(s"rejecting htlc #${add.id} paymentHash=${add.paymentHash} from channelId=${add.channelId} reason=${cmdFail.reason}")
sender ! cmdFail
commandBuffer ! CommandBuffer.CommandSend(add.channelId, add.id, cmdFail)
case Right(addHtlc) =>
log.debug(s"forwarding htlc #${add.id} paymentHash=${add.paymentHash} to payment-handler")
paymentHandler forward addHtlc
@ -118,7 +97,7 @@ class Relayer(nodeParams: NodeParams, register: ActorRef, paymentHandler: ActorR
cmd match {
case Left(cmdFail) =>
log.info(s"rejecting htlc #${add.id} paymentHash=${add.paymentHash} from channelId=${add.channelId} to shortChannelId=${perHopPayload.channel_id.toHexString} reason=${cmdFail.reason}")
sender ! cmdFail
commandBuffer ! CommandBuffer.CommandSend(add.channelId, add.id, cmdFail)
case Right(cmdAdd) =>
log.info(s"forwarding htlc #${add.id} paymentHash=${add.paymentHash} from channelId=${add.channelId} to shortChannelId=${perHopPayload.channel_id.toHexString}")
register ! Register.ForwardShortId(perHopPayload.channel_id, cmdAdd)
@ -127,17 +106,18 @@ class Relayer(nodeParams: NodeParams, register: ActorRef, paymentHandler: ActorR
log.warning(s"couldn't parse onion: reason=${t.getMessage}")
val cmdFail = CMD_FAIL_MALFORMED_HTLC(add.id, Crypto.sha256(add.onionRoutingPacket), failureCode = FailureMessageCodecs.BADONION, commit = true)
log.info(s"rejecting htlc #${add.id} paymentHash=${add.paymentHash} from channelId=${add.channelId} reason=malformed onionHash=${cmdFail.onionHash} failureCode=${cmdFail.failureCode}")
sender ! cmdFail
commandBuffer ! CommandBuffer.CommandSend(add.channelId, add.id, cmdFail)
}

case Status.Failure(Register.ForwardShortIdFailure(Register.ForwardShortId(shortChannelId, CMD_ADD_HTLC(_, _, _, _, Some(add), _)))) =>
log.warning(s"couldn't resolve downstream channel ${shortChannelId.toHexString}, failing htlc #${add.id}")
register ! Register.Forward(add.channelId, CMD_FAIL_HTLC(add.id, Right(UnknownNextPeer), commit = true))
val cmdFail = CMD_FAIL_HTLC(add.id, Right(UnknownNextPeer), commit = true)
commandBuffer ! CommandBuffer.CommandSend(add.channelId, add.id, cmdFail)

case Status.Failure(AddHtlcFailed(_, error, Local(Some(sender)), _)) =>
case Status.Failure(AddHtlcFailed(_, _, error, Local(Some(sender)), _)) =>
sender ! Status.Failure(error)

case Status.Failure(AddHtlcFailed(_, error, Relayed(originChannelId, originHtlcId, _, _), channelUpdate_opt)) =>
case Status.Failure(AddHtlcFailed(_, paymentHash, error, Relayed(originChannelId, originHtlcId, _, _), channelUpdate_opt)) =>
val failure = (error, channelUpdate_opt) match {
case (_: InsufficientFunds, Some(channelUpdate)) => TemporaryChannelFailure(channelUpdate)
case (_: TooManyAcceptedHtlcs, Some(channelUpdate)) => TemporaryChannelFailure(channelUpdate)
@ -146,36 +126,33 @@ class Relayer(nodeParams: NodeParams, register: ActorRef, paymentHandler: ActorR
case (_: HtlcTimedout, _) => PermanentChannelFailure
case _ => TemporaryNodeFailure
}
val cmd = CMD_FAIL_HTLC(originHtlcId, Right(failure), commit = true)
register ! Register.Forward(originChannelId, cmd)
val cmdFail = CMD_FAIL_HTLC(originHtlcId, Right(failure), commit = true)
log.info(s"rejecting htlc #$originHtlcId paymentHash=$paymentHash from channelId=$originChannelId reason=${cmdFail.reason}")
commandBuffer ! CommandBuffer.CommandSend(originChannelId, originHtlcId, cmdFail)

case ForwardFulfill(fulfill, Local(Some(sender))) =>
sender ! fulfill

case ForwardFulfill(fulfill, Relayed(originChannelId, originHtlcId, amountMsatIn, amountMsatOut)) =>
val cmd = CMD_FULFILL_HTLC(originHtlcId, fulfill.paymentPreimage, commit = true)
register ! Register.Forward(originChannelId, cmd)
commandBuffer ! CommandBuffer.CommandSend(originChannelId, originHtlcId, cmd)
context.system.eventStream.publish(PaymentRelayed(MilliSatoshi(amountMsatIn), MilliSatoshi(amountMsatOut), Crypto.sha256(fulfill.paymentPreimage)))
// we also store the preimage in a db (note that this happens *after* forwarding the fulfill to the channel, so we don't add latency)
preimagesDb.addPreimage(originChannelId, originHtlcId, fulfill.paymentPreimage)

case AckFulfillCmd(channelId, htlcId) =>
log.debug(s"fulfill acked for channelId=$channelId htlcId=$htlcId")
preimagesDb.removePreimage(channelId, htlcId)

case ForwardFail(fail, Local(Some(sender))) =>
sender ! fail

case ForwardFail(fail, Relayed(originChannelId, originHtlcId, _, _)) =>
val cmd = CMD_FAIL_HTLC(originHtlcId, Left(fail.reason), commit = true)
register ! Register.Forward(originChannelId, cmd)
commandBuffer ! CommandBuffer.CommandSend(originChannelId, originHtlcId, cmd)

case ForwardFailMalformed(fail, Local(Some(sender))) =>
sender ! fail

case ForwardFailMalformed(fail, Relayed(originChannelId, originHtlcId, _, _)) =>
val cmd = CMD_FAIL_MALFORMED_HTLC(originHtlcId, fail.onionHash, fail.failureCode, commit = true)
register ! Register.Forward(originChannelId, cmd)
commandBuffer ! CommandBuffer.CommandSend(originChannelId, originHtlcId, cmd)

case ack: CommandBuffer.CommandAck => commandBuffer forward ack

case "ok" => () // ignoring responses from channels
}

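The `(error, channelUpdate_opt)` match above decides what failure the upstream node sees: recoverable conditions such as a temporarily underfunded channel map to `TemporaryChannelFailure` (carrying the latest `channel_update` so the sender can refresh its view), a hard failure like an HTLC timeout maps to `PermanentChannelFailure`, and anything else degrades to `TemporaryNodeFailure`. The same mapping in a self-contained sketch with stand-in types:

```scala
// Stand-in sketch of the Relayer's upstream failure selection; the real code
// matches on eclair's channel exception types and wire failure messages.
sealed trait ChannelError
case object InsufficientFunds extends ChannelError
case object TooManyAcceptedHtlcs extends ChannelError
case object HtlcTimedout extends ChannelError
case object OtherError extends ChannelError

sealed trait FailureMsg
final case class TemporaryChannelFailure(update: String) extends FailureMsg
case object PermanentChannelFailure extends FailureMsg
case object TemporaryNodeFailure extends FailureMsg

def toFailure(error: ChannelError, channelUpdate: Option[String]): FailureMsg =
  (error, channelUpdate) match {
    case (InsufficientFunds, Some(u))    => TemporaryChannelFailure(u) // retryable, include latest update
    case (TooManyAcceptedHtlcs, Some(u)) => TemporaryChannelFailure(u)
    case (HtlcTimedout, _)               => PermanentChannelFailure    // hard failure
    case _                               => TemporaryNodeFailure       // fallback
  }

assert(toFailure(InsufficientFunds, Some("update1")) == TemporaryChannelFailure("update1"))
assert(toFailure(InsufficientFunds, None) == TemporaryNodeFailure) // no update available: fall back
```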
@ -8,6 +8,7 @@ import fr.acinq.bitcoin.Script.{pay2wsh, write}
import fr.acinq.eclair._
import fr.acinq.eclair.blockchain._
import fr.acinq.eclair.channel._
import fr.acinq.eclair.crypto.TransportHandler
import fr.acinq.eclair.io.Peer
import fr.acinq.eclair.payment.PaymentRequest.ExtraHop
import fr.acinq.eclair.transactions.Scripts
@ -31,18 +32,17 @@ case class RouteResponse(hops: Seq[Hop], ignoreNodes: Set[PublicKey], ignoreChan
case class ExcludeChannel(desc: ChannelDesc) // this is used when we get a TemporaryChannelFailure, to give time for the channel to recover (note that exclusions are directed)
case class LiftChannelExclusion(desc: ChannelDesc)
case class SendRoutingState(to: ActorRef)
case class Stash(channels: Map[ChannelAnnouncement, ActorRef], updates: Map[ChannelUpdate, ActorRef], nodes: Map[NodeAnnouncement, ActorRef])
case class Rebroadcast(ann: Queue[(RoutingMessage, ActorRef)])
case class Stash(updates: Map[ChannelUpdate, Set[ActorRef]], nodes: Map[NodeAnnouncement, Set[ActorRef]])

case class Data(nodes: Map[PublicKey, NodeAnnouncement],
channels: Map[Long, ChannelAnnouncement],
updates: Map[ChannelDesc, ChannelUpdate],
stash: Stash,
rebroadcast: Queue[(RoutingMessage, ActorRef)],
awaiting: Map[ChannelAnnouncement, ActorRef],
privateChannels: Map[Long, ChannelAnnouncement],
awaiting: Map[ChannelAnnouncement, Seq[ActorRef]], // note: this is a seq because we want to preserve order: the first actor is the one we need to send a tcp-ack to when validation is done
privateChannels: Map[Long, PublicKey], // short_channel_id -> node_id
privateUpdates: Map[ChannelDesc, ChannelUpdate],
excludedChannels: Set[ChannelDesc], // those channels are temporarily excluded from route calculation, because their node returned a TemporaryChannelFailure
sendStateWaitlist: Queue[ActorRef],
sendingState: Set[ActorRef])

sealed trait State
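The switch of `awaiting` from `Map[ChannelAnnouncement, ActorRef]` to `Map[ChannelAnnouncement, Seq[ActorRef]]` is about ordering, as the inline note says: while a channel is being validated, every peer that sends the same announcement is appended, but only the *first* origin must receive the `TransportHandler.ReadAck` once validation completes. A tiny sketch with strings standing in for `ActorRef`s:

```scala
// Why `awaiting` keeps a Seq rather than a Set: order matters, because only the
// first peer that sent the announcement gets the tcp read-ack after validation.
var awaiting = Map.empty[String, Seq[String]]

def announcementReceived(ann: String, peer: String): Unit =
  awaiting += ann -> (awaiting.getOrElse(ann, Nil) :+ peer)

announcementReceived("chan_ann_1", "peerA") // first sender: will get the ack
announcementReceived("chan_ann_1", "peerB") // duplicate while validating: recorded only

awaiting("chan_ann_1") match {
  case first +: _ => assert(first == "peerA") // ack goes to the first origin only
  case _          => ()
}
```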
@ -50,7 +50,6 @@ case object NORMAL extends State
case object WAITING_FOR_VALIDATION extends State

case object TickBroadcast
case object TickValidate
case object TickPruneStaleChannels

// @formatter:on
@ -69,266 +68,221 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
context.system.eventStream.subscribe(self, classOf[LocalChannelDown])

setTimer(TickBroadcast.toString, TickBroadcast, nodeParams.routerBroadcastInterval, repeat = true)
setTimer(TickValidate.toString, TickValidate, nodeParams.routerValidateInterval, repeat = true)
setTimer(TickPruneStaleChannels.toString, TickPruneStaleChannels, 1 day, repeat = true)
setTimer(TickPruneStaleChannels.toString, TickPruneStaleChannels, 1 hour, repeat = true)

val db = nodeParams.networkDb

// Note: We go through the whole validation process instead of directly loading into memory, because the channels
// could have been closed while we were shutdown, and if someone connects to us right after startup we don't want to
// advertise invalid channels. We could optimize this (at least not fetch txes from the blockchain, and not check sigs)
// Note: It is possible that some channels have been closed while we were shutdown. Since we are directly loading channels
// in memory without checking they are still alive, we may advertise closed channels if someone connects to us right after
// startup. That being said, if we stay down long enough that a significant number of channels are closed, there is a chance
// other peers forgot about us in the meantime.
{
log.info(s"loading network announcements from db...")
log.info("loading network announcements from db...")
// On Android, we discard the node announcements
val channels = db.listChannels()
val updates = db.listChannelUpdates()
val staleChannels = getStaleChannels(channels, updates)
if (staleChannels.size > 0) {
log.info(s"dropping ${staleChannels.size} stale channels pre-validation")
// let's prune the db (maybe eclair was stopped for a long time)
val staleChannels = getStaleChannels(channels.keys, updates)
if (staleChannels.nonEmpty) {
log.info("dropping {} stale channels pre-validation", staleChannels.size)
staleChannels.foreach(shortChannelId => db.removeChannel(shortChannelId)) // this also removes updates
}
val remainingChannels = channels.filterNot(c => staleChannels.contains(c.shortChannelId))
val remainingChannels = channels.keys.filterNot(c => staleChannels.contains(c.shortChannelId))
val remainingUpdates = updates.filterNot(c => staleChannels.contains(c.shortChannelId))

val initChannels = remainingChannels.map(c => (c.shortChannelId -> c)).toMap
val initChannelUpdates = remainingUpdates.map(u => (getDesc(u, initChannels(u.shortChannelId)) -> u)).toMap
log.info(s"loaded from db: channels=${remainingChannels.size} nodes=N/A updates=${remainingUpdates.size}")
startWith(NORMAL, Data(Map.empty, initChannels, initChannelUpdates, Stash(Map.empty, Map.empty, Map.empty), Queue.empty, Map.empty, Map.empty, Map.empty, Set.empty, Set.empty))

log.info("loaded from db: channels={} nodes={} updates={}", remainingChannels.size, 0, remainingUpdates.size)
startWith(NORMAL, Data(Map.empty, initChannels, initChannelUpdates, Stash(Map.empty, Map.empty), awaiting = Map.empty, privateChannels = Map.empty, privateUpdates = Map.empty, excludedChannels = Set.empty, sendStateWaitlist = Queue.empty, sendingState = Set.empty))
}

when(NORMAL) {
case Event(TickValidate, d) =>
require(d.awaiting.size == 0, "awaiting queue should be empty")
// we remove stale channels
val staleChannels = getStaleChannels(d.stash.channels.keys, d.stash.updates.keys)
val (droppedChannels, remainingChannels) = d.stash.channels.keys.partition(c => staleChannels.contains(c.shortChannelId))
val (droppedUpdates, _) = d.stash.updates.keys.partition(u => staleChannels.contains(u.shortChannelId))
// we validate non-stale channels that had a channel_update
val batch = remainingChannels.filter(c => d.stash.updates.keys.exists(_.shortChannelId == c.shortChannelId)).take(MAX_PARALLEL_JSONRPC_REQUESTS)
// we clean up the stash (nodes will be filtered afterwards)
val stash1 = d.stash.copy(channels = d.stash.channels -- droppedChannels -- batch, updates = d.stash.updates -- droppedUpdates)
if (staleChannels.size > 0) {
log.info(s"dropping ${staleChannels.size} stale channels pre-validation, stash channels: ${d.stash.channels.size} -> ${stash1.channels.size} updates: ${d.stash.updates.size} -> ${stash1.updates.size} nodes: ${stash1.nodes.size}")
}
if (batch.size > 0) {
log.info(s"validating a batch of ${batch.size} channels")
watcher ! ParallelGetRequest(batch.toSeq)
val awaiting1 = d.stash.channels.filterKeys(batch.toSet)
goto(WAITING_FOR_VALIDATION) using d.copy(stash = stash1, awaiting = awaiting1)
} else stay using d.copy(stash = stash1)
}

when(WAITING_FOR_VALIDATION) {
case Event(ParallelGetResponse(results), d) =>
log.info(s"got validation results for ${results.size} channels")
val validated = results.flatMap {
case IndividualResult(c, Some(tx), true) =>
// TODO: blacklisting
val (_, _, outputIndex) = fromShortId(c.shortChannelId)
// let's check that the output is indeed a P2WSH multisig 2-of-2 of nodeid1 and nodeid2)
val fundingOutputScript = write(pay2wsh(Scripts.multiSig2of2(PublicKey(c.bitcoinKey1), PublicKey(c.bitcoinKey2))))
if (tx.txOut.size < outputIndex + 1) {
log.error(s"invalid script for shortChannelId=${c.shortChannelId.toHexString}: txid=${tx.txid} does not have outputIndex=$outputIndex ann=$c")
None
} else if (fundingOutputScript != tx.txOut(outputIndex).publicKeyScript) {
log.error(s"invalid script for shortChannelId=${c.shortChannelId.toHexString} txid=${tx.txid} ann=$c")
None
} else {
// On Android we disable the ability to detect when external channels die. If we try to use them during a
// payment, we simply will get an error from the node that is just before the missing channel.
//watcher ! WatchSpentBasic(self, tx, outputIndex, BITCOIN_FUNDING_EXTERNAL_CHANNEL_SPENT(c.shortChannelId))
// TODO: check feature bit set
log.debug(s"added channel channelId=${c.shortChannelId.toHexString}")
context.system.eventStream.publish(ChannelDiscovered(c, tx.txOut(outputIndex).amount))
db.addChannel(c)
Some(c)
}
case IndividualResult(c, Some(tx), false) =>
// TODO: vulnerability if they flood us with spent funding tx?
log.warning(s"ignoring shortChannelId=${c.shortChannelId.toHexString} tx=${tx.txid} (funding tx not found in utxo)")
// there may be a record if we have just restarted
db.removeChannel(c.shortChannelId)
None
case IndividualResult(c, None, _) =>
// TODO: blacklist?
log.warning(s"could not retrieve tx for shortChannelId=${c.shortChannelId.toHexString}")
None
}

// in case we just validated our first local channel, we announce the local node
// note that this will also make sure we always update our node announcement on restart (eg: alias, color), because
// even if we had stored a previous announcement, it would be overriden by this more recent one
if (!d.nodes.contains(nodeParams.nodeId) && validated.exists(isRelatedTo(_, nodeParams.nodeId))) {
log.info(s"first local channel validated, announcing local node")
val nodeAnn = Announcements.makeNodeAnnouncement(nodeParams.privateKey, nodeParams.alias, nodeParams.color, nodeParams.publicAddresses)
self ! nodeAnn
}

// we also reprocess node and channel_update announcements related to channels that were just analyzed
val reprocessUpdates = d.stash.updates.filterKeys(u => results.exists(r => r.c.shortChannelId == u.shortChannelId))
val reprocessNodes = d.stash.nodes.filterKeys(n => results.exists(r => isRelatedTo(r.c, n.nodeId)))
reprocessUpdates.foreach { case (msg, origin) => self.tell(msg, origin) } // we preserve the origin when reprocessing the message
reprocessNodes.foreach { case (msg, origin) => self.tell(msg, origin) } // we preserve the origin when reprocessing the message

// and we remove the reprocessed messages from the stash
val stash1 = d.stash.copy(updates = d.stash.updates -- reprocessUpdates.keys, nodes = d.stash.nodes -- reprocessNodes.keys)

// we also add the newly validated channels to the rebroadcast queue
val rebroadcast1 = d.rebroadcast ++ d.awaiting.filterKeys(validated.toSet)

// we remove fake announcements that we may have made before
goto(NORMAL) using d.copy(channels = d.channels ++ validated.map(c => (c.shortChannelId -> c)), privateChannels = d.privateChannels -- validated.map(_.shortChannelId), rebroadcast = rebroadcast1, stash = stash1, awaiting = Map.empty)
}

whenUnhandled {

case Event(LocalChannelUpdate(_, _, shortChannelId, remoteNodeId, channelAnnouncement_opt, u), d: Data) =>
d.channels.get(shortChannelId) match {
case Some(_) =>
// channel had already been announced and router knows about it, we can process the channel_update
self ! u
stay
// channel has already been announced and router knows about it, we can process the channel_update
stay using handle(u, self, d)
case None =>
channelAnnouncement_opt match {
case Some(c) if d.awaiting.contains(c) =>
// channel is currently being verified, we can process the channel_update right away (it will be stashed)
stay using handle(u, self, d)
case Some(c) =>
// channel wasn't announced but here is the announcement, we will process it *before* the channel_update
self ! c
self ! u
stay
watcher ! ValidateRequest(c)
val d1 = d.copy(awaiting = d.awaiting + (c -> Nil)) // no origin
stay using handle(u, self, d1)
case None if d.privateChannels.contains(shortChannelId) =>
// channel isn't announced but we already know about it, we can process the channel_update
stay using handle(u, self, d)
case None =>
// channel isn't announced yet, do we have a fake announcement?
d.privateChannels.get(shortChannelId) match {
case Some(_) =>
// yes: nothing to do, we can process the channel_update
self ! u
stay
case None =>
// no: create one and add it to current state, then process the channel_update
log.info(s"adding unannounced local channel to remote=$remoteNodeId shortChannelId=${shortChannelId.toHexString}")
self ! u
val fake_c = Announcements.makeChannelAnnouncement("", shortChannelId, nodeParams.nodeId, remoteNodeId, nodeParams.nodeId, nodeParams.nodeId, "", "", "", "")
stay using d.copy(privateChannels = d.privateChannels + (shortChannelId -> fake_c))
}
// channel isn't announced and we never heard of it (maybe it is a private channel or maybe it is a public channel that doesn't yet have 6 confirmations)
// let's create a corresponding private channel and process the channel_update
log.info("adding unannounced local channel to remote={} shortChannelId={}", remoteNodeId, shortChannelId.toHexString)
stay using handle(u, self, d.copy(privateChannels = d.privateChannels + (shortChannelId -> remoteNodeId)))
}
}

case Event(LocalChannelDown(_, channelId, shortChannelId, _), d: Data) =>
log.debug(s"removed local channel_update for channelId=$channelId shortChannelId=${shortChannelId.toHexString}")
log.debug("removed local channel_update for channelId={} shortChannelId={}", channelId, shortChannelId.toHexString)
stay using d.copy(privateChannels = d.privateChannels - shortChannelId, privateUpdates = d.privateUpdates.filterKeys(_.id != shortChannelId))

case Event(s@SendRoutingState(remote), d: Data) =>
case Event(SendRoutingState(remote), d: Data) =>
if (d.sendingState.size > 3) {
log.info(s"received request to send announcements to $remote, already sending state to ${d.sendingState.size} peers, delaying...")
context.system.scheduler.scheduleOnce(3 seconds, self, s)
stay
} else {
log.info(s"info sending all announcements to $remote: channels=${d.channels.size} nodes=${d.nodes.size} updates=${d.updates.size}")
val batch = d.channels.values ++ d.nodes.values ++ d.updates.values
// we group and add delays to leave room for channel messages
val actor = context.actorOf(ThrottleForwarder.props(remote, batch, 100, 100 millis))
context watch actor
stay using d.copy(sendingState = d.sendingState + actor)
}
log.info("received request to send announcements to {}, already sending state to {} peers, adding to wait list (waiting={})", remote, d.sendingState.size, d.sendStateWaitlist.size)
context watch remote
stay using d.copy(sendStateWaitlist = d.sendStateWaitlist :+ remote)
} else stay using handleSendState(remote, d)

case Event(Terminated(actor), d: Data) if d.sendingState.contains(actor) =>
log.info(s"done sending announcements to a peer, freeing slot")
stay using d.copy(sendingState = d.sendingState - actor)
log.info("done sending announcements to a peer, freeing slot (waiting={})", d.sendStateWaitlist.size)
val d1 = d.copy(sendingState = d.sendingState - actor)
d.sendStateWaitlist.dequeueOption match {
case Some((remote, sendStateWaitlist1)) => stay using handleSendState(remote, d1.copy(sendStateWaitlist = sendStateWaitlist1))
case None => stay using d1
}

case Event(Terminated(actor), d: Data) if d.sendStateWaitlist.contains(actor) =>
// note: 'contains' and 'filter' operations are expensive on a queue, but its size should be very small (maybe even capped?)
log.info("peer={} died, removing from wait list (waiting={})", actor, d.sendStateWaitlist.size - 1)
stay using d.copy(sendStateWaitlist = d.sendStateWaitlist filterNot(_ == actor))

case Event(SendRoutingState(remote), _) =>
// disabled on Android for performance reasons
stay

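Instead of asking peers to retry after 3 seconds, the router now keeps at most a handful of full-state dumps in flight and parks further peers in `sendStateWaitlist`; when a `ThrottleForwarder` terminates, the first waiter is served. A self-contained sketch of that bounded-slots-plus-FIFO pattern (strings stand in for `ActorRef`s, and the `> maxSlots` test mirrors the diff's `d.sendingState.size > 3`):

```scala
// Bounded-concurrency wait list, as used for SendRoutingState: at most a few
// dumps run at once, excess peers queue up and are served as slots free up.
import scala.collection.immutable.Queue

final case class SendState(sending: Set[String], waitlist: Queue[String], maxSlots: Int = 3) {
  def request(peer: String): SendState =
    if (sending.size > maxSlots) copy(waitlist = waitlist :+ peer) // full: enqueue
    else copy(sending = sending + peer)                            // free slot: start sending
  def done(peer: String): SendState = {
    val s1 = copy(sending = sending - peer)
    s1.waitlist.dequeueOption match {
      case Some((next, rest)) => s1.copy(sending = s1.sending + next, waitlist = rest)
      case None               => s1
    }
  }
}

val s = SendState(Set.empty, Queue.empty)
  .request("p1").request("p2").request("p3").request("p4").request("p5") // p5 waits
assert(s.done("p1").sending.contains("p5")) // a freed slot goes to the first waiter
```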
case Event(c: ChannelAnnouncement, d) =>
log.debug(s"received channel announcement for shortChannelId=${c.shortChannelId.toHexString} nodeId1=${c.nodeId1} nodeId2=${c.nodeId2} from $sender")
if (d.channels.containsKey(c.shortChannelId) || d.awaiting.keys.exists(_.shortChannelId == c.shortChannelId) || d.stash.channels.contains(c)) {
log.debug("received channel announcement for shortChannelId={} nodeId1={} nodeId2={} from {}", c.shortChannelId.toHexString, c.nodeId1, c.nodeId2, sender)
if (d.channels.contains(c.shortChannelId)) {
sender ! TransportHandler.ReadAck(c)
log.debug("ignoring {} (duplicate)", c)
stay
} else if (d.awaiting.contains(c)) {
sender ! TransportHandler.ReadAck(c)
log.debug("ignoring {} (being verified)", c)
// adding the sender to the list of origins so that we don't send back the same announcement to this peer later
val origins = d.awaiting(c) :+ sender
stay using d.copy(awaiting = d.awaiting + (c -> origins))
} else if (!Announcements.checkSigs(c)) {
sender ! TransportHandler.ReadAck(c)
log.warning("bad signature for announcement {}", c)
sender ! Error(Peer.CHANNELID_ZERO, "bad announcement sig!!!".getBytes())
stay
} else {
log.debug("stashing {}", c)
stay using d.copy(stash = d.stash.copy(channels = d.stash.channels + (c -> sender)))
log.info("validating shortChannelId={}", c.shortChannelId.toHexString)
watcher ! ValidateRequest(c)
// we don't acknowledge the message just yet
stay using d.copy(awaiting = d.awaiting + (c -> Seq(sender)))
}

case Event(v@ValidateResult(c, _, _, _), d0) =>
d0.awaiting.get(c) match {
case Some(origin +: others) => origin ! TransportHandler.ReadAck(c) // now we can acknowledge the message, we only need to do it for the first peer that sent us the announcement
case _ => ()
}
log.info("got validation result for shortChannelId={} (awaiting={} stash.nodes={} stash.updates={})", c.shortChannelId.toHexString, d0.awaiting.size, d0.stash.nodes.size, d0.stash.updates.size)
val success = v match {
case ValidateResult(c, _, _, Some(t)) =>
log.warning("validation failure for shortChannelId={} reason={}", c.shortChannelId.toHexString, t.getMessage)
false
case ValidateResult(c, Some(tx), true, None) =>
// TODO: blacklisting
val (_, _, outputIndex) = fromShortId(c.shortChannelId)
// let's check that the output is indeed a P2WSH multisig 2-of-2 of nodeid1 and nodeid2)
val fundingOutputScript = write(pay2wsh(Scripts.multiSig2of2(PublicKey(c.bitcoinKey1), PublicKey(c.bitcoinKey2))))
if (tx.txOut.size < outputIndex + 1) {
log.error("invalid script for shortChannelId={}: txid={} does not have outputIndex={} ann={}", c.shortChannelId.toHexString, tx.txid, outputIndex, c)
false
} else if (fundingOutputScript != tx.txOut(outputIndex).publicKeyScript) {
log.error("invalid script for shortChannelId={} txid={} ann={}", c.shortChannelId.toHexString, tx.txid, c)
false
} else {
// On Android we disable the ability to detect when external channels die. If we try to use them during a
// payment, we simply will get an error from the node that is just before the missing channel.
//watcher ! WatchSpentBasic(self, tx, outputIndex, BITCOIN_FUNDING_EXTERNAL_CHANNEL_SPENT(c.shortChannelId))
// TODO: check feature bit set
log.debug("added channel channelId={}", c.shortChannelId.toHexString)
val capacity = tx.txOut(outputIndex).amount
context.system.eventStream.publish(ChannelDiscovered(c, capacity))
db.addChannel(c, tx.txid, capacity)

// in case we just validated our first local channel, we announce the local node
// note that this will also make sure we always update our node announcement on restart (eg: alias, color), because
// even if we had stored a previous announcement, it would be overridden by this more recent one
if (!d0.nodes.contains(nodeParams.nodeId) && isRelatedTo(c, nodeParams.nodeId)) {
log.info("first local channel validated, announcing local node")
val nodeAnn = Announcements.makeNodeAnnouncement(nodeParams.privateKey, nodeParams.alias, nodeParams.color, nodeParams.publicAddresses)
self ! nodeAnn
}
true
}
case ValidateResult(c, Some(tx), false, None) =>
// TODO: vulnerability if they flood us with spent funding tx?
log.warning("ignoring shortChannelId={} tx={} (funding tx not found in utxo)", c.shortChannelId.toHexString, tx.txid)
// there may be a record if we have just restarted
db.removeChannel(c.shortChannelId)
false
case ValidateResult(c, None, _, None) =>
// TODO: blacklist?
log.warning("could not retrieve tx for shortChannelId={}", c.shortChannelId.toHexString)
false
}

// we also reprocess node and channel_update announcements related to channels that were just analyzed
val reprocessUpdates = d0.stash.updates.filterKeys(u => u.shortChannelId == c.shortChannelId)
val reprocessNodes = d0.stash.nodes.filterKeys(n => isRelatedTo(c, n.nodeId))
// and we remove the reprocessed messages from the stash
val stash1 = d0.stash.copy(updates = d0.stash.updates -- reprocessUpdates.keys, nodes = d0.stash.nodes -- reprocessNodes.keys)
// we remove channel from awaiting map
val awaiting1 = d0.awaiting - c
if (success) {
val d1 = d0.copy(
channels = d0.channels + (c.shortChannelId -> c),
privateChannels = d0.privateChannels - c.shortChannelId, // we remove fake announcements that we may have made before
stash = stash1,
awaiting = awaiting1)
// we only reprocess updates and nodes if validation succeeded
val d2 = reprocessUpdates.foldLeft(d1) {
case (d, (u, origins)) => origins.foldLeft(d) { case (d, origin) => handle(u, origin, d) } // we reprocess the same channel_update for every origin (to preserve origin information)
}
val d3 = reprocessNodes.foldLeft(d2) {
case (d, (n, origins)) => origins.foldLeft(d) { case (d, origin) => handle(n, origin, d) } // we reprocess the same node_announcement for every origin (to preserve origin information)
}
stay using d3
} else {
stay using d0.copy(stash = stash1, awaiting = awaiting1)
}

case Event(n: NodeAnnouncement, d: Data) => stay // we just ignore node_announcements on android

case Event(u: ChannelUpdate, d: Data) =>
log.debug(s"received channel update for shortChannelId=${u.shortChannelId.toHexString} from $sender")
if (d.channels.contains(u.shortChannelId)) {
val publicChannel = true
val c = d.channels(u.shortChannelId)
val desc = getDesc(u, c)
if (d.updates.contains(desc) && d.updates(desc).timestamp >= u.timestamp) {
log.debug("ignoring {} (old timestamp or duplicate)", u)
stay
} else if (!Announcements.checkSig(u, desc.a)) {
log.warning(s"bad signature for announcement shortChannelId=${u.shortChannelId.toHexString} {}", u)
sender ! Error(Peer.CHANNELID_ZERO, "bad announcement sig!!!".getBytes())
stay
} else if (d.updates.contains(desc)) {
log.debug(s"updated channel_update for shortChannelId=${u.shortChannelId.toHexString} public=$publicChannel flags=${u.flags} {}", u)
context.system.eventStream.publish(ChannelUpdateReceived(u))
db.updateChannelUpdate(u)
stay using d.copy(updates = d.updates + (desc -> u), rebroadcast = d.rebroadcast :+ (u -> sender))
} else {
log.debug(s"added channel_update for shortChannelId=${u.shortChannelId.toHexString} public=$publicChannel flags=${u.flags} {}", u)
context.system.eventStream.publish(ChannelUpdateReceived(u))
db.addChannelUpdate(u)
stay using d.copy(updates = d.updates + (desc -> u), privateUpdates = d.privateUpdates - desc, rebroadcast = d.rebroadcast :+ (u -> sender))
}
} else if (d.awaiting.keys.exists(c => c.shortChannelId == u.shortChannelId) || d.stash.channels.keys.exists(c => c.shortChannelId == u.shortChannelId)) {
log.debug("stashing {}", u)
stay using d.copy(stash = d.stash.copy(updates = d.stash.updates + (u -> sender)))
} else if (d.privateChannels.contains(u.shortChannelId)) {
val publicChannel = false
val c = d.privateChannels(u.shortChannelId)
val desc = getDesc(u, c)
if (d.updates.contains(desc) && d.updates(desc).timestamp >= u.timestamp) {
log.debug("ignoring {} (old timestamp or duplicate)", u)
stay
} else if (!Announcements.checkSig(u, desc.a)) {
log.warning(s"bad signature for announcement shortChannelId=${u.shortChannelId.toHexString} {}", u)
sender ! Error(Peer.CHANNELID_ZERO, "bad announcement sig!!!".getBytes())
stay
} else if (d.privateUpdates.contains(desc)) {
log.debug(s"updated channel_update for shortChannelId=${u.shortChannelId.toHexString} public=$publicChannel flags=${u.flags} {}", u)
context.system.eventStream.publish(ChannelUpdateReceived(u))
stay using d.copy(privateUpdates = d.privateUpdates + (desc -> u))
} else {
log.debug(s"added channel_update for shortChannelId=${u.shortChannelId.toHexString} public=$publicChannel flags=${u.flags} {}", u)
context.system.eventStream.publish(ChannelUpdateReceived(u))
stay using d.copy(privateUpdates = d.privateUpdates + (desc -> u))
}
} else {
log.debug("ignoring announcement {} (unknown channel)", u)
stay
}
sender ! TransportHandler.ReadAck(u)
log.debug("received channel update for shortChannelId={} from {}", u.shortChannelId.toHexString, sender)
stay using handle(u, sender, d)

case Event(WatchEventSpentBasic(BITCOIN_FUNDING_EXTERNAL_CHANNEL_SPENT(shortChannelId)), d)
if d.channels.containsKey(shortChannelId) =>
if d.channels.contains(shortChannelId) =>
val lostChannel = d.channels(shortChannelId)
log.info(s"funding tx of channelId=${shortChannelId.toHexString} has been spent")
log.info("funding tx of channelId={} has been spent", shortChannelId.toHexString)
// we need to remove nodes that aren't tied to any channels anymore
val channels1 = d.channels - lostChannel.shortChannelId
val lostNodes = Seq(lostChannel.nodeId1, lostChannel.nodeId2).filterNot(nodeId => hasChannels(nodeId, channels1.values))
// let's clean the db and send the events
log.info(s"pruning shortChannelId=${shortChannelId.toHexString} (spent)")
log.info("pruning shortChannelId={} (spent)", shortChannelId.toHexString)
db.removeChannel(shortChannelId) // NB: this also removes channel updates
context.system.eventStream.publish(ChannelLost(shortChannelId))
lostNodes.foreach {
case nodeId =>
log.info(s"pruning nodeId=$nodeId (spent)")
log.info("pruning nodeId={} (spent)", nodeId)
db.removeNode(nodeId)
context.system.eventStream.publish(NodeLost(nodeId))
}
stay using d.copy(nodes = d.nodes -- lostNodes, channels = d.channels - shortChannelId, updates = d.updates.filterKeys(_.id != shortChannelId))

case Event(TickValidate, d) => stay // ignored

case Event(TickBroadcast, d) =>
// On Android we don't rebroadcast announcements
stay using d.copy(rebroadcast = Queue.empty)
stay

case Event(TickPruneStaleChannels, d) =>
// first we select channels that we will prune
@ -341,13 +295,13 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
// let's clean the db and send the events
staleChannels.foreach {
case shortChannelId =>
log.info(s"pruning shortChannelId=${shortChannelId.toHexString} (stale)")
log.info("pruning shortChannelId={} (stale)", shortChannelId.toHexString)
db.removeChannel(shortChannelId) // NB: this also removes channel updates
context.system.eventStream.publish(ChannelLost(shortChannelId))
}
staleNodes.foreach {
case nodeId =>
log.info(s"pruning nodeId=$nodeId (stale)")
log.info("pruning nodeId={} (stale)", nodeId)
db.removeNode(nodeId)
context.system.eventStream.publish(NodeLost(nodeId))
}
@ -355,12 +309,12 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]

case Event(ExcludeChannel(desc@ChannelDesc(shortChannelId, nodeId, _)), d) =>
val banDuration = nodeParams.channelExcludeDuration
log.info(s"excluding shortChannelId=${shortChannelId.toHexString} from nodeId=$nodeId for duration=$banDuration")
log.info("excluding shortChannelId={} from nodeId={} for duration={}", shortChannelId.toHexString, nodeId, banDuration)
context.system.scheduler.scheduleOnce(banDuration, self, LiftChannelExclusion(desc))
stay using d.copy(excludedChannels = d.excludedChannels + desc)

case Event(LiftChannelExclusion(desc@ChannelDesc(shortChannelId, nodeId, _)), d) =>
log.info(s"reinstating shortChannelId=${shortChannelId.toHexString} from nodeId=$nodeId")
log.info("reinstating shortChannelId={} from nodeId={}", shortChannelId.toHexString, nodeId)
stay using d.copy(excludedChannels = d.excludedChannels - desc)

case Event('nodes, d) =>
@ -395,7 +349,7 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
val updates2 = updates1.filterKeys(!d.excludedChannels.contains(_))
// we also filter out disabled channels, and channels/nodes that are blacklisted for this particular request
val updates3 = filterUpdates(updates2, ignoreNodes, ignoreChannels)
log.info(s"finding a route $start->$end with ignoreNodes=${ignoreNodes.map(_.toBin).mkString(",")} ignoreChannels=${ignoreChannels.map(_.toHexString).mkString(",")}")
log.info("finding a route {}->{} with ignoreNodes={} ignoreChannels={}", start, end, ignoreNodes.map(_.toBin).mkString(","), ignoreChannels.map(_.toHexString).mkString(","))
findRoute(start, end, updates3).map(r => RouteResponse(r, ignoreNodes, ignoreChannels)) pipeTo sender
// On Android, we don't monitor channels to see if their funding is spent because it is too expensive
// if the node that created this channel tells us it is unusable (only permanent channel failure) we forget about it
@ -404,20 +358,114 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
stay
}

onTransition {
case _ -> NORMAL =>
log.info(s"current status channels=${nextStateData.channels.size} nodes=${nextStateData.nodes.size} updates=${nextStateData.updates.size} privateChannels=${nextStateData.privateChannels.size} privateUpdates=${nextStateData.privateUpdates.size}")
log.info(s"children=${context.children.size} rebroadcast=${nextStateData.rebroadcast.size} stash.channels=${nextStateData.stash.channels.size} stash.nodes=${nextStateData.stash.nodes.size} stash.updates=${nextStateData.stash.updates.size} awaiting=${nextStateData.awaiting.size} excludedChannels=${nextStateData.excludedChannels.size}")
initialize()

def handle(n: NodeAnnouncement, origin: ActorRef, d: Data): Data =
if (d.stash.nodes.contains(n)) {
log.debug("ignoring {} (already stashed)", n)
val origins = d.stash.nodes(n) + origin
d.copy(stash = d.stash.copy(nodes = d.stash.nodes + (n -> origins)))
} else if (d.nodes.contains(n.nodeId) && d.nodes(n.nodeId).timestamp >= n.timestamp) {
log.debug("ignoring {} (old timestamp or duplicate)", n)
d
} else if (!Announcements.checkSig(n)) {
log.warning("bad signature for {}", n)
origin ! Error(Peer.CHANNELID_ZERO, "bad announcement sig!!!".getBytes())
d
} else if (d.nodes.contains(n.nodeId)) {
log.debug("updated node nodeId={}", n.nodeId)
context.system.eventStream.publish(NodeUpdated(n))
db.updateNode(n)
d.copy(nodes = d.nodes + (n.nodeId -> n))
} else if (d.channels.values.exists(c => isRelatedTo(c, n.nodeId))) {
log.debug("added node nodeId={}", n.nodeId)
context.system.eventStream.publish(NodeDiscovered(n))
db.addNode(n)
d.copy(nodes = d.nodes + (n.nodeId -> n))
} else if (d.awaiting.keys.exists(c => isRelatedTo(c, n.nodeId))) {
log.debug("stashing {}", n)
d.copy(stash = d.stash.copy(nodes = d.stash.nodes + (n -> Set(origin))))
} else {
log.debug("ignoring {} (no related channel found)", n)
// there may be a record if we have just restarted
db.removeNode(n.nodeId)
d
}

def handle(u: ChannelUpdate, origin: ActorRef, d: Data): Data =
if (d.channels.contains(u.shortChannelId)) {
// related channel is already known (note: this means no related channel_update is in the stash)
val publicChannel = true
val c = d.channels(u.shortChannelId)
val desc = getDesc(u, c)
if (d.updates.contains(desc) && d.updates(desc).timestamp >= u.timestamp) {
log.debug("ignoring {} (old timestamp or duplicate)", u)
d
} else if (!Announcements.checkSig(u, desc.a)) {
log.warning("bad signature for announcement shortChannelId={} {}", u.shortChannelId.toHexString, u)
origin ! Error(Peer.CHANNELID_ZERO, "bad announcement sig!!!".getBytes())
d
} else if (d.updates.contains(desc)) {
log.debug("updated channel_update for shortChannelId={} public={} flags={} {}", u.shortChannelId.toHexString, publicChannel, u.flags, u)
context.system.eventStream.publish(ChannelUpdateReceived(u))
db.updateChannelUpdate(u)
d.copy(updates = d.updates + (desc -> u))
} else {
log.debug("added channel_update for shortChannelId={} public={} flags={} {}", u.shortChannelId.toHexString, publicChannel, u.flags, u)
context.system.eventStream.publish(ChannelUpdateReceived(u))
db.addChannelUpdate(u)
d.copy(updates = d.updates + (desc -> u), privateUpdates = d.privateUpdates - desc)
}
} else if (d.awaiting.keys.exists(c => c.shortChannelId == u.shortChannelId)) {
// channel is currently being validated
if (d.stash.updates.contains(u)) {
log.debug("ignoring {} (already stashed)", u)
val origins = d.stash.updates(u) + origin
d.copy(stash = d.stash.copy(updates = d.stash.updates + (u -> origins)))
} else {
log.debug("stashing {}", u)
d.copy(stash = d.stash.copy(updates = d.stash.updates + (u -> Set(origin))))
}
} else if (d.privateChannels.contains(u.shortChannelId)) {
val publicChannel = false
val remoteNodeId = d.privateChannels(u.shortChannelId)
val (a, b) = if (Announcements.isNode1(nodeParams.nodeId, remoteNodeId)) (nodeParams.nodeId, remoteNodeId) else (remoteNodeId, nodeParams.nodeId)
val desc = if (Announcements.isNode1(u.flags)) ChannelDesc(u.shortChannelId, a, b) else ChannelDesc(u.shortChannelId, b, a)
if (d.updates.contains(desc) && d.updates(desc).timestamp >= u.timestamp) {
log.debug("ignoring {} (old timestamp or duplicate)", u)
d
} else if (!Announcements.checkSig(u, desc.a)) {
log.warning("bad signature for announcement shortChannelId={} {}", u.shortChannelId.toHexString, u)
origin ! Error(Peer.CHANNELID_ZERO, "bad announcement sig!!!".getBytes())
d
} else if (d.privateUpdates.contains(desc)) {
log.debug("updated channel_update for shortChannelId={} public={} flags={} {}", u.shortChannelId.toHexString, publicChannel, u.flags, u)
context.system.eventStream.publish(ChannelUpdateReceived(u))
d.copy(privateUpdates = d.privateUpdates + (desc -> u))
} else {
log.debug("added channel_update for shortChannelId={} public={} flags={} {}", u.shortChannelId.toHexString, publicChannel, u.flags, u)
context.system.eventStream.publish(ChannelUpdateReceived(u))
d.copy(privateUpdates = d.privateUpdates + (desc -> u))
}
} else {
log.debug("ignoring announcement {} (unknown channel)", u)
d
}

def handleSendState(remote: ActorRef, d: Data): Data = {
|
||||
log.info("sending all announcements to {}: channels={} nodes={} updates={}", remote, d.channels.size, d.nodes.size, d.updates.size)
|
||||
val batch = d.channels.values ++ d.nodes.values ++ d.updates.values
|
||||
// we group and add delays to leave room for channel messages
|
||||
val actor = context.actorOf(ThrottleForwarder.props(remote, batch, 100, 100 millis))
|
||||
context watch actor
|
||||
d.copy(sendingState = d.sendingState + actor)
|
||||
}
|
||||
|
||||
initialize()
|
||||
|
||||
}
|
||||
|
||||
object Router {
|
||||
|
||||
val MAX_PARALLEL_JSONRPC_REQUESTS = 50
|
||||
|
||||
def props(nodeParams: NodeParams, watcher: ActorRef) = Props(new Router(nodeParams, watcher))
|
||||
|
||||
def toFakeUpdate(extraHop: ExtraHop): ChannelUpdate =
|
||||
|
@ -433,19 +481,6 @@ object Router {
|
|||
}.toMap
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper method to build a ChannelDesc, *nodeX and nodeY are provided in no particular order* and will be sorted
|
||||
*
|
||||
* @param u
|
||||
* @param nodeX
|
||||
* @param nodeY
|
||||
* @return a ChannelDesc
|
||||
*/
|
||||
def getDesc(u: ChannelUpdate, nodeX: PublicKey, nodeY: PublicKey): ChannelDesc = {
|
||||
val (nodeId1, nodeId2) = if (Announcements.isNode1(nodeX, nodeY)) (nodeX, nodeY) else (nodeY, nodeX)
|
||||
if (Announcements.isNode1(u.flags)) ChannelDesc(u.shortChannelId, nodeId1, nodeId2) else ChannelDesc(u.shortChannelId, nodeId2, nodeId1)
|
||||
}
|
||||
|
||||
def getDesc(u: ChannelUpdate, channel: ChannelAnnouncement): ChannelDesc = {
|
||||
require(u.flags.data.size == 2, s"invalid flags length ${u.flags.data.size} != 2")
|
||||
// the least significant bit tells us if it is node1 or node2
|
||||
|
|
|
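Both branches of `handle(u, ...)` above apply the same staleness test before accepting a `channel_update`. A minimal sketch of that rule, using the names from the surrounding code (an illustration, not a helper that exists in the diff):

```scala
// Sketch: an update is ignored unless it is strictly newer than the one we
// already have for the same channel direction. `Data`, `ChannelDesc` and
// `ChannelUpdate` are the types used in the code above.
def isStale(d: Data, desc: ChannelDesc, u: ChannelUpdate): Boolean =
  d.updates.get(desc).exists(_.timestamp >= u.timestamp)
```
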
@@ -1,6 +1,6 @@
package fr.acinq.eclair.router

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.actor.{Actor, ActorLogging, ActorRef, Props, Terminated}

import scala.concurrent.duration.{FiniteDuration, _}

@@ -17,6 +17,8 @@ class ThrottleForwarder(target: ActorRef, messages: Iterable[Any], chunkSize: In
  import scala.concurrent.ExecutionContext.Implicits.global

  context watch target

  val clock = context.system.scheduler.schedule(0 second, delay, self, Tick)

  log.debug(s"sending messages=${messages.size} with chunkSize=$chunkSize and delay=$delay")

@@ -34,6 +36,11 @@ class ThrottleForwarder(target: ActorRef, messages: Iterable[Any], chunkSize: In
      chunk.foreach(target ! _)
      context become group(rest)
    }

    case Terminated(_) =>
      clock.cancel()
      log.debug(s"target died, aborting sending")
      context stop self
  }

}
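Putting the two hunks together, the complete throttling pattern looks roughly like the sketch below. This is a reconstruction for illustration under a hypothetical `ThrottledSender` name; the real `ThrottleForwarder` may differ in details the diff does not show:

```scala
import akka.actor.{Actor, ActorLogging, ActorRef, Terminated}
import scala.concurrent.duration.FiniteDuration

// Sends `messages` to `target` in chunks of `chunkSize`, one chunk per `delay`,
// and aborts cleanly if the target dies (that is what the added
// watch/Terminated handling buys us).
class ThrottledSender(target: ActorRef, messages: Iterable[Any], chunkSize: Int, delay: FiniteDuration)
  extends Actor with ActorLogging {

  import context.dispatcher
  case object Tick

  context watch target
  val clock = context.system.scheduler.schedule(delay, delay, self, Tick)

  def receive = group(messages)

  def group(remaining: Iterable[Any]): Receive = {
    case Tick if remaining.isEmpty =>
      clock.cancel()
      context stop self
    case Tick =>
      val (chunk, rest) = remaining.splitAt(chunkSize)
      chunk.foreach(target ! _)
      context become group(rest)
    case Terminated(_) =>
      clock.cancel() // without this the scheduler would keep ticking a dead actor
      context stop self
  }
}
```
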
@@ -0,0 +1,32 @@
package fr.acinq.eclair.wire

import fr.acinq.eclair.channel.{CMD_FAIL_HTLC, CMD_FAIL_MALFORMED_HTLC, CMD_FULFILL_HTLC, Command}
import fr.acinq.eclair.wire.FailureMessageCodecs.failureMessageCodec
import fr.acinq.eclair.wire.LightningMessageCodecs._
import scodec.Codec
import scodec.codecs._

object CommandCodecs {

  val cmdFulfillCodec: Codec[CMD_FULFILL_HTLC] =
    (("id" | int64) ::
      ("r" | binarydata(32)) ::
      ("commit" | provide(false))).as[CMD_FULFILL_HTLC]

  val cmdFailCodec: Codec[CMD_FAIL_HTLC] =
    (("id" | int64) ::
      ("reason" | either(bool, varsizebinarydata, failureMessageCodec)) ::
      ("commit" | provide(false))).as[CMD_FAIL_HTLC]

  val cmdFailMalformedCodec: Codec[CMD_FAIL_MALFORMED_HTLC] =
    (("id" | int64) ::
      ("onionHash" | binarydata(32)) ::
      ("failureCode" | uint16) ::
      ("commit" | provide(false))).as[CMD_FAIL_MALFORMED_HTLC]

  val cmdCodec: Codec[Command] = discriminated[Command].by(uint16)
    .typecase(0, cmdFulfillCodec)
    .typecase(1, cmdFailCodec)
    .typecase(2, cmdFailMalformedCodec)

}
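These codecs deliberately omit the `commit` flag and re-create it as `false` on read. A quick round-trip sketch, assuming the case class shapes implied by the codecs and scodec's usual `encode`/`decode` API:

```scala
import fr.acinq.bitcoin.BinaryData
import fr.acinq.eclair.channel.{CMD_FULFILL_HTLC, Command}
import fr.acinq.eclair.wire.CommandCodecs

val cmd: Command = CMD_FULFILL_HTLC(id = 42L, r = BinaryData("01" * 32))
val bits = CommandCodecs.cmdCodec.encode(cmd).require        // BitVector; .require fails loudly on error
val back = CommandCodecs.cmdCodec.decode(bits).require.value // decoded Command
assert(back == cmd) // `commit` was provide(false) on both sides, so the values match
```
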
@@ -4,6 +4,7 @@ import akka.actor.ActorSystem
import fr.acinq.bitcoin.{Block, Transaction}
import fr.acinq.eclair.blockchain._
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BitcoinJsonRPCClient, ExtendedBitcoinClient}
import org.json4s.JsonAST

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

@@ -11,7 +12,9 @@ import scala.concurrent.{ExecutionContext, Future}
/**
  * Created by PM on 26/04/2016.
  */
class TestBitcoinClient()(implicit system: ActorSystem) extends ExtendedBitcoinClient(new BitcoinJsonRPCClient("", "", "", 0)) {
class TestBitcoinClient()(implicit system: ActorSystem) extends ExtendedBitcoinClient(new BitcoinJsonRPCClient {
  override def invoke(method: String, params: Any*)(implicit ec: ExecutionContext): Future[JsonAST.JValue] = ???
}) {

  import scala.concurrent.ExecutionContext.Implicits.global
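The change replaces a dummy RPC endpoint with a direct stub of the (apparently abstract) `invoke` method. For tests that need canned answers rather than `???`, the same trick extends naturally; the method-name matching below is a hypothetical example, not code from the diff:

```scala
import org.json4s.JsonAST.{JInt, JValue}
import scala.concurrent.{ExecutionContext, Future}

val stubRpcClient = new BitcoinJsonRPCClient {
  override def invoke(method: String, params: Any*)(implicit ec: ExecutionContext): Future[JValue] =
    method match {
      case "getblockcount" => Future.successful(JInt(42)) // canned test answer
      case other           => Future.failed(new RuntimeException(s"unexpected RPC call: $other"))
    }
}
```
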
@@ -46,12 +46,12 @@ object TestConstants {
    smartfeeNBlocks = 3,
    feeBaseMsat = 546000,
    feeProportionalMillionth = 10,
    reserveToFundingRatio = 0.01, // note: not used (overriden below)
    reserveToFundingRatio = 0.01, // note: not used (overridden below)
    maxReserveToFundingRatio = 0.05,
    channelsDb = new SqliteChannelsDb(sqlite),
    peersDb = new SqlitePeersDb(sqlite),
    networkDb = new SqliteNetworkDb(sqlite),
    preimagesDb = new SqlitePreimagesDb(sqlite),
    pendingRelayDb = new SqlitePendingRelayDb(sqlite),
    paymentsDb = new SqlitePaymentsDb(sqlite),
    routerBroadcastInterval = 60 seconds,
    routerValidateInterval = 2 seconds,

@@ -62,7 +62,9 @@ object TestConstants {
    chainHash = Block.RegtestGenesisBlock.hash,
    channelFlags = 1,
    channelExcludeDuration = 5 seconds,
    watcherType = BITCOIND)
    watcherType = BITCOIND,
    paymentRequestExpiry = 1 hour,
    maxPendingPaymentRequests = 10000000)

  def id = nodeParams.privateKey.publicKey

@@ -100,12 +102,12 @@ object TestConstants {
    smartfeeNBlocks = 3,
    feeBaseMsat = 546000,
    feeProportionalMillionth = 10,
    reserveToFundingRatio = 0.01, // note: not used (overriden below)
    reserveToFundingRatio = 0.01, // note: not used (overridden below)
    maxReserveToFundingRatio = 0.05,
    channelsDb = new SqliteChannelsDb(sqlite),
    peersDb = new SqlitePeersDb(sqlite),
    networkDb = new SqliteNetworkDb(sqlite),
    preimagesDb = new SqlitePreimagesDb(sqlite),
    pendingRelayDb = new SqlitePendingRelayDb(sqlite),
    paymentsDb = new SqlitePaymentsDb(sqlite),
    routerBroadcastInterval = 60 seconds,
    routerValidateInterval = 2 seconds,

@@ -116,7 +118,9 @@ object TestConstants {
    chainHash = Block.RegtestGenesisBlock.hash,
    channelFlags = 1,
    channelExcludeDuration = 5 seconds,
    watcherType = BITCOIND)
    watcherType = BITCOIND,
    paymentRequestExpiry = 1 hour,
    maxPendingPaymentRequests = 10000000)

  def id = nodeParams.privateKey.publicKey
@@ -11,7 +11,7 @@ import com.typesafe.config.ConfigFactory
import fr.acinq.bitcoin._
import fr.acinq.bitcoin.Crypto.PrivateKey
import fr.acinq.eclair.Kit
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BitcoinJsonRPCClient, ExtendedBitcoinClient}
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BasicBitcoinJsonRPCClient, BitcoinJsonRPCClient, ExtendedBitcoinClient}
import fr.acinq.eclair.integration.IntegrationSpec
import grizzled.slf4j.Logging
import org.json4s.DefaultFormats

@@ -48,7 +48,7 @@ class ExtendedBitcoinClientSpec extends TestKit(ActorSystem("test")) with FunSui
    Files.copy(classOf[IntegrationSpec].getResourceAsStream("/integration/bitcoin.conf"), new File(PATH_BITCOIND_DATADIR.toString, "bitcoin.conf").toPath)

    bitcoind = s"$PATH_BITCOIND -datadir=$PATH_BITCOIND_DATADIR".run()
    bitcoinrpcclient = new BitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoinrpcclient = new BasicBitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoincli = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case BitcoinReq(method) => bitcoinrpcclient.invoke(method) pipeTo sender

@@ -76,7 +76,7 @@ class ExtendedBitcoinClientSpec extends TestKit(ActorSystem("test")) with FunSui
    }, max = 30 seconds, interval = 500 millis)
    logger.info(s"generating initial blocks...")
    sender.send(bitcoincli, BitcoinReq("generate", 500))
    sender.expectMsgType[JValue](10 seconds)
    sender.expectMsgType[JValue](20 seconds)

    val future = for {
      count <- client.getBlockCount

@@ -88,17 +88,17 @@ class ExtendedBitcoinClientSpec extends TestKit(ActorSystem("test")) with FunSui
      priv = PrivateKey("01" * 32)
      wif = Base58Check.encode(Base58.Prefix.SecretKeyTestnet, priv.toBin)
      _ = sender.send(bitcoincli, BitcoinReq("importprivkey", wif))
      _ = sender.expectMsgType[JValue](10 seconds)
      _ = sender.expectMsgType[JValue](20 seconds)
      // send money to our private key
      address = Base58Check.encode(Base58.Prefix.PubkeyAddressTestnet, priv.publicKey.hash160)
      _ = client.sendFromAccount("", address, 1.0)
      _ = sender.send(bitcoincli, BitcoinReq("generate", 1))
      _ = sender.expectMsgType[JValue](10 seconds)
      _ = sender.expectMsgType[JValue](20 seconds)
      // and check that we find a utxo for our private key
      unspentAddresses1 <- client.listUnspentAddresses
      _ = assert(unspentAddresses1 contains address)
    } yield ()

    Await.result(future, 10 seconds)
    Await.result(future, 20 seconds)
  }
}
@@ -1,137 +0,0 @@
package fr.acinq.eclair.blockchain.bitcoind

import java.io.File
import java.nio.file.Files
import java.util.UUID

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.pipe
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import fr.acinq.bitcoin.{MilliBtc, Satoshi, Script}
import fr.acinq.eclair.blockchain._
import fr.acinq.eclair.blockchain.bitcoind.rpc.BitcoinJsonRPCClient
import fr.acinq.eclair.randomKey
import fr.acinq.eclair.transactions.Scripts
import grizzled.slf4j.Logging
import org.bitcoinj.script.{Script => BitcoinjScript}
import org.json4s.JsonAST.JValue
import org.json4s.{DefaultFormats, JString}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, FunSuiteLike}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.sys.process.{Process, _}

@RunWith(classOf[JUnitRunner])
class BitcoinCoreWalletSpec extends TestKit(ActorSystem("test")) with FunSuiteLike with BeforeAndAfterAll with Logging {

  val INTEGRATION_TMP_DIR = s"${System.getProperty("buildDirectory")}/bitcoinj-${UUID.randomUUID().toString}"
  logger.info(s"using tmp dir: $INTEGRATION_TMP_DIR")

  val PATH_BITCOIND = new File(System.getProperty("buildDirectory"), "bitcoin-0.14.0/bin/bitcoind")
  val PATH_BITCOIND_DATADIR = new File(INTEGRATION_TMP_DIR, "datadir-bitcoin")

  var bitcoind: Process = null
  var bitcoinrpcclient: BitcoinJsonRPCClient = null
  var bitcoincli: ActorRef = null

  implicit val formats = DefaultFormats

  case class BitcoinReq(method: String, params: Any*)

  override def beforeAll(): Unit = {
    Files.createDirectories(PATH_BITCOIND_DATADIR.toPath)
    Files.copy(classOf[BitcoinCoreWalletSpec].getResourceAsStream("/integration/bitcoin.conf"), new File(PATH_BITCOIND_DATADIR.toString, "bitcoin.conf").toPath)

    bitcoind = s"$PATH_BITCOIND -datadir=$PATH_BITCOIND_DATADIR".run()
    bitcoinrpcclient = new BitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoincli = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case BitcoinReq(method) => bitcoinrpcclient.invoke(method) pipeTo sender
        case BitcoinReq(method, params) => bitcoinrpcclient.invoke(method, params) pipeTo sender
        case BitcoinReq(method, param1, param2) => bitcoinrpcclient.invoke(method, param1, param2) pipeTo sender
      }
    }))
  }

  override def afterAll(): Unit = {
    // gracefully stopping bitcoin will make it store its state cleanly to disk, which is good for later debugging
    logger.info(s"stopping bitcoind")
    val sender = TestProbe()
    sender.send(bitcoincli, BitcoinReq("stop"))
    sender.expectMsgType[JValue]
    bitcoind.exitValue()
    // logger.warn(s"starting bitcoin-qt")
    // val PATH_BITCOINQT = new File(System.getProperty("buildDirectory"), "bitcoin-0.14.0/bin/bitcoin-qt").toPath
    // bitcoind = s"$PATH_BITCOINQT -datadir=$PATH_BITCOIND_DATADIR".run()
  }

  test("wait bitcoind ready") {
    val sender = TestProbe()
    logger.info(s"waiting for bitcoind to initialize...")
    awaitCond({
      sender.send(bitcoincli, BitcoinReq("getnetworkinfo"))
      sender.receiveOne(5 second).isInstanceOf[JValue]
    }, max = 30 seconds, interval = 500 millis)
    logger.info(s"generating initial blocks...")
    sender.send(bitcoincli, BitcoinReq("generate", 500))
    sender.expectMsgType[JValue](30 seconds)
  }

  test("create/commit/rollback funding txes") {
    import collection.JavaConversions._
    val commonConfig = ConfigFactory.parseMap(Map("eclair.chain" -> "regtest", "eclair.spv" -> false, "eclair.server.public-ips.1" -> "localhost", "eclair.bitcoind.port" -> 28333, "eclair.bitcoind.rpcport" -> 28332, "eclair.bitcoind.zmq" -> "tcp://127.0.0.1:28334", "eclair.router-broadcast-interval" -> "2 second", "eclair.auto-reconnect" -> false))
    val config = ConfigFactory.load(commonConfig).getConfig("eclair")
    val bitcoinClient = new BitcoinJsonRPCClient(
      user = config.getString("bitcoind.rpcuser"),
      password = config.getString("bitcoind.rpcpassword"),
      host = config.getString("bitcoind.host"),
      port = config.getInt("bitcoind.rpcport"))
    val wallet = new BitcoinCoreWallet(bitcoinClient)

    val sender = TestProbe()

    wallet.getBalance.pipeTo(sender.ref)
    assert(sender.expectMsgType[Satoshi] > Satoshi(0))

    wallet.getFinalAddress.pipeTo(sender.ref)
    assert(sender.expectMsgType[String].startsWith("2"))

    val fundingTxes = for (i <- 0 to 3) yield {
      val pubkeyScript = Script.write(Script.pay2wsh(Scripts.multiSig2of2(randomKey.publicKey, randomKey.publicKey)))
      wallet.makeFundingTx(pubkeyScript, MilliBtc(50), 10000).pipeTo(sender.ref)
      val MakeFundingTxResponse(fundingTx, _) = sender.expectMsgType[MakeFundingTxResponse]
      fundingTx
    }

    sender.send(bitcoincli, BitcoinReq("listlockunspent"))
    assert(sender.expectMsgType[JValue](10 seconds).children.size === 4)

    wallet.commit(fundingTxes(0)).pipeTo(sender.ref)
    assert(sender.expectMsgType[Boolean])

    wallet.rollback(fundingTxes(1)).pipeTo(sender.ref)
    assert(sender.expectMsgType[Boolean])

    wallet.commit(fundingTxes(2)).pipeTo(sender.ref)
    assert(sender.expectMsgType[Boolean])

    wallet.rollback(fundingTxes(3)).pipeTo(sender.ref)
    assert(sender.expectMsgType[Boolean])

    sender.send(bitcoincli, BitcoinReq("getrawtransaction", fundingTxes(0).txid.toString()))
    assert(sender.expectMsgType[JString](10 seconds).s === fundingTxes(0).toString())

    sender.send(bitcoincli, BitcoinReq("getrawtransaction", fundingTxes(2).txid.toString()))
    assert(sender.expectMsgType[JString](10 seconds).s === fundingTxes(2).toString())

    // NB: bitcoin core doesn't clear the locks when a tx is published
    sender.send(bitcoincli, BitcoinReq("listlockunspent"))
    assert(sender.expectMsgType[JValue](10 seconds).children.size === 2)

  }

}
@@ -9,7 +9,7 @@ import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.pipe
import akka.testkit.{TestKit, TestProbe}
import fr.acinq.bitcoin.{Satoshi, Script}
import fr.acinq.eclair.blockchain.bitcoind.rpc.BitcoinJsonRPCClient
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BasicBitcoinJsonRPCClient, BitcoinJsonRPCClient}
import fr.acinq.eclair.blockchain.{PublishAsap, WatchConfirmed, WatchEventConfirmed, WatchSpent}
import fr.acinq.eclair.channel.{BITCOIN_FUNDING_DEPTHOK, BITCOIN_FUNDING_SPENT}
import fr.acinq.eclair.randomKey

@@ -52,7 +52,7 @@ class BitcoinjSpec extends TestKit(ActorSystem("test")) with FunSuiteLike with B
    Files.copy(classOf[BitcoinjSpec].getResourceAsStream("/integration/bitcoin.conf"), new File(PATH_BITCOIND_DATADIR.toString, "bitcoin.conf").toPath)

    bitcoind = s"$PATH_BITCOIND -datadir=$PATH_BITCOIND_DATADIR".run()
    bitcoinrpcclient = new BitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoinrpcclient = new BasicBitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoincli = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case BitcoinReq(method) => bitcoinrpcclient.invoke(method) pipeTo sender
@@ -8,7 +8,7 @@ import java.util.UUID
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.pipe
import akka.testkit.{TestKit, TestProbe}
import fr.acinq.eclair.blockchain.bitcoind.rpc.BitcoinJsonRPCClient
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BasicBitcoinJsonRPCClient, BitcoinJsonRPCClient}
import grizzled.slf4j.Logging
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JInt, JValue}

@@ -49,7 +49,7 @@ class IntegrationSpec extends TestKit(ActorSystem("test")) with FunSuiteLike wit
    Files.createDirectories(PATH_BITCOIND_DATADIR.toPath)
    Files.copy(classOf[IntegrationSpec].getResourceAsStream("/integration/bitcoin.conf"), new File(PATH_BITCOIND_DATADIR.toString, "bitcoin.conf").toPath)

    bitcoinrpcclient = new BitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoinrpcclient = new BasicBitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoincli = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case BitcoinReq(method, Nil) =>
@@ -6,8 +6,9 @@ import fr.acinq.bitcoin.{BinaryData, Block, MnemonicCode, Satoshi}
import fr.acinq.eclair.blockchain.electrum.ElectrumClient.{ScriptHashSubscription, ScriptHashSubscriptionResponse}
import fr.acinq.eclair.blockchain.electrum.ElectrumWallet.{NewWalletReceiveAddress, WalletEvent, WalletParameters, WalletReady}
import org.junit.runner.RunWith
import org.scalatest.{BeforeAndAfterAll, FunSuite, FunSuiteLike}
import org.scalatest.FunSuiteLike
import org.scalatest.junit.JUnitRunner

import scala.concurrent.duration._

@RunWith(classOf[JUnitRunner])

@@ -26,7 +27,7 @@ class ElectrumWalletSimulatedClientSpec extends TestKit(ActorSystem("test")) wit

  val listener = TestProbe()
  system.eventStream.subscribe(listener.ref, classOf[WalletEvent])
  val wallet = TestFSMRef(new ElectrumWallet(mnemonics, system.actorOf(Props(new SimulatedClient())), WalletParameters(Block.RegtestGenesisBlock.hash, minimumFee = Satoshi(5000))))
  val wallet = TestFSMRef(new ElectrumWallet(seed, system.actorOf(Props(new SimulatedClient())), WalletParameters(Block.RegtestGenesisBlock.hash, minimumFee = Satoshi(5000))))

  // wallet sends a receive address notification as soon as it is created
  listener.expectMsgType[NewWalletReceiveAddress]
@@ -21,7 +21,7 @@ class ElectrumWalletSpec extends IntegrationSpec {
  var wallet: ActorRef = _

  test("wait until wallet is ready") {
    wallet = system.actorOf(Props(new ElectrumWallet(mnemonics, electrumClient, WalletParameters(Block.RegtestGenesisBlock.hash, minimumFee = Satoshi(5000)))), "wallet")
    wallet = system.actorOf(Props(new ElectrumWallet(seed, electrumClient, WalletParameters(Block.RegtestGenesisBlock.hash, minimumFee = Satoshi(5000)))), "wallet")
    val probe = TestProbe()
    awaitCond({
      probe.send(wallet, GetData)
@@ -1,98 +0,0 @@
package fr.acinq.eclair.channel

import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicLong

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.testkit.TestProbe
import fr.acinq.bitcoin.{BinaryData, Crypto}
import fr.acinq.eclair.TestConstants.{Alice, Bob}
import fr.acinq.eclair._
import fr.acinq.eclair.blockchain._
import fr.acinq.eclair.blockchain.bitcoind.ZmqWatcher
import fr.acinq.eclair.payment.Relayer
import fr.acinq.eclair.wire.{Init, UpdateAddHtlc}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner

import scala.concurrent.duration._
import scala.util.Random

@RunWith(classOf[JUnitRunner])
class ThroughputSpec extends FunSuite {
  ignore("throughput") {
    implicit val system = ActorSystem()
    val pipe = system.actorOf(Props[Pipe], "pipe")
    val blockchain = system.actorOf(ZmqWatcher.props(new TestBitcoinClient()), "blockchain")
    val paymentHandler = system.actorOf(Props(new Actor() {
      val random = new Random()

      def generateR(): BinaryData = {
        val r = Array.fill[Byte](32)(0)
        random.nextBytes(r)
        r
      }

      context.become(run(Map()))

      override def receive: Receive = ???

      // TODO: store this map on file ?
      def run(h2r: Map[BinaryData, BinaryData]): Receive = {
        case ('add, tgt: ActorRef) =>
          val r = generateR()
          val h: BinaryData = Crypto.sha256(r)
          tgt ! CMD_ADD_HTLC(1, h, 1)
          context.become(run(h2r + (h -> r)))

        case ('sig, tgt: ActorRef) => tgt ! CMD_SIGN

        case htlc: UpdateAddHtlc if h2r.contains(htlc.paymentHash) =>
          val r = h2r(htlc.paymentHash)
          sender ! CMD_FULFILL_HTLC(htlc.id, r)
          context.become(run(h2r - htlc.paymentHash))
      }
    }), "payment-handler")
    val registerA = TestProbe()
    val registerB = TestProbe()
    val relayerA = system.actorOf(Relayer.props(Alice.nodeParams, registerA.ref, paymentHandler))
    val relayerB = system.actorOf(Relayer.props(Bob.nodeParams, registerB.ref, paymentHandler))
    val wallet = new TestWallet
    val alice = system.actorOf(Channel.props(Alice.nodeParams, wallet, Bob.id, blockchain, ???, relayerA), "a")
    val bob = system.actorOf(Channel.props(Bob.nodeParams, wallet, Alice.id, blockchain, ???, relayerB), "b")
    val aliceInit = Init(Alice.channelParams.globalFeatures, Alice.channelParams.localFeatures)
    val bobInit = Init(Bob.channelParams.globalFeatures, Bob.channelParams.localFeatures)
    alice ! INPUT_INIT_FUNDER("00" * 32, TestConstants.fundingSatoshis, TestConstants.pushMsat, TestConstants.feeratePerKw, Alice.channelParams, pipe, bobInit, ChannelFlags.Empty)
    bob ! INPUT_INIT_FUNDEE("00" * 32, Bob.channelParams, pipe, aliceInit)

    val latch = new CountDownLatch(2)
    val listener = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case ChannelStateChanged(_, _, _, _, NORMAL, _) => latch.countDown()
      }
    }), "listener")
    system.eventStream.subscribe(listener, classOf[ChannelEvent])

    pipe ! (alice, bob)
    latch.await()

    var i = new AtomicLong(0)
    val random = new Random()

    def msg = random.nextInt(100) % 5 match {
      case 0 | 1 | 2 | 3 => 'add
      case 4 => 'sig
    }

    import scala.concurrent.ExecutionContext.Implicits.global
    system.scheduler.schedule(0 seconds, 50 milliseconds, new Runnable() {
      override def run(): Unit = paymentHandler ! (msg, alice)
    })
    system.scheduler.schedule(5 seconds, 70 milliseconds, new Runnable() {
      override def run(): Unit = paymentHandler ! (msg, bob)
    })

    Thread.sleep(Long.MaxValue)
  }
}
@@ -99,7 +99,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(500000000, "11" * 42, expiry = 400144)
      sender.send(alice, add)
      val error = InvalidPaymentHash(channelId(alice))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      alice2bob.expectNoMsg(200 millis)
    }
  }

@@ -111,7 +111,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = 300000)
      sender.send(alice, add)
      val error = ExpiryCannotBeInThePast(channelId(alice), 300000, 400000)
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      alice2bob.expectNoMsg(200 millis)
    }
  }

@@ -123,7 +123,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(50, "11" * 32, 400144)
      sender.send(alice, add)
      val error = HtlcValueTooSmall(channelId(alice), 1000, 50)
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      alice2bob.expectNoMsg(200 millis)
    }
  }

@@ -135,7 +135,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(Int.MaxValue, "11" * 32, 400144)
      sender.send(alice, add)
      val error = InsufficientFunds(channelId(alice), amountMsat = Int.MaxValue, missingSatoshis = 1376443, reserveSatoshis = 20000, feesSatoshis = 8960)
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      alice2bob.expectNoMsg(200 millis)
    }
  }

@@ -156,7 +156,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(1000000, "44" * 32, 400144)
      sender.send(alice, add)
      val error = InsufficientFunds(channelId(alice), amountMsat = 1000000, missingSatoshis = 1000, reserveSatoshis = 20000, feesSatoshis = 12400)
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      alice2bob.expectNoMsg(200 millis)
    }
  }

@@ -174,7 +174,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(500000000, "33" * 32, 400144)
      sender.send(alice, add)
      val error = InsufficientFunds(channelId(alice), amountMsat = 500000000, missingSatoshis = 332400, reserveSatoshis = 20000, feesSatoshis = 12400)
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      alice2bob.expectNoMsg(200 millis)
    }
  }

@@ -186,7 +186,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(151000000, "11" * 32, 400144)
      sender.send(bob, add)
      val error = HtlcValueTooHighInFlight(channelId(bob), maximum = 150000000, actual = 151000000)
      sender.expectMsg(Failure(AddHtlcFailed(channelId(bob), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(bob), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      bob2alice.expectNoMsg(200 millis)
    }
  }

@@ -204,7 +204,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(10000000, "33" * 32, 400144)
      sender.send(alice, add)
      val error = TooManyAcceptedHtlcs(channelId(alice), maximum = 30)
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      alice2bob.expectNoMsg(200 millis)
    }
  }

@@ -224,7 +224,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add2 = CMD_ADD_HTLC(TestConstants.fundingSatoshis * 2 / 3 * 1000, "22" * 32, 400144)
      sender.send(alice, add2)
      val error = InsufficientFunds(channelId(alice), add2.amountMsat, 564012, 20000, 10680)
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add2.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      alice2bob.expectNoMsg(200 millis)
    }
  }

@@ -242,7 +242,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = 400144)
      sender.send(alice, add)
      val error = NoMoreHtlcsClosingInProgress(channelId(alice))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      alice2bob.expectNoMsg(200 millis)
    }
  }

@@ -267,7 +267,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      bob2alice.forward(alice)
      sender.send(alice, add2)
      val error = NoMoreHtlcsClosingInProgress(channelId(alice))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add2.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate))))
    }
  }

@@ -1630,7 +1630,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
    within(30 seconds) {
      val sender = TestProbe()

      // initally we have :
      // initially we have :
      // alice = 800 000
      // bob = 200 000
      def send(): Transaction = {
@@ -85,7 +85,7 @@ class ShutdownStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = 300000)
      sender.send(alice, add)
      val error = ChannelUnavailable(channelId(alice))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), None)))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), None)))
      alice2bob.expectNoMsg(200 millis)
    }
  }
@@ -56,7 +56,7 @@ class NegotiatingStateSpec extends TestkitBaseClass with StateTestsHelperMethods
      val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = 300000)
      sender.send(alice, add)
      val error = ChannelUnavailable(channelId(alice))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), None)))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), None)))
      alice2bob.expectNoMsg(200 millis)
    }
  }
@@ -7,7 +7,7 @@ import fr.acinq.eclair.TestkitBaseClass
import fr.acinq.eclair.blockchain._
import fr.acinq.eclair.channel.states.StateTestsHelperMethods
import fr.acinq.eclair.channel.{Data, State, _}
import fr.acinq.eclair.payment.{AckFulfillCmd, ForwardAdd, ForwardFulfill, Local}
import fr.acinq.eclair.payment.{CommandBuffer, ForwardAdd, ForwardFulfill, Local}
import fr.acinq.eclair.transactions.Scripts
import fr.acinq.eclair.wire._
import org.junit.runner.RunWith

@@ -36,7 +36,7 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      fulfillHtlc(htlc.id, r, bob, alice, bob2alice, alice2bob)
      relayer.expectMsgType[ForwardFulfill]
      crossSign(bob, alice, bob2alice, alice2bob)
      relayer.expectMsgType[AckFulfillCmd]
      relayer.expectMsgType[CommandBuffer.CommandAck]
      val bobCommitTx2 = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.localCommit.publishableTxs.commitTx.tx
      bobCommitTx1 :: bobCommitTx2 :: Nil
    }).flatten

@@ -96,7 +96,7 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
      val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = 300000)
      sender.send(alice, add)
      val error = ChannelUnavailable(channelId(alice))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), error, Local(Some(sender.ref)), None)))
      sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), None)))
      alice2bob.expectNoMsg(200 millis)
    }
  }
@@ -17,7 +17,7 @@ object NoiseDemo extends App {

  def receive = ???

  val handskaheState = if (isWriter) {
  val handshakeState = if (isWriter) {
    val state = Noise.HandshakeState.initializeWriter(
      Noise.handshakePatternXK,
      "lightning".getBytes(),

@@ -35,7 +35,7 @@ object NoiseDemo extends App {
    state
  }

  context become handshake(handskaheState)
  context become handshake(handshakeState)

  def toNormal(enc: CipherState, dec: CipherState) = {
    unstashAll()
@@ -39,8 +39,8 @@ class SphinxSpec extends FunSuite {
  hop_blinding_factor[4] = 0xc96e00dddaf57e7edcd4fb5954be5b65b09f17cb6d20651b4e90315be5779205
  hop_ephemeral_pubkey[4] = 0x03a214ebd875aab6ddfd77f22c5e7311d7f77f17a169e599f157bbcdae8bf071f4
  */
  test("generate ephemereal keys and secrets") {
    val (ephkeys, sharedsecrets) = computeEphemerealPublicKeysAndSharedSecrets(sessionKey, publicKeys)
  test("generate ephemeral keys and secrets") {
    val (ephkeys, sharedsecrets) = computeEphemeralPublicKeysAndSharedSecrets(sessionKey, publicKeys)
    assert(ephkeys(0) == PublicKey(BinaryData("0x02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619")))
    assert(sharedsecrets(0) == BinaryData("0x53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66"))
    assert(ephkeys(1) == PublicKey(BinaryData("0x028f9438bfbf7feac2e108d677e3a82da596be706cc1cf342b75c7b7e22bf4e6e2")))

@@ -57,7 +57,7 @@ class SphinxSpec extends FunSuite {
  filler = 0xc6b008cf6414ed6e4c42c291eb505e9f22f5fe7d0ecdd15a833f4d016ac974d33adc6ea3293e20859e87ebfb937ba406abd025d14af692b12e9c9c2adbe307a679779259676211c071e614fdb386d1ff02db223a5b2fae03df68d321c7b29f7c7240edd3fa1b7cb6903f89dc01abf41b2eb0b49b6b8d73bb0774b58204c0d0e96d3cce45ad75406be0bc009e327b3e712a4bd178609c00b41da2daf8a4b0e1319f07a492ab4efb056f0f599f75e6dc7e0d10ce1cf59088ab6e873de377343880f7a24f0e36731a0b72092f8d5bc8cd346762e93b2bf203d00264e4bc136fc142de8f7b69154deb05854ea88e2d7506222c95ba1aab065c8a851391377d3406a35a9af3ac
  */
  test("generate filler") {
    val (_, sharedsecrets) = computeEphemerealPublicKeysAndSharedSecrets(sessionKey, publicKeys)
    val (_, sharedsecrets) = computeEphemeralPublicKeysAndSharedSecrets(sessionKey, publicKeys)
    val filler = generateFiller("rho", sharedsecrets.dropRight(1), PayloadLength + MacLength, 20)
    assert(filler == BinaryData("0xc6b008cf6414ed6e4c42c291eb505e9f22f5fe7d0ecdd15a833f4d016ac974d33adc6ea3293e20859e87ebfb937ba406abd025d14af692b12e9c9c2adbe307a679779259676211c071e614fdb386d1ff02db223a5b2fae03df68d321c7b29f7c7240edd3fa1b7cb6903f89dc01abf41b2eb0b49b6b8d73bb0774b58204c0d0e96d3cce45ad75406be0bc009e327b3e712a4bd178609c00b41da2daf8a4b0e1319f07a492ab4efb056f0f599f75e6dc7e0d10ce1cf59088ab6e873de377343880f7a24f0e36731a0b72092f8d5bc8cd346762e93b2bf203d00264e4bc136fc142de8f7b69154deb05854ea88e2d7506222c95ba1aab065c8a851391377d3406a35a9af3ac"))
  }
@@ -45,8 +45,8 @@ class TransportHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLik
    initiator ! Listener(probe1.ref)
    responder ! Listener(probe2.ref)

    awaitCond(initiator.stateName == TransportHandler.WaitingForCyphertext)
    awaitCond(responder.stateName == TransportHandler.WaitingForCyphertext)
    awaitCond(initiator.stateName == TransportHandler.WaitingForCiphertext)
    awaitCond(responder.stateName == TransportHandler.WaitingForCiphertext)

    initiator.tell(BinaryData("hello".getBytes), probe1.ref)
    probe2.expectMsg(BinaryData("hello".getBytes))

@@ -77,8 +77,8 @@ class TransportHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLik
    initiator ! Listener(probe1.ref)
    responder ! Listener(probe2.ref)

    awaitCond(initiator.stateName == TransportHandler.WaitingForCyphertext)
    awaitCond(responder.stateName == TransportHandler.WaitingForCyphertext)
    awaitCond(initiator.stateName == TransportHandler.WaitingForCiphertext)
    awaitCond(responder.stateName == TransportHandler.WaitingForCiphertext)

    initiator.tell(MyMessage("hello"), probe1.ref)
    probe2.expectMsg(MyMessage("hello"))

@@ -107,8 +107,8 @@ class TransportHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLik
    initiator ! Listener(probe1.ref)
    responder ! Listener(probe2.ref)

    awaitCond(initiator.stateName == TransportHandler.WaitingForCyphertext)
    awaitCond(responder.stateName == TransportHandler.WaitingForCyphertext)
    awaitCond(initiator.stateName == TransportHandler.WaitingForCiphertext)
    awaitCond(responder.stateName == TransportHandler.WaitingForCiphertext)

    initiator.tell(BinaryData("hello".getBytes), probe1.ref)
    probe2.expectMsg(BinaryData("hello".getBytes))
@@ -2,7 +2,7 @@ package fr.acinq.eclair.db

import java.sql.DriverManager

import fr.acinq.eclair.db.sqlite.{SqliteChannelsDb, SqlitePreimagesDb}
import fr.acinq.eclair.db.sqlite.{SqliteChannelsDb, SqlitePendingRelayDb}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner

@@ -21,7 +21,7 @@ class SqliteChannelsDbSpec extends FunSuite {
  test("add/remove/list channels") {
    val sqlite = inmem
    val db = new SqliteChannelsDb(sqlite)
    new SqlitePreimagesDb(sqlite) // needed by db.removeChannel
    new SqlitePendingRelayDb(sqlite) // needed by db.removeChannel

    val channel = ChannelStateSpec.normal
@@ -3,7 +3,7 @@ package fr.acinq.eclair.db
import java.net.{InetAddress, InetSocketAddress}
import java.sql.DriverManager

import fr.acinq.bitcoin.{Block, Crypto}
import fr.acinq.bitcoin.{Block, Crypto, Satoshi}
import fr.acinq.eclair.db.sqlite.SqliteNetworkDb
import fr.acinq.eclair.randomKey
import fr.acinq.eclair.router.Announcements

@@ -54,15 +54,20 @@ class SqliteNetworkDbSpec extends FunSuite {
    val channel_2 = Announcements.makeChannelAnnouncement(Block.RegtestGenesisBlock.hash, 43, randomKey.publicKey, randomKey.publicKey, randomKey.publicKey, randomKey.publicKey, sig, sig, sig, sig)
    val channel_3 = Announcements.makeChannelAnnouncement(Block.RegtestGenesisBlock.hash, 44, randomKey.publicKey, randomKey.publicKey, randomKey.publicKey, randomKey.publicKey, sig, sig, sig, sig)

    val txid_1 = randomKey.toBin
    val txid_2 = randomKey.toBin
    val txid_3 = randomKey.toBin
    val capacity = Satoshi(10000)

    assert(db.listChannels().toSet === Set.empty)
    db.addChannel(channel_1)
    db.addChannel(channel_1) // duplicate is ignored
    db.addChannel(channel_1, txid_1, capacity)
    db.addChannel(channel_1, txid_1, capacity) // duplicate is ignored
    assert(db.listChannels().size === 1)
    db.addChannel(channel_2)
    db.addChannel(channel_3)
    assert(db.listChannels().toSet === Set(channel_1, channel_2, channel_3))
    db.addChannel(channel_2, txid_2, capacity)
    db.addChannel(channel_3, txid_3, capacity)
    assert(db.listChannels().toSet === Set((channel_1, (txid_1, capacity)), (channel_2, (txid_2, capacity)), (channel_3, (txid_3, capacity))))
    db.removeChannel(channel_2.shortChannelId)
    assert(db.listChannels().toSet === Set(channel_1, channel_3))
    assert(db.listChannels().toSet === Set((channel_1, (txid_1, capacity)), (channel_3, (txid_3, capacity))))

    val channel_update_1 = Announcements.makeChannelUpdate(Block.RegtestGenesisBlock.hash, randomKey, randomKey.publicKey, 42, 5, 7000000, 50000, 100, true)
    val channel_update_2 = Announcements.makeChannelUpdate(Block.RegtestGenesisBlock.hash, randomKey, randomKey.publicKey, 43, 5, 7000000, 50000, 100, true)

@@ -75,7 +80,7 @@ class SqliteNetworkDbSpec extends FunSuite {
    intercept[SQLiteException](db.addChannelUpdate(channel_update_2))
    db.addChannelUpdate(channel_update_3)
    db.removeChannel(channel_3.shortChannelId)
    assert(db.listChannels().toSet === Set(channel_1))
    assert(db.listChannels().toSet === Set((channel_1, (txid_1, capacity))))
    assert(db.listChannelUpdates().toSet === Set(channel_update_1))
    db.updateChannelUpdate(channel_update_1)
  }
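A small usage sketch of the updated schema, assuming the signatures the spec exercises (`addChannel` now records the funding txid and capacity, and `listChannels` returns them alongside each announcement):

```scala
import fr.acinq.bitcoin.{BinaryData, Satoshi}
import fr.acinq.eclair.db.sqlite.SqliteNetworkDb
import fr.acinq.eclair.wire.ChannelAnnouncement

// Hypothetical helper, for illustration only.
def storeAndDump(db: SqliteNetworkDb, ann: ChannelAnnouncement, fundingTxid: BinaryData): Unit = {
  db.addChannel(ann, fundingTxid, Satoshi(10000))
  db.listChannels().foreach { case (channel, (txid, capacity)) =>
    println(s"${channel.shortChannelId} funded by $txid with capacity $capacity")
  }
}
```
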
@@ -0,0 +1,51 @@
package fr.acinq.eclair.db

import java.sql.DriverManager

import fr.acinq.eclair.channel.{CMD_FAIL_HTLC, CMD_FAIL_MALFORMED_HTLC, CMD_FULFILL_HTLC}
import fr.acinq.eclair.db.sqlite.SqlitePendingRelayDb
import fr.acinq.eclair.randomBytes
import fr.acinq.eclair.wire.FailureMessageCodecs
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class SqlitePendingRelayDbSpec extends FunSuite {

  def inmem = DriverManager.getConnection("jdbc:sqlite::memory:")

  test("init sqlite 2 times in a row") {
    val sqlite = inmem
    val db1 = new SqlitePendingRelayDb(sqlite)
    val db2 = new SqlitePendingRelayDb(sqlite)
  }

  test("add/remove/list messages") {
    val sqlite = inmem
    val db = new SqlitePendingRelayDb(sqlite)

    val channelId1 = randomBytes(32)
    val channelId2 = randomBytes(32)
    val msg0 = CMD_FULFILL_HTLC(0, randomBytes(32))
    val msg1 = CMD_FULFILL_HTLC(1, randomBytes(32))
    val msg2 = CMD_FAIL_HTLC(2, Left(randomBytes(32)))
    val msg3 = CMD_FAIL_HTLC(3, Left(randomBytes(32)))
    val msg4 = CMD_FAIL_MALFORMED_HTLC(4, randomBytes(32), FailureMessageCodecs.BADONION)

    assert(db.listPendingRelay(channelId1).toSet === Set.empty)
    db.addPendingRelay(channelId1, msg0.id, msg0)
    db.addPendingRelay(channelId1, msg0.id, msg0) // duplicate
    db.addPendingRelay(channelId1, msg1.id, msg1)
    db.addPendingRelay(channelId1, msg2.id, msg2)
    db.addPendingRelay(channelId1, msg3.id, msg3)
    db.addPendingRelay(channelId1, msg4.id, msg4)
    db.addPendingRelay(channelId2, msg0.id, msg0) // same messages but for different channel
    db.addPendingRelay(channelId2, msg1.id, msg1)
    assert(db.listPendingRelay(channelId1).toSet === Set(msg0, msg1, msg2, msg3, msg4))
    assert(db.listPendingRelay(channelId2).toSet === Set(msg0, msg1))
    db.removePendingRelay(channelId1, msg1.id)
    assert(db.listPendingRelay(channelId1).toSet === Set(msg0, msg2, msg3, msg4))
  }

}
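The point of this store is crash safety: fulfill/fail commands that could not yet be relayed survive a restart. A hypothetical replay sketch; eclair's actual wiring goes through the channel actors and may differ:

```scala
import akka.actor.ActorRef
import fr.acinq.bitcoin.BinaryData
import fr.acinq.eclair.db.sqlite.SqlitePendingRelayDb

// On reconnection, re-send every stored command for the channel; each entry
// would be dropped via removePendingRelay only once irrevocably signed.
def replayPendingRelay(db: SqlitePendingRelayDb, channelId: BinaryData, channel: ActorRef): Unit =
  db.listPendingRelay(channelId).foreach(cmd => channel ! cmd)
```
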
@@ -1,42 +0,0 @@
package fr.acinq.eclair.db

import java.sql.DriverManager

import fr.acinq.eclair.db.sqlite.SqlitePreimagesDb
import fr.acinq.eclair.randomBytes
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class SqlitePreimagesDbSpec extends FunSuite {

  def inmem = DriverManager.getConnection("jdbc:sqlite::memory:")

  test("init sqlite 2 times in a row") {
    val sqlite = inmem
    val db1 = new SqlitePreimagesDb(sqlite)
    val db2 = new SqlitePreimagesDb(sqlite)
  }

  test("add/remove/list preimages") {
    val sqlite = inmem
    val db = new SqlitePreimagesDb(sqlite)

    val channelId = randomBytes(32)
    val preimage0 = randomBytes(32)
    val preimage1 = randomBytes(32)
    val preimage2 = randomBytes(32)
    val preimage3 = randomBytes(32)

    assert(db.listPreimages(channelId).toSet === Set.empty)
    db.addPreimage(channelId, 0, preimage0)
    db.addPreimage(channelId, 0, preimage0) // duplicate
    db.addPreimage(channelId, 1, preimage1)
    db.addPreimage(channelId, 2, preimage2)
    assert(db.listPreimages(channelId).sortBy(_._2) === (channelId, 0, preimage0) :: (channelId, 1, preimage1) :: (channelId, 2, preimage2) :: Nil)
    db.removePreimage(channelId, 1)
    assert(db.listPreimages(channelId).sortBy(_._2) === (channelId, 0, preimage0) :: (channelId, 2, preimage2) :: Nil)
  }

}
@@ -11,7 +11,7 @@ import com.google.common.net.HostAndPort
import com.typesafe.config.{Config, ConfigFactory}
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.bitcoin.{Base58, Base58Check, BinaryData, Block, Crypto, MilliSatoshi, OP_CHECKSIG, OP_DUP, OP_EQUAL, OP_EQUALVERIFY, OP_HASH160, OP_PUSHDATA, Satoshi, Script}
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BitcoinJsonRPCClient, ExtendedBitcoinClient}
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BasicBitcoinJsonRPCClient, BitcoinJsonRPCClient, ExtendedBitcoinClient}
import fr.acinq.eclair.blockchain.bitcoinj.BitcoinjWallet
import fr.acinq.eclair.blockchain.{Watch, WatchConfirmed}
import fr.acinq.eclair.channel.Register.Forward

@@ -67,7 +67,7 @@ class BasicIntegrationSpvSpec extends TestKit(ActorSystem("test")) with FunSuite
    Files.copy(classOf[BasicIntegrationSpvSpec].getResourceAsStream("/integration/bitcoin.conf"), new File(PATH_BITCOIND_DATADIR.toString, "bitcoin.conf").toPath)

    bitcoind = s"$PATH_BITCOIND -datadir=$PATH_BITCOIND_DATADIR".run()
    bitcoinrpcclient = new BitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoinrpcclient = new BasicBitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoincli = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case BitcoinReq(method) => bitcoinrpcclient.invoke(method) pipeTo sender

@@ -11,7 +11,7 @@ import com.google.common.net.HostAndPort
import com.typesafe.config.{Config, ConfigFactory}
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.bitcoin.{Base58, Base58Check, BinaryData, Block, Crypto, MilliSatoshi, OP_CHECKSIG, OP_DUP, OP_EQUAL, OP_EQUALVERIFY, OP_HASH160, OP_PUSHDATA, Satoshi, Script}
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BitcoinJsonRPCClient, ExtendedBitcoinClient}
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BasicBitcoinJsonRPCClient, BitcoinJsonRPCClient, ExtendedBitcoinClient}
import fr.acinq.eclair.blockchain.{Watch, WatchConfirmed}
import fr.acinq.eclair.channel.Register.Forward
import fr.acinq.eclair.channel._

@@ -61,7 +61,7 @@ class IntegrationSpec extends TestKit(ActorSystem("test")) with FunSuiteLike wit
    Files.copy(classOf[IntegrationSpec].getResourceAsStream("/integration/bitcoin.conf"), new File(PATH_BITCOIND_DATADIR.toString, "bitcoin.conf").toPath)

    bitcoind = s"$PATH_BITCOIND -datadir=$PATH_BITCOIND_DATADIR".run()
    bitcoinrpcclient = new BitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoinrpcclient = new BasicBitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 28332)
    bitcoincli = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case BitcoinReq(method) => bitcoinrpcclient.invoke(method) pipeTo sender

@@ -173,7 +173,7 @@ class IntegrationSpec extends TestKit(ActorSystem("test")) with FunSuiteLike wit
    val numberOfChannels = 11
    val channelEndpointsCount = 2 * numberOfChannels

    // we make sure all channels have set up their WatchConfirmed for the funding tx
    awaitCond({
      val watches = nodes.values.foldLeft(Set.empty[Watch]) {
        case (watches, setup) =>

@@ -529,6 +529,12 @@ class IntegrationSpec extends TestKit(ActorSystem("test")) with FunSuiteLike wit
    // we then generate enough blocks to make the htlc timeout
    sender.send(bitcoincli, BitcoinReq("generate", 11))
    sender.expectMsgType[JValue](10 seconds)
    // we generate more blocks for the htlc-timeout to reach enough confirmations
    awaitCond({
      sender.send(bitcoincli, BitcoinReq("generate", 1))
      sender.expectMsgType[JValue](10 seconds)
      paymentSender.msgAvailable
    }, max = 30 seconds, interval = 1 second)
    // this will fail the htlc
    val failed = paymentSender.expectMsgType[PaymentFailed](30 seconds)
    assert(failed.paymentHash === paymentHash)

@@ -577,6 +583,12 @@ class IntegrationSpec extends TestKit(ActorSystem("test")) with FunSuiteLike wit
    // we then generate enough blocks to make the htlc timeout
    sender.send(bitcoincli, BitcoinReq("generate", 11))
    sender.expectMsgType[JValue](10 seconds)
    // we generate more blocks for the claim-htlc-timeout to reach enough confirmations
    awaitCond({
      sender.send(bitcoincli, BitcoinReq("generate", 1))
      sender.expectMsgType[JValue](10 seconds)
      paymentSender.msgAvailable
    }, max = 30 seconds, interval = 1 second)
    // this will fail the htlc
    val failed = paymentSender.expectMsgType[PaymentFailed](30 seconds)
    assert(failed.paymentHash === paymentHash)

@@ -1,6 +1,6 @@
package fr.acinq.eclair.payment

import akka.actor.ActorSystem
import akka.actor.{ActorSystem, Status}
import akka.actor.Status.Failure
import akka.testkit.{TestKit, TestProbe}
import fr.acinq.bitcoin.{MilliSatoshi, Satoshi}

@@ -75,6 +75,21 @@ class PaymentHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLike
    assert(pr.amount.contains(MilliSatoshi(100000000L)) && pr.nodeId.toString == nodeParams.privateKey.publicKey.toString)
  }

  test("Payment request generation should fail when there are too many pending requests") {
    val nodeParams = Alice.nodeParams.copy(maxPendingPaymentRequests = 42)
    val handler = system.actorOf(LocalPaymentHandler.props(nodeParams))
    val sender = TestProbe()

    for (i <- 0 to nodeParams.maxPendingPaymentRequests) {
      sender.send(handler, ReceivePayment(None, s"Request #$i"))
      sender.expectMsgType[PaymentRequest]
    }

    // over limit
    sender.send(handler, ReceivePayment(None, "This one should fail"))
    assert(sender.expectMsgType[Status.Failure].cause.getMessage === s"too many pending payment requests (max=${nodeParams.maxPendingPaymentRequests})")
  }

  test("Payment request generation should succeed when the amount is not set") {
    val handler = system.actorOf(LocalPaymentHandler.props(Alice.nodeParams))
    val sender = TestProbe()
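The new test pins down both the cap and the error message. Below is a minimal sketch of the kind of guard it implies; the pending-request set is an assumption about `LocalPaymentHandler`'s internals, not code from the diff:

```scala
import akka.actor.{Actor, Status}

// Toy handler: accepts requests until a cap is reached, then fails the way the test expects.
class CappedHandler(max: Int) extends Actor {
  def run(pending: Set[String]): Receive = {
    case label: String if pending.size >= max =>
      sender() ! Status.Failure(new RuntimeException(s"too many pending payment requests (max=$max)"))
    case label: String =>
      sender() ! s"payment_request:$label"
      context become run(pending + label)
  }
  def receive = run(Set.empty)
}
```
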
@ -37,7 +37,7 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
|
|||
sender.expectMsg(PaymentFailed(request.paymentHash, LocalFailure(RouteNotFound) :: Nil))
|
||||
}
|
||||
|
||||
test("payment failed (unparseable failure)") { case (router, _) =>
|
||||
test("payment failed (unparsable failure)") { case (router, _) =>
|
||||
val relayer = TestProbe()
|
||||
val routerForwarder = TestProbe()
|
||||
val paymentFSM = TestFSMRef(new PaymentLifecycle(a, routerForwarder.ref, relayer.ref))
|
||||
|
@ -57,11 +57,22 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
|
|||
val WaitingForComplete(_, _, cmd1, Nil, _, _, _, hops) = paymentFSM.stateData
|
||||
|
||||
relayer.expectMsg(ForwardShortId(channelId_ab, cmd1))
|
||||
sender.send(paymentFSM, UpdateFailHtlc("00" * 32, 0, "42" * 32))
|
||||
sender.send(paymentFSM, UpdateFailHtlc("00" * 32, 0, "42" * 32)) // unparsable message
|
||||
|
||||
// then the payment lifecycle will ask for a new route excluding all intermediate nodes
|
||||
routerForwarder.expectMsg(RouteRequest(a, d, ignoreNodes = Set(c), ignoreChannels = Set.empty))
|
||||
awaitCond(paymentFSM.stateName == WAITING_FOR_ROUTE)
|
||||
|
||||
// let's simulate a response by the router with another route
|
||||
sender.send(paymentFSM, RouteResponse(hops, Set(c), Set.empty))
|
||||
awaitCond(paymentFSM.stateName == WAITING_FOR_PAYMENT_COMPLETE)
|
||||
val WaitingForComplete(_, _, cmd2, _, _, _, _, _) = paymentFSM.stateData
|
||||
// and reply a 2nd time with an unparsable failure
|
||||
relayer.expectMsg(ForwardShortId(channelId_ab, cmd2))
|
||||
sender.send(paymentFSM, UpdateFailHtlc("00" * 32, 0, "42" * 32)) // unparsable message
|
||||
|
||||
// we allow 2 tries, so we send a 2nd request to the router
|
||||
sender.expectMsg(PaymentFailed(request.paymentHash, UnreadableRemoteFailure(hops) :: UnreadableRemoteFailure(hops) :: Nil))
|
||||
|
||||
}
|
||||
|
||||
test("payment failed (first hop returns an UpdateFailMalformedHtlc)") { case (router, _) =>
|
||||
|
@@ -178,8 +189,8 @@ class PaymentLifecycleSpec extends BaseRouterSpec {

     sender.send(paymentFSM, UpdateFulfillHtlc("00" * 32, 0, "42" * 32))

-    sender.expectMsgType[PaymentSucceeded]
-    val PaymentSent(MilliSatoshi(request.amountMsat), feesPaid, request.paymentHash) = eventListener.expectMsgType[PaymentSent]
+    val paymentOK = sender.expectMsgType[PaymentSucceeded]
+    val PaymentSent(MilliSatoshi(request.amountMsat), feesPaid, request.paymentHash, paymentOK.paymentPreimage) = eventListener.expectMsgType[PaymentSent]
     assert(feesPaid.amount > 0)
   }

@@ -106,7 +106,7 @@ class PaymentRequestSpec extends FunSuite {
     assert(pr.nodeId == PublicKey(BinaryData("03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad")))
     assert(pr.description == Right(Crypto.sha256("One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon".getBytes)))
     assert(pr.fallbackAddress === Some("1RustyRX2oai4EYYDpQGWvEL62BBGqN9T"))
-    assert(pr.routingInfo() === List(List(
+    assert(pr.routingInfo === List(List(
       ExtraHop(PublicKey("029e03a901b85534ff1e92c43c74431f7ce72046060fcf7a95c37e148f78c77255"), 72623859790382856L, 1, 20, 3),
       ExtraHop(PublicKey("039e03a901b85534ff1e92c43c74431f7ce72046060fcf7a95c37e148f78c77255"), 217304205466536202L, 2, 30, 4)
     )))

@@ -58,8 +58,8 @@ class RelayerSpec extends TestkitBaseClass {
     assert(fwd.shortChannelId === channelUpdate_bc.shortChannelId)
     assert(fwd.message.upstream_opt === Some(add_ab))

-    sender.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    sender.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail to relay an htlc-add when we have no channel_update for the next channel") { case (relayer, register, paymentHandler) =>
@@ -72,12 +72,13 @@ class RelayerSpec extends TestkitBaseClass {

     sender.send(relayer, ForwardAdd(add_ab))

-    val fail = sender.expectMsgType[CMD_FAIL_HTLC]
-    assert(fail.id === add_ab.id)
-    assert(fail.reason == Right(UnknownNextPeer))
+    val fwdFail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]]
+    assert(fwdFail.channelId === add_ab.channelId)
+    assert(fwdFail.message.id === add_ab.id)
+    assert(fwdFail.message.reason === Right(UnknownNextPeer))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail to relay an htlc-add when register returns an error") { case (relayer, register, paymentHandler) =>
@@ -102,11 +103,11 @@ class RelayerSpec extends TestkitBaseClass {
     assert(fwd2.message.id === add_ab.id)
     assert(fwd2.message.reason === Right(UnknownNextPeer))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

-  test("fail relay an htlc-add when the channel is advertised as unusable (down)") { case (relayer, register, paymentHandler) =>
+  test("fail to relay an htlc-add when the channel is advertised as unusable (down)") { case (relayer, register, paymentHandler) =>
     val sender = TestProbe()

     // check that payments are sent properly
@@ -120,19 +121,22 @@ class RelayerSpec extends TestkitBaseClass {
     assert(fwd.shortChannelId === channelUpdate_bc.shortChannelId)
     assert(fwd.message.upstream_opt === Some(add_ab))

-    sender.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    sender.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)

-    // now tell the relayer that the channel is own and try again
+    // now tell the relayer that the channel is down and try again
     relayer ! LocalChannelDown(sender.ref, channelId = channelId_bc, shortChannelId = channelUpdate_bc.shortChannelId, remoteNodeId = TestConstants.Bob.nodeParams.privateKey.publicKey)

     val (cmd1, _) = buildCommand(finalAmountMsat, finalExpiry, "02" * 32, hops)
     val add_ab1 = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd1.amountMsat, cmd1.paymentHash, cmd1.expiry, cmd1.onion)
     sender.send(relayer, ForwardAdd(add_ab))
-    val fail = sender.expectMsgType[CMD_FAIL_HTLC]
-    assert(fail.id === add_ab1.id)
-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+
+    val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
+    assert(fail.id === add_ab.id)
+    assert(fail.reason === Right(UnknownNextPeer))
+
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail to relay an htlc-add when the requested channel is disabled") { case (relayer, register, paymentHandler) =>
@@ -147,12 +151,12 @@ class RelayerSpec extends TestkitBaseClass {

     sender.send(relayer, ForwardAdd(add_ab))

-    val fail = sender.expectMsgType[CMD_FAIL_HTLC]
+    val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
     assert(fail.id === add_ab.id)
     assert(fail.reason == Right(ChannelDisabled(channelUpdate_bc_disabled.flags, channelUpdate_bc_disabled)))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail to relay an htlc-add when the onion is malformed") { case (relayer, register, paymentHandler) =>
@@ -166,12 +170,12 @@ class RelayerSpec extends TestkitBaseClass {

     sender.send(relayer, ForwardAdd(add_ab))

-    val fail = sender.expectMsgType[CMD_FAIL_MALFORMED_HTLC]
+    val fail = register.expectMsgType[Register.Forward[CMD_FAIL_MALFORMED_HTLC]].message
     assert(fail.id === add_ab.id)
     assert(fail.onionHash == Crypto.sha256(add_ab.onionRoutingPacket))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail to relay an htlc-add when amount is below the next hop's requirements") { case (relayer, register, paymentHandler) =>
@@ -185,12 +189,12 @@ class RelayerSpec extends TestkitBaseClass {

     sender.send(relayer, ForwardAdd(add_ab))

-    val fail = sender.expectMsgType[CMD_FAIL_HTLC]
+    val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
     assert(fail.id === add_ab.id)
     assert(fail.reason == Right(AmountBelowMinimum(cmd.amountMsat, channelUpdate_bc)))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail to relay an htlc-add when expiry does not match next hop's requirements") { case (relayer, register, paymentHandler) =>
@@ -204,12 +208,12 @@ class RelayerSpec extends TestkitBaseClass {

     sender.send(relayer, ForwardAdd(add_ab))

-    val fail = sender.expectMsgType[CMD_FAIL_HTLC]
+    val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
     assert(fail.id === add_ab.id)
     assert(fail.reason == Right(IncorrectCltvExpiry(cmd.expiry, channelUpdate_bc)))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail to relay an htlc-add when expiry is too soon") { case (relayer, register, paymentHandler) =>
@@ -222,12 +226,12 @@ class RelayerSpec extends TestkitBaseClass {

     sender.send(relayer, ForwardAdd(add_ab))

-    val fail = sender.expectMsgType[CMD_FAIL_HTLC]
+    val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
     assert(fail.id === add_ab.id)
     assert(fail.reason == Right(ExpiryTooSoon(channelUpdate_bc)))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail an htlc-add at the final node when amount has been modified by second-to-last node") { case (relayer, register, paymentHandler) =>
@@ -242,12 +246,12 @@ class RelayerSpec extends TestkitBaseClass {

     sender.send(relayer, ForwardAdd(add_ab))

-    val fail = sender.expectMsgType[CMD_FAIL_HTLC]
+    val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
     assert(fail.id === add_ab.id)
     assert(fail.reason == Right(FinalIncorrectHtlcAmount(add_ab.amountMsat)))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail an htlc-add at the final node when expiry has been modified by second-to-last node") { case (relayer, register, paymentHandler) =>
@@ -262,12 +266,12 @@ class RelayerSpec extends TestkitBaseClass {

     sender.send(relayer, ForwardAdd(add_ab))

-    val fail = sender.expectMsgType[CMD_FAIL_HTLC]
+    val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
     assert(fail.id === add_ab.id)
     assert(fail.reason == Right(FinalIncorrectCltvExpiry(add_ab.expiry)))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail to relay an htlc-add when next channel's balance is too low") { case (relayer, register, paymentHandler) =>
@@ -284,14 +288,14 @@ class RelayerSpec extends TestkitBaseClass {
     assert(fwd.shortChannelId === channelUpdate_bc.shortChannelId)
     assert(fwd.message.upstream_opt === Some(add_ab))

-    sender.send(relayer, Status.Failure(AddHtlcFailed(channelId_bc, new InsufficientFunds(channelId_bc, cmd.amountMsat, 100, 0, 0), Relayed(add_ab.channelId, add_ab.id, add_ab.amountMsat, cmd.amountMsat), Some(channelUpdate_bc))))
+    sender.send(relayer, Status.Failure(AddHtlcFailed(channelId_bc, add_ab.paymentHash, new InsufficientFunds(channelId_bc, cmd.amountMsat, 100, 0, 0), Relayed(add_ab.channelId, add_ab.id, add_ab.amountMsat, cmd.amountMsat), Some(channelUpdate_bc))))

     val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
     assert(fail.id === add_ab.id)
     assert(fail.reason == Right(TemporaryChannelFailure(channelUpdate_bc)))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail to relay an htlc-add when next channel has too many inflight htlcs") { case (relayer, register, paymentHandler) =>
@@ -308,14 +312,14 @@ class RelayerSpec extends TestkitBaseClass {
     assert(fwd.shortChannelId === channelUpdate_bc.shortChannelId)
     assert(fwd.message.upstream_opt === Some(add_ab))

-    sender.send(relayer, Status.Failure(AddHtlcFailed(channelId_bc, new TooManyAcceptedHtlcs(channelId_bc, 30), Relayed(add_ab.channelId, add_ab.id, add_ab.amountMsat, cmd.amountMsat), Some(channelUpdate_bc))))
+    sender.send(relayer, Status.Failure(AddHtlcFailed(channelId_bc, add_ab.paymentHash, new TooManyAcceptedHtlcs(channelId_bc, 30), Relayed(add_ab.channelId, add_ab.id, add_ab.amountMsat, cmd.amountMsat), Some(channelUpdate_bc))))

     val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
     assert(fail.id === add_ab.id)
     assert(fail.reason == Right(TemporaryChannelFailure(channelUpdate_bc)))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("fail to relay an htlc-add when next channel has a timed out htlc (and is thus closing)") { case (relayer, register, paymentHandler) =>
@@ -332,14 +336,14 @@ class RelayerSpec extends TestkitBaseClass {
     assert(fwd.shortChannelId === channelUpdate_bc.shortChannelId)
     assert(fwd.message.upstream_opt === Some(add_ab))

-    sender.send(relayer, Status.Failure(AddHtlcFailed(channelId_bc, new HtlcTimedout(channelId_bc), Relayed(add_ab.channelId, add_ab.id, add_ab.amountMsat, cmd.amountMsat), Some(channelUpdate_bc))))
+    sender.send(relayer, Status.Failure(AddHtlcFailed(channelId_bc, add_ab.paymentHash, new HtlcTimedout(channelId_bc), Relayed(add_ab.channelId, add_ab.id, add_ab.amountMsat, cmd.amountMsat), Some(channelUpdate_bc))))

     val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
     assert(fail.id === add_ab.id)
     assert(fail.reason == Right(PermanentChannelFailure))

-    register.expectNoMsg(500 millis)
-    paymentHandler.expectNoMsg(500 millis)
+    register.expectNoMsg(100 millis)
+    paymentHandler.expectNoMsg(100 millis)
   }

   test("relay an htlc-fulfill") { case (relayer, register, _) =>
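The change repeated throughout this spec: instead of replying with `CMD_FAIL_HTLC`/`CMD_FAIL_MALFORMED_HTLC` to whoever sent `ForwardAdd`, the relayer now wraps the command in `Register.Forward(channelId, message)` and lets the register resolve the upstream channel actor by id (the shorter `expectNoMsg` timeouts merely speed the suite up). A miniature model of that indirection, with hypothetical simplified types rather than eclair's actual `Register`:

```scala
import akka.actor.{Actor, ActorRef}

// A command addressed by channel id; the register routes it for us.
case class Forward[T](channelId: String, message: T)
// Registration of a channel actor under its channel id.
case class RegisterChannel(channelId: String, channel: ActorRef)

class MiniRegister extends Actor {
  def receive = run(Map.empty)

  def run(channels: Map[String, ActorRef]): Receive = {
    case RegisterChannel(channelId, channel) =>
      context become run(channels + (channelId -> channel))
    case Forward(channelId, message) =>
      // `forward` keeps the original sender, so replies bypass the register
      channels.get(channelId).foreach(_ forward message)
  }
}
```

Addressing commands by channel id removes the assumption that the relayer's correspondent is the channel actor that must receive the failure.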
@@ -1,10 +1,13 @@
 package fr.acinq.eclair.router

 import akka.actor.ActorSystem
+import akka.testkit.TestProbe
+import akka.pattern.pipe
 import fr.acinq.bitcoin.Crypto.PrivateKey
 import fr.acinq.bitcoin.{BinaryData, Block, Satoshi, Script, Transaction}
+import fr.acinq.eclair.blockchain.ValidateResult
 import fr.acinq.eclair.blockchain.bitcoind.BitcoinCoreWallet
-import fr.acinq.eclair.blockchain.bitcoind.rpc.{BitcoinJsonRPCClient, ExtendedBitcoinClient}
+import fr.acinq.eclair.blockchain.bitcoind.rpc.{BasicBitcoinJsonRPCClient, ExtendedBitcoinClient}
 import fr.acinq.eclair.transactions.Scripts
 import fr.acinq.eclair.wire.{ChannelAnnouncement, ChannelUpdate}
 import fr.acinq.eclair.{randomKey, toShortId}
@@ -27,7 +30,7 @@ class AnnouncementsBatchValidationSpec extends FunSuite {
     import scala.concurrent.ExecutionContext.Implicits.global

     implicit val system = ActorSystem()
-    implicit val extendedBitcoinClient = new ExtendedBitcoinClient(new BitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 18332))
+    implicit val extendedBitcoinClient = new ExtendedBitcoinClient(new BasicBitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 18332))

     val channels = for (i <- 0 until 50) yield {
       // let's generate a block every 10 txs so that we can compute short ids
@@ -37,16 +40,16 @@ class AnnouncementsBatchValidationSpec extends FunSuite {
     generateBlocks(6)
     val announcements = channels.map(makeChannelAnnouncement)

-    val alteredAnnouncements = announcements.zipWithIndex map {
-      case (ann, 3) => ann.copy(shortChannelId = Long.MaxValue) // invalid block height
-      case (ann, 7) => ann.copy(shortChannelId = toShortId(500, 1000, 0)) // invalid tx index
-      case (ann, _) => ann
-    }
+    val sender = TestProbe()

-    val res = Await.result(extendedBitcoinClient.getParallel(alteredAnnouncements), 10 seconds)
+    extendedBitcoinClient.validate(announcements(0)).pipeTo(sender.ref)
+    sender.expectMsgType[ValidateResult].tx.isDefined

-    assert(res.r(3).tx == None)
-    assert(res.r(7).tx == None)
+    extendedBitcoinClient.validate(announcements(1).copy(shortChannelId = Long.MaxValue)).pipeTo(sender.ref) // invalid block height
+    sender.expectMsgType[ValidateResult].tx.isEmpty
+
+    extendedBitcoinClient.validate(announcements(2).copy(shortChannelId = toShortId(500, 1000, 0))).pipeTo(sender.ref) // invalid tx index
+    sender.expectMsgType[ValidateResult].tx.isEmpty
   }

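The rewrite above drops the batch `getParallel`/`ParallelGetResponse` API in favour of one `validate` call per announcement, each returning a `Future` that is piped to a probe. A self-contained sketch of that test pattern; `ValidateResult` is modeled here as a plain case class (a hypothetical shape, not eclair's):

```scala
import akka.actor.ActorSystem
import akka.pattern.pipe
import akka.testkit.TestProbe
import scala.concurrent.Future

object PipeToSketch extends App {
  implicit val system = ActorSystem("demo")
  import system.dispatcher

  case class ValidateResult(tx: Option[String])

  val probe = TestProbe()
  // the asynchronous validation completes off the test thread...
  Future(ValidateResult(Some("rawtx"))).pipeTo(probe.ref)
  // ...and the probe receives the result as an ordinary message
  assert(probe.expectMsgType[ValidateResult].tx.isDefined)
  system.terminate()
}
```

Per-announcement results also make the assertions local: each bad announcement is checked right where it is submitted, instead of being fished out of an indexed batch response.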
@@ -6,7 +6,7 @@ import fr.acinq.bitcoin.Crypto.PrivateKey
 import fr.acinq.bitcoin.Script.{pay2wsh, write}
 import fr.acinq.bitcoin.{Block, Satoshi, Transaction, TxOut}
 import fr.acinq.eclair.TestConstants.Alice
-import fr.acinq.eclair.blockchain.{IndividualResult, ParallelGetRequest, ParallelGetResponse, WatchSpentBasic}
+import fr.acinq.eclair.blockchain.{ValidateResult, ValidateRequest, WatchSpentBasic}
 import fr.acinq.eclair.router.Announcements._
 import fr.acinq.eclair.transactions.Scripts
 import fr.acinq.eclair.wire._
@@ -102,17 +102,16 @@ abstract class BaseRouterSpec extends TestkitBaseClass {
     router ! channelUpdate_dc
     router ! channelUpdate_ef
     router ! channelUpdate_fe
-    // we manually trigger a validation
-    router ! TickValidate
     // watcher receives the get tx requests
-    assert(watcher.expectMsgType[ParallelGetRequest].ann.toSet === Set(chan_ab, chan_bc, chan_cd, chan_ef))
+    watcher.expectMsg(ValidateRequest(chan_ab))
+    watcher.expectMsg(ValidateRequest(chan_bc))
+    watcher.expectMsg(ValidateRequest(chan_cd))
+    watcher.expectMsg(ValidateRequest(chan_ef))
     // and answers with valid scripts
-    watcher.send(router, ParallelGetResponse(
-      IndividualResult(chan_ab, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_a, funding_b)))) :: Nil, lockTime = 0)), true) ::
-      IndividualResult(chan_bc, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_b, funding_c)))) :: Nil, lockTime = 0)), true) ::
-      IndividualResult(chan_cd, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_c, funding_d)))) :: Nil, lockTime = 0)), true) ::
-      IndividualResult(chan_ef, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_e, funding_f)))) :: Nil, lockTime = 0)), true) :: Nil
-    ))
+    watcher.send(router, ValidateResult(chan_ab, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_a, funding_b)))) :: Nil, lockTime = 0)), true, None))
+    watcher.send(router, ValidateResult(chan_bc, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_b, funding_c)))) :: Nil, lockTime = 0)), true, None))
+    watcher.send(router, ValidateResult(chan_cd, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_c, funding_d)))) :: Nil, lockTime = 0)), true, None))
+    watcher.send(router, ValidateResult(chan_ef, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_e, funding_f)))) :: Nil, lockTime = 0)), true, None))
     // watcher receives watch-spent request
     //watcher.expectMsgType[WatchSpentBasic]
     //watcher.expectMsgType[WatchSpentBasic]
@@ -128,7 +127,6 @@ abstract class BaseRouterSpec extends TestkitBaseClass {
       val channels = sender.expectMsgType[Iterable[ChannelAnnouncement]]
       sender.send(router, 'updates)
       val updates = sender.expectMsgType[Iterable[ChannelUpdate]]
-      //nodes.size === 6 &&
       channels.size === 4 && updates.size === 8
     }, max = 10 seconds, interval = 1 second)

@@ -6,6 +6,7 @@ import fr.acinq.bitcoin.Script.{pay2wsh, write}
 import fr.acinq.bitcoin.{Block, Satoshi, Transaction, TxOut}
 import fr.acinq.eclair.blockchain._
 import fr.acinq.eclair.channel.BITCOIN_FUNDING_EXTERNAL_CHANNEL_SPENT
+import fr.acinq.eclair.crypto.TransportHandler
 import fr.acinq.eclair.payment.PaymentRequest.ExtraHop
 import fr.acinq.eclair.router.Announcements.makeChannelUpdate
 import fr.acinq.eclair.transactions.Scripts
@@ -22,7 +23,7 @@ import scala.concurrent.duration._
 @RunWith(classOf[JUnitRunner])
 class RouterSpec extends BaseRouterSpec {

-  test("properly announce valid new channels and ignore invalid ones") { case (router, watcher) =>
+  ignore("properly announce valid new channels and ignore invalid ones") { case (router, watcher) =>
     val eventListener = TestProbe()
     system.eventStream.subscribe(eventListener.ref, classOf[NetworkEvent])

@@ -53,20 +54,21 @@ class RouterSpec extends BaseRouterSpec {
     router ! update_ax
     router ! update_ay
     router ! update_az
-    router ! TickValidate // we manually trigger a validation
-    assert(watcher.expectMsgType[ParallelGetRequest].ann.toSet === Set(chan_ac, chan_ax, chan_ay, chan_az))
-    watcher.send(router, ParallelGetResponse(
-      IndividualResult(chan_ac, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_a, funding_c)))) :: Nil, lockTime = 0)), true) ::
-      IndividualResult(chan_ax, None, false) ::
-      IndividualResult(chan_ay, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_a, randomKey.publicKey)))) :: Nil, lockTime = 0)), true) ::
-      IndividualResult(chan_az, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_a, priv_funding_z.publicKey)))) :: Nil, lockTime = 0)), false) :: Nil))
+    watcher.expectMsg(ValidateRequest(chan_ac))
+    watcher.expectMsg(ValidateRequest(chan_ax))
+    watcher.expectMsg(ValidateRequest(chan_ay))
+    watcher.expectMsg(ValidateRequest(chan_az))
+    watcher.send(router, ValidateResult(chan_ac, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_a, funding_c)))) :: Nil, lockTime = 0)), true, None))
+    watcher.send(router, ValidateResult(chan_ax, None, false, None))
+    watcher.send(router, ValidateResult(chan_ay, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_a, randomKey.publicKey)))) :: Nil, lockTime = 0)), true, None))
+    watcher.send(router, ValidateResult(chan_az, Some(Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(1000000), write(pay2wsh(Scripts.multiSig2of2(funding_a, priv_funding_z.publicKey)))) :: Nil, lockTime = 0)), false, None))
     //watcher.expectMsgType[WatchSpentBasic]
     watcher.expectNoMsg(1 second)

     eventListener.expectMsg(ChannelDiscovered(chan_ac, Satoshi(1000000)))
   }

-  test("properly announce lost channels and nodes") { case (router, watcher) =>
+  test("properly announce lost channels and nodes") { case (router, _) =>
     val eventListener = TestProbe()
     system.eventStream.subscribe(eventListener.ref, classOf[NetworkEvent])
@@ -96,6 +98,7 @@ class RouterSpec extends BaseRouterSpec {
     val chan_ac = channelAnnouncement(channelId_ac, priv_a, priv_c, priv_funding_a, priv_funding_c)
     val buggy_chan_ac = chan_ac.copy(nodeSignature1 = chan_ac.nodeSignature2)
     sender.send(router, buggy_chan_ac)
+    sender.expectMsg(TransportHandler.ReadAck(buggy_chan_ac))
     sender.expectMsgType[Error]
   }

@@ -103,6 +106,7 @@ class RouterSpec extends BaseRouterSpec {
     val sender = TestProbe()
     val buggy_ann_a = ann_a.copy(signature = ann_b.signature, timestamp = ann_a.timestamp + 1)
     sender.send(router, buggy_ann_a)
+    sender.expectMsg(TransportHandler.ReadAck(buggy_ann_a))
     sender.expectMsgType[Error]
   }

@@ -110,6 +114,7 @@ class RouterSpec extends BaseRouterSpec {
     val sender = TestProbe()
     val buggy_channelUpdate_ab = channelUpdate_ab.copy(signature = ann_b.signature, timestamp = channelUpdate_ab.timestamp + 1)
     sender.send(router, buggy_channelUpdate_ab)
+    sender.expectMsg(TransportHandler.ReadAck(buggy_channelUpdate_ab))
     sender.expectMsgType[Error]
   }

@@ -165,6 +170,7 @@ class RouterSpec extends BaseRouterSpec {

     val channelUpdate_cd1 = makeChannelUpdate(Block.RegtestGenesisBlock.hash, priv_c, d, channelId_cd, cltvExpiryDelta = 3, 0, feeBaseMsat = 153000, feeProportionalMillionths = 4, enable = false)
     sender.send(router, channelUpdate_cd1)
+    sender.expectMsg(TransportHandler.ReadAck(channelUpdate_cd1))
     sender.send(router, RouteRequest(a, d))
     sender.expectMsg(Failure(RouteNotFound))
   }

@@ -0,0 +1,30 @@
+package fr.acinq.eclair.wire
+
+import fr.acinq.eclair.channel.{CMD_FAIL_HTLC, CMD_FAIL_MALFORMED_HTLC, CMD_FULFILL_HTLC, Command}
+import fr.acinq.eclair.randomBytes
+import org.junit.runner.RunWith
+import org.scalatest.FunSuite
+import org.scalatest.junit.JUnitRunner
+
+/**
+  * Created by PM on 31/05/2016.
+  */
+@RunWith(classOf[JUnitRunner])
+class CommandCodecsSpec extends FunSuite {
+
+  test("encode/decode all channel messages") {
+    val msgs: List[Command] =
+      CMD_FULFILL_HTLC(1573L, randomBytes(32)) ::
+        CMD_FAIL_HTLC(42456L, Left(randomBytes(145))) ::
+        CMD_FAIL_HTLC(253, Right(TemporaryNodeFailure)) ::
+        CMD_FAIL_MALFORMED_HTLC(7984, randomBytes(32), FailureMessageCodecs.BADONION) :: Nil
+
+    msgs.foreach {
+      case msg => {
+        val encoded = CommandCodecs.cmdCodec.encode(msg).require
+        val decoded = CommandCodecs.cmdCodec.decode(encoded).require
+        assert(msg === decoded.value)
+      }
+    }
+  }
+}
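The new spec round-trips channel commands through `CommandCodecs.cmdCodec`. A self-contained sketch of how such a codec is typically assembled with scodec, using simplified stand-in types and arbitrary tag values (not eclair's actual wire layout):

```scala
import scodec.Codec
import scodec.bits.ByteVector
import scodec.codecs._

object CommandCodecSketch extends App {
  sealed trait Cmd
  case class Fulfill(id: Long, paymentPreimage: ByteVector) extends Cmd
  case class Fail(id: Long, reason: ByteVector) extends Cmd

  // a discriminated codec: a 16-bit tag selects the per-command codec
  val cmdCodec: Codec[Cmd] = discriminated[Cmd].by(uint16)
    .typecase(0, (("id" | int64) :: ("paymentPreimage" | bytes(32))).as[Fulfill])
    .typecase(1, (("id" | int64) :: ("reason" | variableSizeBytes(uint16, bytes))).as[Fail])

  // round trip, as the test above does for each command
  val msg: Cmd = Fulfill(1573L, ByteVector.high(32))
  assert(cmdCodec.decode(cmdCodec.encode(msg).require).require.value == msg)
}
```

A stable binary form for commands is useful, for example, to persist pending HTLC settlements so they survive a node restart.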
@@ -11,7 +11,7 @@ import scala.util.Random
  * Created by PM on 31/05/2016.
  */
 @RunWith(classOf[JUnitRunner])
-class FailureMessageLightningMessageCodecsSpec extends FunSuite {
+class FailureMessageCodecsSpec extends FunSuite {
   val channelUpdate = ChannelUpdate(
     signature = BinaryData("3045022100c451cd65c88f55b1767941a247e849e12f5f4d4a93a07316659e22f5267d2088022009042a595c6bc8942cd9d729317b82b306edc259fb6b3a3cecb3dd1bd446e90601"),
     chainHash = Block.RegtestGenesisBlock.hash,

@@ -1,17 +1,22 @@
 akka {
   loggers = ["akka.event.slf4j.Slf4jLogger"]
-  loglevel = "DEBUG"
+  loglevel = "INFO"

   actor {
     debug {
       # enable DEBUG logging of all LoggingFSMs for events, transitions and timers
       fsm = on
     }
   }
   io {
     tcp {

       # The maximum number of bytes delivered by a `Received` message. Before
       # more data is read from the network the connection actor will try to
       # do other work.
       # The purpose of this setting is to impose a smaller limit than the
       # configured receive buffer size. When using value 'unlimited' it will
       # try to read all from the receive buffer.
       # As per BOLT#8 lightning messages are at most 2 + 16 + 65535 + 16 = 65569bytes
       # Currently the largest message is update_add_htlc (~1500b).
       # As a tradeoff to reduce the RAM consumption, in conjunction with tcp pull mode,
       # the default value is chosen to allow for a decent number of messages to be prefetched.
       max-received-message-size = 16384b
+
+  http {
+    host-connection-pool {
+      max-open-requests = 64
+    }
+  }
 }

@@ -81,7 +81,7 @@
     <appender-ref ref="CYAN"/>
   </logger>

-  <logger name="fr.acinq.eclair.gui" level="ERROR" additivity="false">
+  <logger name="fr.acinq.eclair.gui" level="WARN" additivity="false">
     <appender-ref ref="MAGENTA"/>
   </logger>

|
@ -21,8 +21,9 @@ object Boot extends App with Logging {
|
|||
}
|
||||
|
||||
def onError(t: Throwable): Unit = {
|
||||
System.err.println(s"fatal error: ${t.getMessage}")
|
||||
logger.error(s"fatal error: ${t.getMessage}")
|
||||
val errorMsg = if (t.getMessage != null) t.getMessage else t.getClass.getSimpleName
|
||||
System.err.println(s"fatal error: $errorMsg")
|
||||
logger.error(s"fatal error: $errorMsg")
|
||||
System.exit(1)
|
||||
}
|
||||
}
|
||||
|
|
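The `Boot` change above guards against throwables whose `getMessage` is null (a bare `NullPointerException`, for instance), which previously logged the literal string `fatal error: null`. The guard in isolation:

```scala
object ErrorMsgSketch extends App {
  val t: Throwable = new NullPointerException()
  // getMessage is null here, so fall back to the class name
  val errorMsg = if (t.getMessage != null) t.getMessage else t.getClass.getSimpleName
  assert(errorMsg == "NullPointerException")
}
```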
pom.xml
@@ -106,6 +106,8 @@
       <arg>-language:implicitConversions</arg>
       <arg>-Xfatal-warnings</arg>
       <arg>-unchecked</arg>
+      <arg>-Xmax-classfile-name</arg>
+      <arg>140</arg>
     </args>
     <scalaCompatVersion>${scala.version.short}</scalaCompatVersion>
   </configuration>

@@ -11,7 +11,7 @@ cd libbase58
 cd
 git clone https://github.com/ElementsProject/lightning.git
 cd lightning
-git checkkout fce9ee29e3c37b4291ebb050e6a687cfaa7df95a
+git checkout fce9ee29e3c37b4291ebb050e6a687cfaa7df95a
 git submodule init
 git submodule update
 make