
Merge branch 'master' into android

pm47 2018-11-23 14:03:11 +01:00
commit e91ab8199d
40 changed files with 401 additions and 346 deletions


@ -4,7 +4,7 @@
[![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](LICENSE)
[![Gitter chat](https://img.shields.io/badge/chat-on%20gitter-red.svg)](https://gitter.im/ACINQ/eclair)
**Eclair** (french for Lightning) is a scala implementation of the Lightning Network. It can run with or without a GUI, and a JSON-RPC API is also available.
**Eclair** (French for Lightning) is a Scala implementation of the Lightning Network. It can run with or without a GUI, and a JSON-RPC API is also available.
This software follows the [Lightning Network Specifications (BOLTs)](https://github.com/lightningnetwork/lightning-rfc). Other implementations include [c-lightning](https://github.com/ElementsProject/lightning) and [lnd](https://github.com/LightningNetwork/lnd).
@ -101,7 +101,8 @@ name | description
eclair.api.password | API password (BASIC) | "" (must be set if the API is enabled)
eclair.bitcoind.rpcuser | Bitcoin Core RPC user | foo
eclair.bitcoind.rpcpassword | Bitcoin Core RPC password | bar
eclair.bitcoind.zmq | Bitcoin Core ZMQ address | "tcp://127.0.0.1:29000"
eclair.bitcoind.zmqblock | Bitcoin Core ZMQ block address | "tcp://127.0.0.1:29000"
eclair.bitcoind.zmqtx | Bitcoin Core ZMQ tx address | "tcp://127.0.0.1:29000"
eclair.gui.unit | Unit in which amounts are displayed (possible values: msat, sat, mbtc, btc) | btc
Quotes are not required unless the value contains special characters. Full syntax guide [here](https://github.com/lightbend/config/blob/master/HOCON.md).
@ -208,7 +209,7 @@ addresstype=p2sh-segwit
deprecatedrpc=signrawtransaction
```
You may also want to take advantage of the new configuration sections in `bitcoin.conf` to manage parameters that are network speficic, so you can reasliy run your bitcoin node on both mainnet and testnet. For example you could use:
You may also want to take advantage of the new configuration sections in `bitcoin.conf` to manage parameters that are network specific, so you can easily run your bitcoin node on both mainnet and testnet. For example you could use:
```
server=1


@ -2,6 +2,9 @@
# Check if jq is installed. If not, display instructions and abort program
command -v jq >/dev/null 2>&1 || { echo -e "This tool requires jq.\nFor installation instructions, visit https://stedolan.github.io/jq/download/.\n\nAborting..."; exit 1; }
# curl installed? If not, give a hint
command -v curl >/dev/null 2>&1 || { echo -e "This tool requires curl.\n\nAborting..."; exit 1; }
FULL_OUTPUT='false'
URL='http://localhost:8080'
PASSWORD=''


@ -126,17 +126,22 @@
<artifactId>akka-slf4j_${scala.version.short}</artifactId>
<version>${akka.version}</version>
</dependency>
<!-- HTTP -->
<!-- HTTP CLIENT -->
<dependency>
<groupId>com.ning</groupId>
<artifactId>async-http-client</artifactId>
<version>1.9.40</version>
<groupId>com.softwaremill.sttp</groupId>
<artifactId>okhttp-backend_${scala.version.short}</artifactId>
<version>${sttp.version}</version>
</dependency>
<!-- JSON -->
<dependency>
<groupId>org.json4s</groupId>
<artifactId>json4s-jackson_${scala.version.short}</artifactId>
<version>3.5.3</version>
<version>3.6.0</version>
</dependency>
<dependency>
<groupId>com.softwaremill.sttp</groupId>
<artifactId>json4s_${scala.version.short}</artifactId>
<version>${sttp.version}</version>
</dependency>
<dependency>
<groupId>com.lihaoyi</groupId>


@ -22,7 +22,8 @@ eclair {
rpcport = 18332
rpcuser = "foo"
rpcpassword = "bar"
zmq = "tcp://127.0.0.1:29000"
zmqblock = "tcp://127.0.0.1:29000"
zmqtx = "tcp://127.0.0.1:29000"
}
default-feerates { // those are in satoshis per kilobyte


@ -1,53 +0,0 @@
/*
* Copyright 2018 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.acinq.eclair
import com.ning.http.client.{AsyncCompletionHandler, AsyncHttpClient, AsyncHttpClientConfig, Response}
import grizzled.slf4j.Logging
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JNothing, JValue}
import org.json4s.jackson.JsonMethods.parse
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success, Try}
object HttpHelper extends Logging {
val client = new AsyncHttpClient(new AsyncHttpClientConfig.Builder().setAcceptAnyCertificate(true).build())
implicit val formats = DefaultFormats
def get(url: String)(implicit ec: ExecutionContext): Future[JValue] = {
val promise = Promise[JValue]
client
.prepareGet(url)
.execute(new AsyncCompletionHandler[Unit] {
override def onCompleted(response: Response): Unit = {
Try(parse(response.getResponseBody)) match {
case Success(json) => promise.success(json)
case Failure(t) => promise.success(JNothing)
}
}
})
val f = promise.future
f onFailure {
case t: Throwable => logger.error(s"GET $url failed: ", t)
}
f
}
}


@ -20,6 +20,7 @@ import java.io.File
import akka.actor.{ActorRef, ActorSystem, Props, SupervisorStrategy}
import akka.util.Timeout
import com.softwaremill.sttp.okhttp.OkHttpFutureBackend
import com.typesafe.config.{Config, ConfigFactory}
import fr.acinq.bitcoin.{BinaryData, Block}
import fr.acinq.eclair.NodeParams.ELECTRUM
@ -38,12 +39,12 @@ import scala.concurrent.{ExecutionContext, Future, Promise}
/**
* Setup eclair from a datadir.
* Setup eclair from a data directory.
*
* Created by PM on 25/01/2016.
*
* @param datadir directory where eclair-core will write/read its data
* @param overrideDefaults
* @param datadir directory where eclair-core will write/read its data.
* @param overrideDefaults use this parameter to programmatically override the node configuration.
* @param seed_opt optional seed, if set eclair will use it instead of generating one and won't create a seed.dat file.
*/
class Setup(datadir: File,
@ -64,9 +65,8 @@ class Setup(datadir: File,
logger.info(s"nodeid=${nodeParams.nodeId} alias=${nodeParams.alias}")
logger.info(s"using chain=$chain chainHash=${nodeParams.chainHash}")
implicit val timeout = Timeout(30 seconds)
implicit val formats = org.json4s.DefaultFormats
implicit val ec = ExecutionContext.Implicits.global
implicit val sttpBackend = OkHttpFutureBackend()
def bootstrap: Future[Kit] =
for {
@ -121,6 +121,7 @@ class Setup(datadir: File,
wallet = bitcoin match {
case Electrum(electrumClient) =>
val electrumWallet = system.actorOf(ElectrumWallet.props(seed, electrumClient, ElectrumWallet.WalletParameters(nodeParams.chainHash)), "electrum-wallet")
implicit val timeout = Timeout(30 seconds)
new ElectrumEclairWallet(electrumWallet, nodeParams.chainHash)
case _ => ???
}


@ -16,7 +16,6 @@
package fr.acinq.eclair.blockchain.bitcoind
import akka.actor.ActorSystem
import fr.acinq.bitcoin._
import fr.acinq.eclair._
import fr.acinq.eclair.blockchain._
@ -30,11 +29,10 @@ import scala.concurrent.{ExecutionContext, Future}
/**
* Created by PM on 06/07/2017.
*/
class BitcoinCoreWallet(rpcClient: BitcoinJsonRPCClient)(implicit system: ActorSystem, ec: ExecutionContext) extends EclairWallet with Logging {
class BitcoinCoreWallet(rpcClient: BitcoinJsonRPCClient)(implicit ec: ExecutionContext) extends EclairWallet with Logging {
import BitcoinCoreWallet._
def fundTransaction(hex: String, lockUnspents: Boolean, feeRatePerKw: Long): Future[FundTransactionResponse] = {
val feeRatePerKB = BigDecimal(feerateKw2KB(feeRatePerKw))
rpcClient.invoke("fundrawtransaction", hex, Options(lockUnspents, feeRatePerKB.bigDecimal.scaleByPowerOfTen(-8))).map(json => {


@ -16,68 +16,36 @@
package fr.acinq.eclair.blockchain.bitcoind.rpc
import akka.actor.ActorSystem
import com.ning.http.client._
import com.softwaremill.sttp._
import com.softwaremill.sttp.json4s._
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JInt, JNull, JString, JValue}
import org.json4s.jackson.JsonMethods.parse
import org.json4s.jackson.Serialization._
import org.json4s.JsonAST.JValue
import org.json4s.jackson.Serialization
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.{ExecutionContext, Future}
class BasicBitcoinJsonRPCClient(config: AsyncHttpClientConfig, host: String, port: Int, ssl: Boolean)(implicit system: ActorSystem) extends BitcoinJsonRPCClient {
def this(user: String, password: String, host: String = "127.0.0.1", port: Int = 8332, ssl: Boolean = false)(implicit system: ActorSystem) = this(
new AsyncHttpClientConfig.Builder()
.setRealm(new Realm.RealmBuilder().setPrincipal(user).setPassword(password).setUsePreemptiveAuth(true).setScheme(Realm.AuthScheme.BASIC).build)
.build,
host,
port,
ssl
)
val client: AsyncHttpClient = new AsyncHttpClient(config)
class BasicBitcoinJsonRPCClient(user: String, password: String, host: String = "127.0.0.1", port: Int = 8332, ssl: Boolean = false)(implicit http: SttpBackend[Future, Nothing]) extends BitcoinJsonRPCClient {
val scheme = if (ssl) "https" else "http"
implicit val formats = DefaultFormats.withBigDecimal
implicit val serialization = Serialization
override def invoke(method: String, params: Any*)(implicit ec: ExecutionContext): Future[JValue] =
invoke(JsonRPCRequest(method = method, params = params))
invoke(Seq(JsonRPCRequest(method = method, params = params))).map(l => jsonResponse2Exception(l.head).result)
def jsonResponse2Exception(jsonRPCResponse: JsonRPCResponse): JsonRPCResponse = jsonRPCResponse match {
case JsonRPCResponse(_, Some(error), _) => throw JsonRPCError(error)
case o => o
}
def invoke(request: JsonRPCRequest)(implicit ec: ExecutionContext): Future[JValue] = {
val promise = Promise[JValue]()
client
.preparePost((if (ssl) "https" else "http") + s"://$host:$port/")
.addHeader("Content-Type", "application/json")
.setBody(write(request))
.execute(new AsyncCompletionHandler[Unit] {
override def onCompleted(response: Response): Unit =
try {
val jvalue = parse(response.getResponseBody, useBigDecimalForDouble = true)
val jerror = jvalue \ "error"
val result = jvalue \ "result"
if (jerror != JNull) {
for {
JInt(code) <- jerror \ "code"
JString(message) <- jerror \ "message"
} yield promise.failure(new JsonRPCError(Error(code.toInt, message)))
} else {
promise.success(result)
}
} catch {
case t: Throwable => promise.failure(t)
}
override def onThrowable(t: Throwable): Unit = promise.failure(t)
})
promise.future
}
def invoke(request: Seq[(String, Seq[Any])])(implicit ec: ExecutionContext): Future[Seq[JValue]] = ???
def invoke(requests: Seq[JsonRPCRequest])(implicit ec: ExecutionContext): Future[Seq[JsonRPCResponse]] =
for {
res <- sttp
.post(uri"$scheme://$host:$port")
.body(requests)
.auth.basic(user, password)
.response(asJson[Seq[JsonRPCResponse]])
.send()
} yield res.unsafeBody
}
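
For context, here is a minimal usage sketch of the refactored client (not part of this commit): it wires an implicit sttp `OkHttpFutureBackend`, the same way `Setup.scala` now does, and issues a single `invoke` call. The object name, credentials, host, port and the `getblockcount` method are illustrative placeholders.

```scala
import com.softwaremill.sttp.SttpBackend
import com.softwaremill.sttp.okhttp.OkHttpFutureBackend
import fr.acinq.eclair.blockchain.bitcoind.rpc.BasicBitcoinJsonRPCClient

import scala.concurrent.{ExecutionContext, Future}

// Hypothetical sketch, not part of the diff: exercising the sttp-based JSON-RPC client.
object RpcClientSketch extends App {
  implicit val ec: ExecutionContext = ExecutionContext.Implicits.global
  // one shared backend instance, passed implicitly to every component that does HTTP
  implicit val sttpBackend: SttpBackend[Future, Nothing] = OkHttpFutureBackend()

  // placeholder credentials and testnet port
  val client = new BasicBitcoinJsonRPCClient(user = "foo", password = "bar", host = "127.0.0.1", port = 18332)

  // a single call is wrapped in a one-element batch by invoke(Seq(...)) above
  client.invoke("getblockcount").foreach(blockCount => println(s"blockcount=$blockCount"))
}
```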


@ -17,7 +17,7 @@
package fr.acinq.eclair.blockchain.fee
import fr.acinq.bitcoin._
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BitcoinJsonRPCClient, Error, JsonRPCError}
import fr.acinq.eclair.blockchain.bitcoind.rpc.BitcoinJsonRPCClient
import org.json4s.DefaultFormats
import org.json4s.JsonAST._


@ -16,26 +16,33 @@
package fr.acinq.eclair.blockchain.fee
import akka.actor.ActorSystem
import com.softwaremill.sttp._
import com.softwaremill.sttp.json4s._
import fr.acinq.bitcoin.{BinaryData, Block}
import fr.acinq.eclair.HttpHelper.get
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JInt, JValue}
import org.json4s.jackson.Serialization
import scala.concurrent.{ExecutionContext, Future}
class BitgoFeeProvider(chainHash: BinaryData)(implicit system: ActorSystem, ec: ExecutionContext) extends FeeProvider {
class BitgoFeeProvider(chainHash: BinaryData)(implicit http: SttpBackend[Future, Nothing], ec: ExecutionContext) extends FeeProvider {
import BitgoFeeProvider._
implicit val formats = DefaultFormats
implicit val serialization = Serialization
val uri = chainHash match {
case Block.LivenetGenesisBlock.hash => "https://www.bitgo.com/api/v2/btc/tx/fee"
case _ => "https://test.bitgo.com/api/v2/tbtc/tx/fee"
case Block.LivenetGenesisBlock.hash => uri"https://www.bitgo.com/api/v2/btc/tx/fee"
case _ => uri"https://test.bitgo.com/api/v2/tbtc/tx/fee"
}
override def getFeerates: Future[FeeratesPerKB] =
for {
json <- get(uri)
feeRanges = parseFeeRanges(json)
res <- sttp.get(uri)
.response(asJson[JValue])
.send()
feeRanges = parseFeeRanges(res.unsafeBody)
} yield extractFeerates(feeRanges)
}


@ -16,23 +16,32 @@
package fr.acinq.eclair.blockchain.fee
import akka.actor.ActorSystem
import fr.acinq.eclair.HttpHelper.get
import com.softwaremill.sttp._
import com.softwaremill.sttp.json4s._
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JArray, JInt, JValue}
import org.json4s.jackson.Serialization
import scala.concurrent.{ExecutionContext, Future}
/**
* Created by PM on 16/11/2017.
*/
class EarnDotComFeeProvider(implicit system: ActorSystem, ec: ExecutionContext) extends FeeProvider {
class EarnDotComFeeProvider(implicit http: SttpBackend[Future, Nothing], ec: ExecutionContext) extends FeeProvider {
import EarnDotComFeeProvider._
implicit val formats = DefaultFormats
implicit val serialization = Serialization
val uri = uri"https://bitcoinfees.earn.com/api/v1/fees/list"
override def getFeerates: Future[FeeratesPerKB] =
for {
json <- get("https://bitcoinfees.earn.com/api/v1/fees/list")
feeRanges = parseFeeRanges(json)
json <- sttp.get(uri)
.response(asJson[JValue])
.send()
feeRanges = parseFeeRanges(json.unsafeBody)
} yield extractFeerates(feeRanges)
}
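
The fee providers follow the same pattern: the `ActorSystem` requirement is replaced by an implicit `SttpBackend[Future, Nothing]`. Below is a hypothetical instantiation sketch (not part of the diff), assuming the same OkHttp backend as in `Setup.scala`; the object name is a placeholder.

```scala
import com.softwaremill.sttp.SttpBackend
import com.softwaremill.sttp.okhttp.OkHttpFutureBackend
import fr.acinq.bitcoin.Block
import fr.acinq.eclair.blockchain.fee.{BitgoFeeProvider, EarnDotComFeeProvider}

import scala.concurrent.{ExecutionContext, Future}

// Hypothetical sketch: both constructors take the backend and execution context implicitly.
object FeeProviderSketch extends App {
  implicit val ec: ExecutionContext = ExecutionContext.Implicits.global
  implicit val http: SttpBackend[Future, Nothing] = OkHttpFutureBackend()

  // the mainnet genesis hash selects the production BitGo endpoint (see the match above)
  val bitgo = new BitgoFeeProvider(Block.LivenetGenesisBlock.hash)
  val earn = new EarnDotComFeeProvider()

  bitgo.getFeerates.foreach(feerates => println(s"bitgo: $feerates"))
  earn.getFeerates.foreach(feerates => println(s"earn.com: $feerates"))
}
```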


@ -105,7 +105,7 @@ case class BITCOIN_PARENT_TX_CONFIRMED(childTx: Transaction) extends BitcoinEven
*/
sealed trait Command
final case class CMD_ADD_HTLC(amountMsat: Long, paymentHash: BinaryData, expiry: Long, onion: BinaryData = Sphinx.LAST_PACKET.serialize, upstream_opt: Option[UpdateAddHtlc] = None, commit: Boolean = false, redirected: Boolean = false) extends Command
final case class CMD_ADD_HTLC(amountMsat: Long, paymentHash: BinaryData, cltvExpiry: Long, onion: BinaryData = Sphinx.LAST_PACKET.serialize, upstream_opt: Option[UpdateAddHtlc] = None, commit: Boolean = false, redirected: Boolean = false) extends Command
final case class CMD_FULFILL_HTLC(id: Long, r: BinaryData, commit: Boolean = false) extends Command
final case class CMD_FAIL_HTLC(id: Long, reason: Either[BinaryData, FailureMessage], commit: Boolean = false) extends Command
final case class CMD_FAIL_MALFORMED_HTLC(id: Long, onionHash: BinaryData, failureCode: Int, commit: Boolean = false) extends Command


@ -62,9 +62,9 @@ case class Commitments(localParams: LocalParams, remoteParams: RemoteParams,
def hasNoPendingHtlcs: Boolean = localCommit.spec.htlcs.isEmpty && remoteCommit.spec.htlcs.isEmpty && remoteNextCommitInfo.isRight
def hasTimedoutOutgoingHtlcs(blockheight: Long): Boolean =
localCommit.spec.htlcs.exists(htlc => htlc.direction == OUT && blockheight >= htlc.add.expiry) ||
remoteCommit.spec.htlcs.exists(htlc => htlc.direction == IN && blockheight >= htlc.add.expiry) ||
remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit.spec.htlcs.exists(htlc => htlc.direction == IN && blockheight >= htlc.add.expiry)).getOrElse(false)
localCommit.spec.htlcs.exists(htlc => htlc.direction == OUT && blockheight >= htlc.add.cltvExpiry) ||
remoteCommit.spec.htlcs.exists(htlc => htlc.direction == IN && blockheight >= htlc.add.cltvExpiry) ||
remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit.spec.htlcs.exists(htlc => htlc.direction == IN && blockheight >= htlc.add.cltvExpiry)).getOrElse(false)
def addLocalProposal(proposal: UpdateMessage): Commitments = Commitments.addLocalProposal(this, proposal)
@ -102,13 +102,13 @@ object Commitments {
val blockCount = Globals.blockCount.get()
// our counterparty needs a reasonable amount of time to pull the funds from downstream before we can get refunded (see BOLT 2 and BOLT 11 for a calculation and rationale)
val minExpiry = blockCount + Channel.MIN_CLTV_EXPIRY
if (cmd.expiry < minExpiry) {
return Left(ExpiryTooSmall(commitments.channelId, minimum = minExpiry, actual = cmd.expiry, blockCount = blockCount))
if (cmd.cltvExpiry < minExpiry) {
return Left(ExpiryTooSmall(commitments.channelId, minimum = minExpiry, actual = cmd.cltvExpiry, blockCount = blockCount))
}
val maxExpiry = blockCount + Channel.MAX_CLTV_EXPIRY
// we don't want to use too high a refund timeout, because our funds will be locked during that time if the payment is never fulfilled
if (cmd.expiry >= maxExpiry) {
return Left(ExpiryTooBig(commitments.channelId, maximum = maxExpiry, actual = cmd.expiry, blockCount = blockCount))
if (cmd.cltvExpiry >= maxExpiry) {
return Left(ExpiryTooBig(commitments.channelId, maximum = maxExpiry, actual = cmd.cltvExpiry, blockCount = blockCount))
}
if (cmd.amountMsat < commitments.remoteParams.htlcMinimumMsat) {
@ -116,7 +116,7 @@ object Commitments {
}
// let's compute the current commitment *as seen by them* with this change taken into account
val add = UpdateAddHtlc(commitments.channelId, commitments.localNextHtlcId, cmd.amountMsat, cmd.paymentHash, cmd.expiry, cmd.onion)
val add = UpdateAddHtlc(commitments.channelId, commitments.localNextHtlcId, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry, cmd.onion)
// we increment the local htlc index and add an entry to the origins map
val commitments1 = addLocalProposal(commitments, add).copy(localNextHtlcId = commitments.localNextHtlcId + 1, originChannels = commitments.originChannels + (add.id -> origin))
// we need to base the next current commitment on the last sig we sent, even if we didn't yet receive their revocation
@ -540,17 +540,17 @@ object Commitments {
| toLocal: ${commitments.localCommit.spec.toLocalMsat}
| toRemote: ${commitments.localCommit.spec.toRemoteMsat}
| htlcs:
|${commitments.localCommit.spec.htlcs.map(h => s" ${h.direction} ${h.add.id} ${h.add.expiry}").mkString("\n")}
|${commitments.localCommit.spec.htlcs.map(h => s" ${h.direction} ${h.add.id} ${h.add.cltvExpiry}").mkString("\n")}
|remotecommit:
| toLocal: ${commitments.remoteCommit.spec.toLocalMsat}
| toRemote: ${commitments.remoteCommit.spec.toRemoteMsat}
| htlcs:
|${commitments.remoteCommit.spec.htlcs.map(h => s" ${h.direction} ${h.add.id} ${h.add.expiry}").mkString("\n")}
|${commitments.remoteCommit.spec.htlcs.map(h => s" ${h.direction} ${h.add.id} ${h.add.cltvExpiry}").mkString("\n")}
|next remotecommit:
| toLocal: ${commitments.remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit.spec.toLocalMsat).getOrElse("N/A")}
| toRemote: ${commitments.remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit.spec.toRemoteMsat).getOrElse("N/A")}
| htlcs:
|${commitments.remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit.spec.htlcs.map(h => s" ${h.direction} ${h.add.id} ${h.add.expiry}").mkString("\n")).getOrElse("N/A")}""".stripMargin
|${commitments.remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit.spec.htlcs.map(h => s" ${h.direction} ${h.add.id} ${h.add.cltvExpiry}").mkString("\n")).getOrElse("N/A")}""".stripMargin
}
}


@ -27,12 +27,12 @@ import fr.acinq.bitcoin.{BinaryData, DeterministicWallet, MilliSatoshi, Protocol
import fr.acinq.eclair.blockchain.EclairWallet
import fr.acinq.eclair.channel._
import fr.acinq.eclair.crypto.TransportHandler
import fr.acinq.eclair.crypto.TransportHandler
import fr.acinq.eclair.router._
import fr.acinq.eclair.wire._
import fr.acinq.eclair.{wire, _}
import scodec.Attempt
import scala.compat.Platform
import scala.concurrent.duration._
import scala.util.Random
@ -73,42 +73,43 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
stay using d.copy(attempts = attempts + 1)
}
case Event(Authenticator.Authenticated(_, transport, remoteNodeId, address, outgoing, origin_opt), DisconnectedData(_, channels, _, init)) =>
case Event(Authenticator.Authenticated(_, transport, remoteNodeId1, address, outgoing, origin_opt), d: DisconnectedData) =>
require(remoteNodeId == remoteNodeId1, s"invalid nodeid: $remoteNodeId != $remoteNodeId1")
log.debug(s"got authenticated connection to $remoteNodeId@${address.getHostString}:${address.getPort}")
transport ! TransportHandler.Listener(self)
context watch transport
val localInit = init.getOrElse(wire.Init(globalFeatures = nodeParams.globalFeatures, localFeatures = nodeParams.localFeatures))
val localInit = d.localInit.getOrElse(wire.Init(globalFeatures = nodeParams.globalFeatures, localFeatures = nodeParams.localFeatures))
transport ! localInit
// we store the ip upon successful outgoing connection, keeping only the most recent one
if (outgoing) {
nodeParams.peersDb.addOrUpdatePeer(remoteNodeId, address)
}
goto(INITIALIZING) using InitializingData(if (outgoing) Some(address) else None, transport, channels, origin_opt, localInit)
goto(INITIALIZING) using InitializingData(if (outgoing) Some(address) else None, transport, d.channels, origin_opt, localInit)
case Event(Terminated(actor), d@DisconnectedData(_, channels, _, _)) if channels.exists(_._2 == actor) =>
val h = channels.filter(_._2 == actor).map(_._1)
case Event(Terminated(actor), d: DisconnectedData) if d.channels.exists(_._2 == actor) =>
val h = d.channels.filter(_._2 == actor).keys
log.info(s"channel closed: channelId=${h.mkString("/")}")
stay using d.copy(channels = channels -- h)
stay using d.copy(channels = d.channels -- h)
case Event(_: wire.LightningMessage, _) => stay // we probably just got disconnected and that's the last messages we received
}
when(INITIALIZING) {
case Event(remoteInit: wire.Init, InitializingData(address_opt, transport, channels, origin_opt, localInit)) =>
transport ! TransportHandler.ReadAck(remoteInit)
case Event(remoteInit: wire.Init, d: InitializingData) =>
d.transport ! TransportHandler.ReadAck(remoteInit)
val remoteHasInitialRoutingSync = Features.hasFeature(remoteInit.localFeatures, Features.INITIAL_ROUTING_SYNC_BIT_OPTIONAL)
val remoteHasChannelRangeQueriesOptional = Features.hasFeature(remoteInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_BIT_OPTIONAL)
val remoteHasChannelRangeQueriesMandatory = Features.hasFeature(remoteInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_BIT_MANDATORY)
val remoteHasChannelRangeQueriesExOptional = Features.hasFeature(remoteInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_EX_BIT_OPTIONAL)
val remoteHasChannelRangeQueriesExMandatory = Features.hasFeature(remoteInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_EX_BIT_MANDATORY)
val localHasChannelRangeQueriesOptional = Features.hasFeature(localInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_BIT_OPTIONAL)
val localHasChannelRangeQueriesMandatory = Features.hasFeature(localInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_BIT_MANDATORY)
val localHasChannelRangeQueriesExOptional = Features.hasFeature(localInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_EX_BIT_OPTIONAL)
val localHasChannelRangeQueriesExMandatory = Features.hasFeature(localInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_EX_BIT_MANDATORY)
val localHasChannelRangeQueriesOptional = Features.hasFeature(d.localInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_BIT_OPTIONAL)
val localHasChannelRangeQueriesMandatory = Features.hasFeature(d.localInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_BIT_MANDATORY)
val localHasChannelRangeQueriesExOptional = Features.hasFeature(d.localInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_EX_BIT_OPTIONAL)
val localHasChannelRangeQueriesExMandatory = Features.hasFeature(d.localInit.localFeatures, Features.CHANNEL_RANGE_QUERIES_EX_BIT_MANDATORY)
log.info(s"$remoteNodeId has features: initialRoutingSync=$remoteHasInitialRoutingSync channelRangeQueriesOptional=$remoteHasChannelRangeQueriesOptional channelRangeQueriesMandatory=$remoteHasChannelRangeQueriesMandatory")
if (Features.areSupported(remoteInit.localFeatures)) {
origin_opt.map(origin => origin ! "connected")
d.origin_opt.foreach(origin => origin ! "connected")
if (remoteHasInitialRoutingSync) {
if (remoteHasChannelRangeQueriesExOptional || remoteHasChannelRangeQueriesExMandatory) {
@ -125,25 +126,25 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
// TODO: this is a hack for the Android version: don't send queries if we advertise that we don't support them
if ((localHasChannelRangeQueriesExOptional || localHasChannelRangeQueriesExMandatory) && (remoteHasChannelRangeQueriesExOptional || remoteHasChannelRangeQueriesExMandatory)) {
// if they support extended channel queries, always ask for their filter
router ! SendChannelQueryEx(remoteNodeId, transport)
router ! SendChannelQueryEx(remoteNodeId, d.transport)
} else if ((localHasChannelRangeQueriesOptional || localHasChannelRangeQueriesMandatory) && (remoteHasChannelRangeQueriesOptional || remoteHasChannelRangeQueriesMandatory)) {
// if they support channel queries, always ask for their filter
router ! SendChannelQuery(remoteNodeId, transport)
router ! SendChannelQuery(remoteNodeId, d.transport)
}
// let's bring existing/requested channels online
channels.values.toSet[ActorRef].foreach(_ ! INPUT_RECONNECTED(transport)) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
goto(CONNECTED) using ConnectedData(address_opt, transport, remoteInit, channels.map { case (k: ChannelId, v) => (k, v) }, localInit = localInit)
d.channels.values.toSet[ActorRef].foreach(_ ! INPUT_RECONNECTED(d.transport)) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
goto(CONNECTED) using ConnectedData(d.address_opt, d.transport, d.localInit, remoteInit, d.channels.map { case (k: ChannelId, v) => (k, v) })
} else {
log.warning(s"incompatible features, disconnecting")
origin_opt.map(origin => origin ! Status.Failure(new RuntimeException("incompatible features")))
transport ! PoisonPill
d.origin_opt.foreach(origin => origin ! Status.Failure(new RuntimeException("incompatible features")))
d.transport ! PoisonPill
stay
}
case Event(Authenticator.Authenticated(connection, _, _, _, _, origin_opt), _) =>
// two connections in parallel
origin_opt.map(origin => origin ! Status.Failure(new RuntimeException("there is another connection attempt in progress")))
origin_opt.foreach(origin => origin ! Status.Failure(new RuntimeException("there is another connection attempt in progress")))
// we kill this one
log.warning(s"killing parallel connection $connection")
connection ! PoisonPill
@ -155,105 +156,122 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
context.system.scheduler.scheduleOnce(100 milliseconds, self, o)
stay
case Event(Terminated(actor), InitializingData(address_opt, transport, channels, _, _)) if actor == transport =>
case Event(Terminated(actor), d: InitializingData) if actor == d.transport =>
log.warning(s"lost connection to $remoteNodeId")
goto(DISCONNECTED) using DisconnectedData(address_opt, channels)
goto(DISCONNECTED) using DisconnectedData(d.address_opt, d.channels)
case Event(Terminated(actor), d@InitializingData(_, _, channels, _, _)) if channels.exists(_._2 == actor) =>
val h = channels.filter(_._2 == actor).map(_._1)
case Event(Terminated(actor), d: InitializingData) if d.channels.exists(_._2 == actor) =>
val h = d.channels.filter(_._2 == actor).keys
log.info(s"channel closed: channelId=${h.mkString("/")}")
stay using d.copy(channels = channels -- h)
stay using d.copy(channels = d.channels -- h)
}
when(CONNECTED, stateTimeout = nodeParams.pingInterval) {
case Event(StateTimeout, ConnectedData(_, transport, _, _, _, _ ,_)) =>
when(CONNECTED) {
case Event(SendPing, d: ConnectedData) =>
// no need to use secure random here
val pingSize = Random.nextInt(1000)
val pongSize = Random.nextInt(1000)
transport ! wire.Ping(pongSize, BinaryData("00" * pingSize))
val ping = wire.Ping(pongSize, BinaryData("00" * pingSize))
setTimer(PingTimeout.toString, PingTimeout(ping), 10 seconds, repeat = false)
d.transport ! ping
stay using d.copy(expectedPong_opt = Some(ExpectedPong(ping)))
case Event(PingTimeout(ping), d: ConnectedData) =>
log.warning(s"no response to ping=$ping, closing connection")
d.transport ! PoisonPill
stay
case Event(ping@wire.Ping(pongLength, _), ConnectedData(_, transport, _, _, _, _ ,_)) =>
transport ! TransportHandler.ReadAck(ping)
// TODO: (optional) check against the expected data size tat we requested when we sent ping messages
if (pongLength > 0) {
transport ! wire.Pong(BinaryData("00" * pongLength))
case Event(ping@wire.Ping(pongLength, _), d: ConnectedData) =>
d.transport ! TransportHandler.ReadAck(ping)
if (pongLength <= 65532) {
// see BOLT 1: we reply only if requested pong length is acceptable
d.transport ! wire.Pong(BinaryData(Seq.fill[Byte](pongLength)(0.toByte)))
} else {
log.warning(s"ignoring invalid ping with pongLength=${ping.pongLength}")
}
stay
case Event(pong@wire.Pong(data), ConnectedData(_, transport, _, _, _, _ ,_)) =>
transport ! TransportHandler.ReadAck(pong)
// TODO: compute latency for remote peer ?
log.debug(s"received pong with ${data.length} bytes")
stay
case Event(pong@wire.Pong(data), d: ConnectedData) =>
d.transport ! TransportHandler.ReadAck(pong)
d.expectedPong_opt match {
case Some(ExpectedPong(ping, timestamp)) if ping.pongLength == data.length =>
// we use the pong size to correlate between pings and pongs
val latency = Platform.currentTime - timestamp
log.debug(s"received pong with latency=$latency")
cancelTimer(PingTimeout.toString())
schedulePing()
case None =>
log.debug(s"received unexpected pong with size=${data.length}")
}
stay using d.copy(expectedPong_opt = None)
case Event(err@wire.Error(channelId, reason), ConnectedData(_, transport, _, channels, _, _ ,_)) if channelId == CHANNELID_ZERO =>
transport ! TransportHandler.ReadAck(err)
case Event(err@wire.Error(channelId, reason), d: ConnectedData) if channelId == CHANNELID_ZERO =>
d.transport ! TransportHandler.ReadAck(err)
log.error(s"connection-level error, failing all channels! reason=${new String(reason)}")
channels.values.toSet[ActorRef].foreach(_ forward err) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
transport ! PoisonPill
d.channels.values.toSet[ActorRef].foreach(_ forward err) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
d.transport ! PoisonPill
stay
case Event(err: wire.Error, ConnectedData(_, transport, _, channels, _, _ ,_)) =>
transport ! TransportHandler.ReadAck(err)
case Event(err: wire.Error, d: ConnectedData) =>
d.transport ! TransportHandler.ReadAck(err)
// error messages are a bit special because they can contain either temporaryChannelId or channelId (see BOLT 1)
channels.get(FinalChannelId(err.channelId)).orElse(channels.get(TemporaryChannelId(err.channelId))) match {
d.channels.get(FinalChannelId(err.channelId)).orElse(d.channels.get(TemporaryChannelId(err.channelId))) match {
case Some(channel) => channel forward err
case None => transport ! wire.Error(err.channelId, UNKNOWN_CHANNEL_MESSAGE)
case None => d.transport ! wire.Error(err.channelId, UNKNOWN_CHANNEL_MESSAGE)
}
stay
case Event(c: Peer.OpenChannel, d@ConnectedData(_, transport, remoteInit, channels, _, _ ,_)) =>
case Event(c: Peer.OpenChannel, d: ConnectedData) =>
log.info(s"requesting a new channel to $remoteNodeId with fundingSatoshis=${c.fundingSatoshis}, pushMsat=${c.pushMsat} and fundingFeeratePerByte=${c.fundingTxFeeratePerKw_opt}")
val (channel, localParams) = createNewChannel(nodeParams, funder = true, c.fundingSatoshis.toLong, origin_opt = Some(sender))
val temporaryChannelId = randomBytes(32)
val channelFeeratePerKw = Globals.feeratesPerKw.get.blocks_2
val fundingTxFeeratePerKw = c.fundingTxFeeratePerKw_opt.getOrElse(Globals.feeratesPerKw.get.blocks_6)
channel ! INPUT_INIT_FUNDER(temporaryChannelId, c.fundingSatoshis.amount, c.pushMsat.amount, channelFeeratePerKw, fundingTxFeeratePerKw, localParams, transport, remoteInit, c.channelFlags.getOrElse(nodeParams.channelFlags))
stay using d.copy(channels = channels + (TemporaryChannelId(temporaryChannelId) -> channel))
channel ! INPUT_INIT_FUNDER(temporaryChannelId, c.fundingSatoshis.amount, c.pushMsat.amount, channelFeeratePerKw, fundingTxFeeratePerKw, localParams, d.transport, d.remoteInit, c.channelFlags.getOrElse(nodeParams.channelFlags))
stay using d.copy(channels = d.channels + (TemporaryChannelId(temporaryChannelId) -> channel))
case Event(msg: wire.OpenChannel, d@ConnectedData(_, transport, remoteInit, channels, _, _ ,_)) =>
transport ! TransportHandler.ReadAck(msg)
channels.get(TemporaryChannelId(msg.temporaryChannelId)) match {
case Event(msg: wire.OpenChannel, d: ConnectedData) =>
d.transport ! TransportHandler.ReadAck(msg)
d.channels.get(TemporaryChannelId(msg.temporaryChannelId)) match {
case None =>
log.info(s"accepting a new channel to $remoteNodeId")
val (channel, localParams) = createNewChannel(nodeParams, funder = false, fundingSatoshis = msg.fundingSatoshis, origin_opt = None)
val temporaryChannelId = msg.temporaryChannelId
channel ! INPUT_INIT_FUNDEE(temporaryChannelId, localParams, transport, remoteInit)
channel ! INPUT_INIT_FUNDEE(temporaryChannelId, localParams, d.transport, d.remoteInit)
channel ! msg
stay using d.copy(channels = channels + (TemporaryChannelId(temporaryChannelId) -> channel))
stay using d.copy(channels = d.channels + (TemporaryChannelId(temporaryChannelId) -> channel))
case Some(_) =>
log.warning(s"ignoring open_channel with duplicate temporaryChannelId=${msg.temporaryChannelId}")
stay
}
case Event(msg: wire.HasChannelId, ConnectedData(_, transport, _, channels, _, _, _)) =>
transport ! TransportHandler.ReadAck(msg)
channels.get(FinalChannelId(msg.channelId)) match {
case Event(msg: wire.HasChannelId, d: ConnectedData) =>
d.transport ! TransportHandler.ReadAck(msg)
d.channels.get(FinalChannelId(msg.channelId)) match {
case Some(channel) => channel forward msg
case None => transport ! wire.Error(msg.channelId, UNKNOWN_CHANNEL_MESSAGE)
case None => d.transport ! wire.Error(msg.channelId, UNKNOWN_CHANNEL_MESSAGE)
}
stay
case Event(msg: wire.HasTemporaryChannelId, ConnectedData(_, transport, _, channels, _, _, _)) =>
transport ! TransportHandler.ReadAck(msg)
channels.get(TemporaryChannelId(msg.temporaryChannelId)) match {
case Event(msg: wire.HasTemporaryChannelId, d: ConnectedData) =>
d.transport ! TransportHandler.ReadAck(msg)
d.channels.get(TemporaryChannelId(msg.temporaryChannelId)) match {
case Some(channel) => channel forward msg
case None => transport ! wire.Error(msg.temporaryChannelId, UNKNOWN_CHANNEL_MESSAGE)
case None => d.transport ! wire.Error(msg.temporaryChannelId, UNKNOWN_CHANNEL_MESSAGE)
}
stay
case Event(ChannelIdAssigned(channel, _, temporaryChannelId, channelId), d@ConnectedData(_, _, _, channels, _, _, _)) if channels.contains(TemporaryChannelId(temporaryChannelId)) =>
case Event(ChannelIdAssigned(channel, _, temporaryChannelId, channelId), d: ConnectedData) if d.channels.contains(TemporaryChannelId(temporaryChannelId)) =>
log.info(s"channel id switch: previousId=$temporaryChannelId nextId=$channelId")
// NB: we keep the temporary channel id because the switch is not always acknowledged at this point (see https://github.com/lightningnetwork/lightning-rfc/pull/151)
// we won't clean it up, but we won't remember the temporary id on channel termination
stay using d.copy(channels = channels + (FinalChannelId(channelId) -> channel))
stay using d.copy(channels = d.channels + (FinalChannelId(channelId) -> channel))
case Event(RoutingState(channels, updates, nodes), ConnectedData(_, transport, _, _, _, _, _)) =>
case Event(RoutingState(channels, updates, nodes), d: ConnectedData) =>
// let's send the messages
def send(announcements: Iterable[_ <: LightningMessage]) = announcements.foldLeft(0) {
case (c, ann) =>
transport ! ann
d.transport ! ann
c + 1
}
@ -264,7 +282,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
log.info(s"sent all announcements to {}: channels={} updates={} nodes={}", remoteNodeId, channelsSent, updatesSent, nodesSent)
stay
case Event(rebroadcast: Rebroadcast, ConnectedData(_, transport, _, _, maybeGossipTimestampFilter, _, _)) =>
case Event(rebroadcast: Rebroadcast, d: ConnectedData) =>
/**
* Send and count in a single iteration
@ -273,11 +291,11 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
case (count, (_, origins)) if origins.contains(self) =>
// the announcement came from this peer, we don't send it back
count
case (count, (msg: HasTimestamp, _)) if !timestampInRange(msg, maybeGossipTimestampFilter) =>
case (count, (msg: HasTimestamp, _)) if !timestampInRange(msg, d.gossipTimestampFilter) =>
// the peer has set up a filter on timestamp and this message is out of range
count
case (count, (msg, _)) =>
transport ! msg
d.transport ! msg
count + 1
}
@ -290,7 +308,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
}
stay
case Event(msg: GossipTimestampFilter, data: ConnectedData) =>
case Event(msg: GossipTimestampFilter, d: ConnectedData) =>
// special case: time range filters are peer specific and must not be sent to the router
sender ! TransportHandler.ReadAck(msg)
if (msg.chainHash != nodeParams.chainHash) {
@ -299,26 +317,26 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
} else {
log.info(s"setting up gossipTimestampFilter=$msg")
// update their timestamp filter
stay using data.copy(gossipTimestampFilter = Some(msg))
stay using d.copy(gossipTimestampFilter = Some(msg))
}
case Event(msg: wire.RoutingMessage, ConnectedData(_, transport, _, _, _, _, behavior)) =>
case Event(msg: wire.RoutingMessage, d: ConnectedData) =>
msg match {
case _: ChannelAnnouncement | _: ChannelUpdate | _: NodeAnnouncement if behavior.ignoreNetworkAnnouncement =>
case _: ChannelAnnouncement | _: ChannelUpdate | _: NodeAnnouncement if d.behavior.ignoreNetworkAnnouncement =>
// this peer is currently under embargo!
sender ! TransportHandler.ReadAck(msg)
case _ =>
// Note: we don't ack messages here because we don't want them to be stacked in the router's mailbox
router ! PeerRoutingMessage(transport, remoteNodeId, msg)
router ! PeerRoutingMessage(d.transport, remoteNodeId, msg)
}
stay
case Event(readAck: TransportHandler.ReadAck, c: ConnectedData) =>
case Event(readAck: TransportHandler.ReadAck, d: ConnectedData) =>
// we just forward acks from router to transport
c.transport forward readAck
d.transport forward readAck
stay
case Event(badMessage: BadMessage, data@ConnectedData(_, transport, _, _, _, _, behavior)) =>
case Event(badMessage: BadMessage, d: ConnectedData) =>
val behavior1 = badMessage match {
case InvalidSignature(r) =>
val bin: String = LightningMessageCodecs.lightningMessageCodec.encode(r) match {
@ -327,66 +345,64 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
}
log.error(s"peer sent us a routing message with invalid sig: r=$r bin=$bin")
// for now we just return an error, maybe ban the peer in the future?
transport ! Error(CHANNELID_ZERO, s"bad announcement sig! bin=$bin".getBytes())
behavior
d.transport ! Error(CHANNELID_ZERO, s"bad announcement sig! bin=$bin".getBytes())
d.behavior
case ChannelClosed(_) =>
if (behavior.ignoreNetworkAnnouncement) {
if (d.behavior.ignoreNetworkAnnouncement) {
// we already are ignoring announcements, we may have additional notifications for announcements that were received right before our ban
behavior.copy(fundingTxAlreadySpentCount = behavior.fundingTxAlreadySpentCount + 1)
} else if (behavior.fundingTxAlreadySpentCount < MAX_FUNDING_TX_ALREADY_SPENT) {
behavior.copy(fundingTxAlreadySpentCount = behavior.fundingTxAlreadySpentCount + 1)
d.behavior.copy(fundingTxAlreadySpentCount = d.behavior.fundingTxAlreadySpentCount + 1)
} else if (d.behavior.fundingTxAlreadySpentCount < MAX_FUNDING_TX_ALREADY_SPENT) {
d.behavior.copy(fundingTxAlreadySpentCount = d.behavior.fundingTxAlreadySpentCount + 1)
} else {
log.warning(s"peer sent us too many channel announcements with funding tx already spent (count=${behavior.fundingTxAlreadySpentCount + 1}), ignoring network announcements for $IGNORE_NETWORK_ANNOUNCEMENTS_PERIOD")
import scala.concurrent.ExecutionContext.Implicits.global
context.system.scheduler.scheduleOnce(IGNORE_NETWORK_ANNOUNCEMENTS_PERIOD, self, ResumeAnnouncements)
behavior.copy(fundingTxAlreadySpentCount = behavior.fundingTxAlreadySpentCount + 1, ignoreNetworkAnnouncement = true)
log.warning(s"peer sent us too many channel announcements with funding tx already spent (count=${d.behavior.fundingTxAlreadySpentCount + 1}), ignoring network announcements for $IGNORE_NETWORK_ANNOUNCEMENTS_PERIOD")
setTimer(ResumeAnnouncements.toString, ResumeAnnouncements, IGNORE_NETWORK_ANNOUNCEMENTS_PERIOD, repeat = false)
d.behavior.copy(fundingTxAlreadySpentCount = d.behavior.fundingTxAlreadySpentCount + 1, ignoreNetworkAnnouncement = true)
}
case NonexistingChannel(_) =>
// this should never happen, unless we are not in sync or there is a 6+ blocks reorg
if (behavior.ignoreNetworkAnnouncement) {
if (d.behavior.ignoreNetworkAnnouncement) {
// we already are ignoring announcements, we may have additional notifications for announcements that were received right before our ban
behavior.copy(fundingTxNotFoundCount = behavior.fundingTxNotFoundCount + 1)
} else if (behavior.fundingTxNotFoundCount < MAX_FUNDING_TX_NOT_FOUND) {
behavior.copy(fundingTxNotFoundCount = behavior.fundingTxNotFoundCount + 1)
d.behavior.copy(fundingTxNotFoundCount = d.behavior.fundingTxNotFoundCount + 1)
} else if (d.behavior.fundingTxNotFoundCount < MAX_FUNDING_TX_NOT_FOUND) {
d.behavior.copy(fundingTxNotFoundCount = d.behavior.fundingTxNotFoundCount + 1)
} else {
log.warning(s"peer sent us too many channel announcements with non-existing funding tx (count=${behavior.fundingTxNotFoundCount + 1}), ignoring network announcements for $IGNORE_NETWORK_ANNOUNCEMENTS_PERIOD")
import scala.concurrent.ExecutionContext.Implicits.global
context.system.scheduler.scheduleOnce(IGNORE_NETWORK_ANNOUNCEMENTS_PERIOD, self, ResumeAnnouncements)
behavior.copy(fundingTxNotFoundCount = behavior.fundingTxNotFoundCount + 1, ignoreNetworkAnnouncement = true)
log.warning(s"peer sent us too many channel announcements with non-existing funding tx (count=${d.behavior.fundingTxNotFoundCount + 1}), ignoring network announcements for $IGNORE_NETWORK_ANNOUNCEMENTS_PERIOD")
setTimer(ResumeAnnouncements.toString, ResumeAnnouncements, IGNORE_NETWORK_ANNOUNCEMENTS_PERIOD, repeat = false)
d.behavior.copy(fundingTxNotFoundCount = d.behavior.fundingTxNotFoundCount + 1, ignoreNetworkAnnouncement = true)
}
}
stay using data.copy(behavior = behavior1)
stay using d.copy(behavior = behavior1)
case Event(ResumeAnnouncements, data: ConnectedData) =>
case Event(ResumeAnnouncements, d: ConnectedData) =>
log.info(s"resuming processing of network announcements for peer")
stay using data.copy(behavior = data.behavior.copy(fundingTxAlreadySpentCount = 0, ignoreNetworkAnnouncement = false))
stay using d.copy(behavior = d.behavior.copy(fundingTxAlreadySpentCount = 0, ignoreNetworkAnnouncement = false))
case Event(Disconnect, c:ConnectedData) =>
c.transport ! PoisonPill
case Event(Disconnect, d: ConnectedData) =>
d.transport ! PoisonPill
stay
case Event(Terminated(actor), ConnectedData(address_opt, transport, _, channels, _, _, _)) if actor == transport =>
case Event(Terminated(actor), d: ConnectedData) if actor == d.transport =>
log.info(s"lost connection to $remoteNodeId")
channels.values.toSet[ActorRef].foreach(_ ! INPUT_DISCONNECTED) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
goto(DISCONNECTED) using DisconnectedData(address_opt, channels.collect { case (k: FinalChannelId, v) => (k, v) })
d.channels.values.toSet[ActorRef].foreach(_ ! INPUT_DISCONNECTED) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
goto(DISCONNECTED) using DisconnectedData(d.address_opt, d.channels.collect { case (k: FinalChannelId, v) => (k, v) })
case Event(Terminated(actor), d@ConnectedData(_, transport, _, channels, _, _, _)) if channels.values.toSet.contains(actor) =>
case Event(Terminated(actor), d: ConnectedData) if d.channels.values.toSet.contains(actor) =>
// we will have at most 2 ids: a TemporaryChannelId and a FinalChannelId
val channelIds = channels.filter(_._2 == actor).keys
val channelIds = d.channels.filter(_._2 == actor).keys
log.info(s"channel closed: channelId=${channelIds.mkString("/")}")
if (channels.values.toSet - actor == Set.empty) {
if (d.channels.values.toSet - actor == Set.empty) {
log.info(s"that was the last open channel, closing the connection")
transport ! PoisonPill
d.transport ! PoisonPill
}
stay using d.copy(channels = channels -- channelIds)
stay using d.copy(channels = d.channels -- channelIds)
case Event(h: Authenticator.Authenticated, ConnectedData(address_opt, oldTransport, _, channels, _, _, _)) =>
case Event(h: Authenticator.Authenticated, d: ConnectedData) =>
log.info(s"got new transport while already connected, switching to new transport")
context unwatch oldTransport
oldTransport ! PoisonPill
channels.values.toSet[ActorRef].foreach(_ ! INPUT_DISCONNECTED) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
context unwatch d.transport
d.transport ! PoisonPill
d.channels.values.toSet[ActorRef].foreach(_ ! INPUT_DISCONNECTED) // we deduplicate with toSet because there might be two entries per channel (tmp id and final id)
self ! h
goto(DISCONNECTED) using DisconnectedData(address_opt, channels.collect { case (k: FinalChannelId, v) => (k, v) })
goto(DISCONNECTED) using DisconnectedData(d.address_opt, d.channels.collect { case (k: FinalChannelId, v) => (k, v) })
}
whenUnhandled {
@ -403,11 +419,16 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
stay
case Event(_: TransportHandler.ReadAck, _) => stay // ignored
case Event(SendPing, _) => stay // we got disconnected in the meantime
case Event(_: Pong, _) => stay // we got disconnected before receiving the pong
}
onTransition {
case _ -> DISCONNECTED if nodeParams.autoReconnect && nextStateData.address_opt.isDefined => setTimer(RECONNECT_TIMER, Reconnect, 1 second, repeat = false)
case DISCONNECTED -> _ if nodeParams.autoReconnect && stateData.address_opt.isDefined => cancelTimer(RECONNECT_TIMER)
case _ -> CONNECTED => schedulePing()
}
def createNewChannel(nodeParams: NodeParams, funder: Boolean, fundingSatoshis: Long, origin_opt: Option[ActorRef]): (ActorRef, LocalParams) = {
@ -430,6 +451,12 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
override def mdc(currentMessage: Any): MDC = Logs.mdc(remoteNodeId_opt = Some(remoteNodeId))
def schedulePing(): Unit = {
// pings are sent periodically, with some randomization
val nextDelay = nodeParams.pingInterval + secureRandom.nextInt(10).seconds
setTimer(SendPing.toString, SendPing, nextDelay, repeat = false)
}
}
object Peer {
@ -461,7 +488,9 @@ object Peer {
case class Nothing() extends Data { override def address_opt = None; override def channels = Map.empty }
case class DisconnectedData(address_opt: Option[InetSocketAddress], channels: Map[FinalChannelId, ActorRef], attempts: Int = 0, localInit: Option[wire.Init] = None) extends Data
case class InitializingData(address_opt: Option[InetSocketAddress], transport: ActorRef, channels: Map[FinalChannelId, ActorRef], origin_opt: Option[ActorRef], localInit: wire.Init) extends Data
case class ConnectedData(address_opt: Option[InetSocketAddress], transport: ActorRef, remoteInit: wire.Init, channels: Map[ChannelId, ActorRef], gossipTimestampFilter: Option[GossipTimestampFilter] = None, localInit: wire.Init, behavior: Behavior = Behavior()) extends Data
case class ConnectedData(address_opt: Option[InetSocketAddress], transport: ActorRef, localInit: wire.Init, remoteInit: wire.Init, channels: Map[ChannelId, ActorRef], gossipTimestampFilter: Option[GossipTimestampFilter] = None, behavior: Behavior = Behavior(), expectedPong_opt: Option[ExpectedPong] = None) extends Data
case class ExpectedPong(ping: Ping, timestamp: Long = Platform.currentTime)
case class PingTimeout(ping: Ping)
sealed trait State
case object INSTANTIATING extends State
@ -482,6 +511,7 @@ object Peer {
require(fundingTxFeeratePerKw_opt.getOrElse(0L) >= 0, s"funding tx feerate must be positive")
}
case object GetPeerInfo
case object SendPing
case class PeerInfo(nodeId: PublicKey, state: String, address: Option[InetSocketAddress], channels: Int)
case class PeerRoutingMessage(transport: ActorRef, remoteNodeId: PublicKey, message: RoutingMessage)


@ -30,23 +30,25 @@ import scala.concurrent.duration._
import scala.util.Try
/**
* Simple payment handler that generates payment requests and fulfills incoming htlcs.
*
* Note that unfulfilled payment requests are kept forever if they don't have an expiry!
*
* Created by PM on 17/06/2016.
*/
class LocalPaymentHandler(nodeParams: NodeParams) extends Actor with ActorLogging {
implicit val ec: ExecutionContext = context.system.dispatcher
import LocalPaymentHandler._
context.system.scheduler.schedule(10 minutes, 10 minutes)(self ! Platform.currentTime / 1000)
implicit val ec: ExecutionContext = context.system.dispatcher
context.system.scheduler.schedule(10 minutes, 10 minutes)(self ! PurgeExpiredRequests)
override def receive: Receive = run(Map.empty)
def run(hash2preimage: Map[BinaryData, (BinaryData, PaymentRequest)]): Receive = {
def run(hash2preimage: Map[BinaryData, PendingPaymentRequest]): Receive = {
case currentSeconds: Long =>
context.become(run(hash2preimage.collect {
case e@(_, (_, pr)) if pr.expiry.isEmpty => e // requests that don't expire are kept forever
case e@(_, (_, pr)) if pr.timestamp + pr.expiry.get > currentSeconds => e // clean up expired requests
}))
case PurgeExpiredRequests =>
context.become(run(hash2preimage.filterNot { case (_, pr) => hasExpired(pr) }))
case ReceivePayment(amount_opt, desc, expirySeconds_opt, extraHops) =>
Try {
@ -59,7 +61,7 @@ class LocalPaymentHandler(nodeParams: NodeParams) extends Actor with ActorLoggin
val paymentRequest = PaymentRequest(nodeParams.chainHash, amount_opt, paymentHash, nodeParams.privateKey, desc, fallbackAddress = None, expirySeconds = Some(expirySeconds), extraHops = extraHops)
log.debug(s"generated payment request=${PaymentRequest.write(paymentRequest)} from amount=$amount_opt")
sender ! paymentRequest
context.become(run(hash2preimage + (paymentHash -> (paymentPreimage, paymentRequest))))
context.become(run(hash2preimage + (paymentHash -> PendingPaymentRequest(paymentPreimage, paymentRequest))))
} recover { case t => sender ! Status.Failure(t) }
case CheckPayment(paymentHash) =>
@ -69,14 +71,17 @@ class LocalPaymentHandler(nodeParams: NodeParams) extends Actor with ActorLoggin
}
case htlc: UpdateAddHtlc =>
hash2preimage.get(htlc.paymentHash) match {
case Some((paymentPreimage, paymentRequest)) =>
hash2preimage
.get(htlc.paymentHash) // we retrieve the request
.filterNot(hasExpired) // and filter it out if it is expired (it will be purged independently)
match {
case Some(PendingPaymentRequest(paymentPreimage, paymentRequest)) =>
val minFinalExpiry = Globals.blockCount.get() + paymentRequest.minFinalCltvExpiry.getOrElse(Channel.MIN_CLTV_EXPIRY)
// The htlc amount must be equal or greater than the requested amount. A slight overpaying is permitted, however
// it must not be greater than two times the requested amount.
// see https://github.com/lightningnetwork/lightning-rfc/blob/master/04-onion-routing.md#failure-messages
paymentRequest.amount match {
case _ if htlc.expiry < minFinalExpiry =>
case _ if htlc.cltvExpiry < minFinalExpiry =>
sender ! CMD_FAIL_HTLC(htlc.id, Right(FinalExpiryTooSoon), commit = true)
case Some(amount) if MilliSatoshi(htlc.amountMsat) < amount =>
log.warning(s"received payment with amount too small for paymentHash=${htlc.paymentHash} amountMsat=${htlc.amountMsat}")
@ -95,9 +100,25 @@ class LocalPaymentHandler(nodeParams: NodeParams) extends Actor with ActorLoggin
case None =>
sender ! CMD_FAIL_HTLC(htlc.id, Right(UnknownPaymentHash), commit = true)
}
case 'requests =>
// this is just for testing
sender ! hash2preimage
}
}
object LocalPaymentHandler {
def props(nodeParams: NodeParams): Props = Props(new LocalPaymentHandler(nodeParams))
case object PurgeExpiredRequests
case class PendingPaymentRequest(preimage: BinaryData, paymentRequest: PaymentRequest)
def hasExpired(pr: PendingPaymentRequest): Boolean = pr.paymentRequest.expiry match {
case Some(expiry) => pr.paymentRequest.timestamp + expiry <= Platform.currentTime / 1000
case None => false // this request will never expire
}
}


@ -200,7 +200,7 @@ object Relayer {
case class FinalPayload(add: UpdateAddHtlc, payload: PerHopPayload) extends NextPayload
case class RelayPayload(add: UpdateAddHtlc, payload: PerHopPayload, nextPacket: Sphinx.Packet) extends NextPayload {
val relayFeeSatoshi = add.amountMsat - payload.amtToForward
val expiryDelta = add.expiry - payload.outgoingCltvValue
val expiryDelta = add.cltvExpiry - payload.outgoingCltvValue
}
// @formatter:on
@ -240,8 +240,8 @@ object Relayer {
finalPayload.payload match {
case PerHopPayload(_, finalAmountToForward, _) if finalAmountToForward > add.amountMsat =>
Left(CMD_FAIL_HTLC(add.id, Right(FinalIncorrectHtlcAmount(add.amountMsat)), commit = true))
case PerHopPayload(_, _, finalOutgoingCltvValue) if finalOutgoingCltvValue != add.expiry =>
Left(CMD_FAIL_HTLC(add.id, Right(FinalIncorrectCltvExpiry(add.expiry)), commit = true))
case PerHopPayload(_, _, finalOutgoingCltvValue) if finalOutgoingCltvValue != add.cltvExpiry =>
Left(CMD_FAIL_HTLC(add.id, Right(FinalIncorrectCltvExpiry(add.cltvExpiry)), commit = true))
case _ =>
Right(add)
}
@ -265,7 +265,7 @@ object Relayer {
case Some(channelUpdate) if payload.amtToForward < channelUpdate.htlcMinimumMsat =>
Left(CMD_FAIL_HTLC(add.id, Right(AmountBelowMinimum(add.amountMsat, channelUpdate)), commit = true))
case Some(channelUpdate) if relayPayload.expiryDelta != channelUpdate.cltvExpiryDelta =>
Left(CMD_FAIL_HTLC(add.id, Right(IncorrectCltvExpiry(add.expiry, channelUpdate)), commit = true))
Left(CMD_FAIL_HTLC(add.id, Right(IncorrectCltvExpiry(add.cltvExpiry, channelUpdate)), commit = true))
case Some(channelUpdate) if relayPayload.relayFeeSatoshi < nodeFee(channelUpdate.feeBaseMsat, channelUpdate.feeProportionalMillionths, payload.amtToForward) =>
Left(CMD_FAIL_HTLC(add.id, Right(FeeInsufficient(add.amountMsat, channelUpdate)), commit = true))
case Some(channelUpdate) =>


@ -262,8 +262,8 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSMDiagnosticAct
val ignoredUpdates = getIgnoredChannelDesc(d.updates ++ d.privateUpdates ++ assistedUpdates, ignoreNodes) ++ ignoreChannels ++ d.excludedChannels
log.info(s"finding a route $start->$end with assistedChannels={} ignoreNodes={} ignoreChannels={} excludedChannels={}", assistedUpdates.keys.mkString(","), ignoreNodes.map(_.toBin).mkString(","), ignoreChannels.mkString(","), d.excludedChannels.mkString(","))
findRoute(d.graph, start, end, withEdges = assistedUpdates, withoutEdges = ignoredUpdates)
.map(r => sender ! RouteResponse(r, ignoreNodes, ignoreChannels))
.recover { case t => sender ! Status.Failure(t) }
.map(r => sender ! RouteResponse(r, ignoreNodes, ignoreChannels))
.recover { case t => sender ! Status.Failure(t) }
stay
case Event(SendChannelQuery(remoteNodeId, remote), d) =>


@ -200,7 +200,7 @@ object Transactions {
val htlcOfferedOutputs = trimOfferedHtlcs(localDustLimit, spec)
.map(htlc => TxOut(MilliSatoshi(htlc.add.amountMsat), pay2wsh(htlcOffered(localHtlcPubkey, remoteHtlcPubkey, localRevocationPubkey, ripemd160(htlc.add.paymentHash)))))
val htlcReceivedOutputs = trimReceivedHtlcs(localDustLimit, spec)
.map(htlc => TxOut(MilliSatoshi(htlc.add.amountMsat), pay2wsh(htlcReceived(localHtlcPubkey, remoteHtlcPubkey, localRevocationPubkey, ripemd160(htlc.add.paymentHash), htlc.add.expiry))))
.map(htlc => TxOut(MilliSatoshi(htlc.add.amountMsat), pay2wsh(htlcReceived(localHtlcPubkey, remoteHtlcPubkey, localRevocationPubkey, ripemd160(htlc.add.paymentHash), htlc.add.cltvExpiry))))
val txnumber = obscuredCommitTxNumber(commitTxNumber, localIsFunder, localPaymentBasePoint, remotePaymentBasePoint)
val (sequence, locktime) = encodeTxNumber(txnumber)
@@ -227,12 +227,12 @@
version = 2,
txIn = TxIn(input.outPoint, Array.emptyByteArray, 0x00000000L) :: Nil,
txOut = TxOut(amount, pay2wsh(toLocalDelayed(localRevocationPubkey, toLocalDelay, localDelayedPaymentPubkey))) :: Nil,
lockTime = htlc.expiry))
lockTime = htlc.cltvExpiry))
}
def makeHtlcSuccessTx(commitTx: Transaction, outputsAlreadyUsed: Set[Int], localDustLimit: Satoshi, localRevocationPubkey: PublicKey, toLocalDelay: Int, localDelayedPaymentPubkey: PublicKey, localHtlcPubkey: PublicKey, remoteHtlcPubkey: PublicKey, feeratePerKw: Long, htlc: UpdateAddHtlc): HtlcSuccessTx = {
val fee = weight2fee(feeratePerKw, htlcSuccessWeight)
val redeemScript = htlcReceived(localHtlcPubkey, remoteHtlcPubkey, localRevocationPubkey, ripemd160(htlc.paymentHash), htlc.expiry)
val redeemScript = htlcReceived(localHtlcPubkey, remoteHtlcPubkey, localRevocationPubkey, ripemd160(htlc.paymentHash), htlc.cltvExpiry)
val pubkeyScript = write(pay2wsh(redeemScript))
val outputIndex = findPubKeyScriptIndex(commitTx, pubkeyScript, outputsAlreadyUsed, amount_opt = Some(Satoshi(htlc.amountMsat / 1000)))
val amount = MilliSatoshi(htlc.amountMsat) - fee
@@ -286,7 +286,7 @@ object Transactions {
}
def makeClaimHtlcTimeoutTx(commitTx: Transaction, outputsAlreadyUsed: Set[Int], localDustLimit: Satoshi, localHtlcPubkey: PublicKey, remoteHtlcPubkey: PublicKey, remoteRevocationPubkey: PublicKey, localFinalScriptPubKey: BinaryData, htlc: UpdateAddHtlc, feeratePerKw: Long): ClaimHtlcTimeoutTx = {
val redeemScript = htlcReceived(remoteHtlcPubkey, localHtlcPubkey, remoteRevocationPubkey, ripemd160(htlc.paymentHash), htlc.expiry)
val redeemScript = htlcReceived(remoteHtlcPubkey, localHtlcPubkey, remoteRevocationPubkey, ripemd160(htlc.paymentHash), htlc.cltvExpiry)
val pubkeyScript = write(pay2wsh(redeemScript))
val outputIndex = findPubKeyScriptIndex(commitTx, pubkeyScript, outputsAlreadyUsed, amount_opt = Some(Satoshi(htlc.amountMsat / 1000)))
val input = InputInfo(OutPoint(commitTx, outputIndex), commitTx.txOut(outputIndex), write(redeemScript))
@@ -296,7 +296,7 @@
version = 2,
txIn = TxIn(input.outPoint, Array.emptyByteArray, 0x00000000L) :: Nil,
txOut = TxOut(Satoshi(0), localFinalScriptPubKey) :: Nil,
lockTime = htlc.expiry)
lockTime = htlc.cltvExpiry)
val weight = Transactions.addSigs(ClaimHtlcTimeoutTx(input, tx), BinaryData("00" * 73)).tx.weight()
val fee = weight2fee(feeratePerKw, weight)

View file

@@ -112,7 +112,7 @@ case class UpdateAddHtlc(channelId: BinaryData,
id: Long,
amountMsat: Long,
paymentHash: BinaryData,
expiry: Long,
cltvExpiry: Long,
onionRoutingPacket: BinaryData) extends HtlcMessage with UpdateMessage with HasChannelId
case class UpdateFulfillHtlc(channelId: BinaryData,

View file

@@ -6,6 +6,6 @@ rpcuser=foo
rpcpassword=bar
txindex=1
zmqpubrawblock=tcp://127.0.0.1:28334
zmqpubrawtx=tcp://127.0.0.1:28334
zmqpubrawtx=tcp://127.0.0.1:28335
rpcworkqueue=64
addresstype=bech32

View file

@@ -19,7 +19,7 @@ package fr.acinq.eclair
import akka.actor.ActorSystem
import fr.acinq.bitcoin.{Block, Transaction}
import fr.acinq.eclair.blockchain._
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BitcoinJsonRPCClient, ExtendedBitcoinClient}
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BasicBitcoinJsonRPCClient, BitcoinJsonRPCClient, ExtendedBitcoinClient}
import org.json4s.JsonAST
import scala.concurrent.duration._
@@ -28,9 +28,7 @@ import scala.concurrent.{ExecutionContext, Future}
/**
* Created by PM on 26/04/2016.
*/
class TestBitcoinClient()(implicit system: ActorSystem) extends ExtendedBitcoinClient(new BitcoinJsonRPCClient {
override def invoke(method: String, params: Any*)(implicit ec: ExecutionContext): Future[JsonAST.JValue] = ???
}) {
class TestBitcoinClient()(implicit system: ActorSystem) extends ExtendedBitcoinClient(new BasicBitcoinJsonRPCClient("", "", "", 0)(http = null)) {
import scala.concurrent.ExecutionContext.Implicits.global

View file

@@ -48,7 +48,6 @@ class BitcoinCoreWalletSpec extends TestKit(ActorSystem("test")) with BitcoindSe
implicit val formats = DefaultFormats.withBigDecimal
override def beforeAll(): Unit = {
startBitcoind()
}

View file

@@ -23,6 +23,8 @@ import java.util.UUID
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.pipe
import akka.testkit.{TestKitBase, TestProbe}
import com.softwaremill.sttp.okhttp.OkHttpFutureBackend
import fr.acinq.eclair.blockchain.bitcoind.rpc.{BasicBitcoinJsonRPCClient, BitcoinJsonRPCClient}
import fr.acinq.eclair.integration.IntegrationSpec
import grizzled.slf4j.Logging
@@ -35,6 +37,7 @@ trait BitcoindService extends Logging {
self: TestKitBase =>
implicit val system: ActorSystem
implicit val sttpBackend = OkHttpFutureBackend()
import scala.sys.process._

View file

@@ -28,6 +28,7 @@ import fr.acinq.eclair.blockchain.bitcoind.{BitcoinCoreWallet, BitcoindService}
import fr.acinq.eclair.blockchain.electrum.ElectrumClient.{BroadcastTransaction, BroadcastTransactionResponse}
import grizzled.slf4j.Logging
import org.json4s.JsonAST.{JDecimal, JString, JValue}
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, FunSuiteLike}
import scala.concurrent.Await

View file

@@ -16,8 +16,8 @@
package fr.acinq.eclair.blockchain.fee
import akka.actor.ActorSystem
import akka.util.Timeout
import com.softwaremill.sttp.okhttp.OkHttpFutureBackend
import grizzled.slf4j.Logging
import org.json4s.DefaultFormats
import org.scalatest.FunSuite
@@ -70,7 +70,7 @@ class EarnDotComFeeProviderSpec extends FunSuite with Logging {
test("make sure API hasn't changed") {
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
implicit val system = ActorSystem()
implicit val sttpBackend = OkHttpFutureBackend()
implicit val timeout = Timeout(30 seconds)
val provider = new EarnDotComFeeProvider()
logger.info("earn.com livenet fees: " + Await.result(provider.getFeerates, 10 seconds))

View file

@@ -90,8 +90,8 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
val initialState = alice.stateData.asInstanceOf[DATA_NORMAL]
val sender = TestProbe()
val h = BinaryData("42" * 32)
val originHtlc = UpdateAddHtlc(channelId = "42" * 32, id = 5656, amountMsat = 50000000, expiry = 400144, paymentHash = h, onionRoutingPacket = "00" * 1254)
val cmd = CMD_ADD_HTLC(originHtlc.amountMsat - 10000, h, originHtlc.expiry - 7, upstream_opt = Some(originHtlc))
val originHtlc = UpdateAddHtlc(channelId = "42" * 32, id = 5656, amountMsat = 50000000, cltvExpiry = 400144, paymentHash = h, onionRoutingPacket = "00" * 1254)
val cmd = CMD_ADD_HTLC(originHtlc.amountMsat - 10000, h, originHtlc.cltvExpiry - 7, upstream_opt = Some(originHtlc))
sender.send(alice, cmd)
sender.expectMsg("ok")
val htlc = alice2bob.expectMsgType[UpdateAddHtlc]
@@ -108,7 +108,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
import f._
val initialState = alice.stateData.asInstanceOf[DATA_NORMAL]
val sender = TestProbe()
val add = CMD_ADD_HTLC(500000000, "11" * 42, expiry = 400144)
val add = CMD_ADD_HTLC(500000000, "11" * 42, cltvExpiry = 400144)
sender.send(alice, add)
val error = InvalidPaymentHash(channelId(alice))
sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate), Some(add))))
@@ -121,7 +121,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
val initialState = alice.stateData.asInstanceOf[DATA_NORMAL]
val currentBlockCount = Globals.blockCount.get
val expiryTooSmall = currentBlockCount + 3
val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = expiryTooSmall)
val add = CMD_ADD_HTLC(500000000, "11" * 32, cltvExpiry = expiryTooSmall)
sender.send(alice, add)
val error = ExpiryTooSmall(channelId(alice), currentBlockCount + Channel.MIN_CLTV_EXPIRY, expiryTooSmall, currentBlockCount)
sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate), Some(add))))
@@ -134,7 +134,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
val initialState = alice.stateData.asInstanceOf[DATA_NORMAL]
val currentBlockCount = Globals.blockCount.get
val expiryTooBig = currentBlockCount + Channel.MAX_CLTV_EXPIRY + 1
val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = expiryTooBig)
val add = CMD_ADD_HTLC(500000000, "11" * 32, cltvExpiry = expiryTooBig)
sender.send(alice, add)
val error = ExpiryTooBig(channelId(alice), maximum = currentBlockCount + Channel.MAX_CLTV_EXPIRY, actual = expiryTooBig, blockCount = currentBlockCount)
sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate), Some(add))))
@@ -257,7 +257,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
awaitCond(alice.stateData.asInstanceOf[DATA_NORMAL].localShutdown.isDefined && !alice.stateData.asInstanceOf[DATA_NORMAL].remoteShutdown.isDefined)
// actual test starts here
val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = 400144)
val add = CMD_ADD_HTLC(500000000, "11" * 32, cltvExpiry = 400144)
sender.send(alice, add)
val error = NoMoreHtlcsClosingInProgress(channelId(alice))
sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), Some(initialState.channelUpdate), Some(add))))
@@ -269,14 +269,14 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
val sender = TestProbe()
val initialState = alice.stateData.asInstanceOf[DATA_NORMAL]
// let's make alice send an htlc
val add1 = CMD_ADD_HTLC(500000000, "11" * 32, expiry = 400144)
val add1 = CMD_ADD_HTLC(500000000, "11" * 32, cltvExpiry = 400144)
sender.send(alice, add1)
sender.expectMsg("ok")
// at the same time bob initiates a closing
sender.send(bob, CMD_CLOSE(None))
sender.expectMsg("ok")
// this command will be received by alice right after having received the shutdown
val add2 = CMD_ADD_HTLC(100000000, "22" * 32, expiry = 300000)
val add2 = CMD_ADD_HTLC(100000000, "22" * 32, cltvExpiry = 300000)
// messages cross
alice2bob.expectMsgType[UpdateAddHtlc]
alice2bob.forward(bob)
@@ -328,7 +328,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
test("recv UpdateAddHtlc (value too small)") { f =>
import f._
val tx = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.localCommit.publishableTxs.commitTx.tx
val htlc = UpdateAddHtlc("00" * 32, 0, 150, BinaryData("42" * 32), expiry = 400144, defaultOnion)
val htlc = UpdateAddHtlc("00" * 32, 0, 150, BinaryData("42" * 32), cltvExpiry = 400144, defaultOnion)
alice2bob.forward(bob, htlc)
val error = bob2alice.expectMsgType[Error]
assert(new String(error.data) === HtlcValueTooSmall(channelId(bob), minimum = 1000, actual = 150).getMessage)

View file

@@ -97,7 +97,7 @@ class ShutdownStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
test("recv CMD_ADD_HTLC") { f =>
import f._
val sender = TestProbe()
val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = 300000)
val add = CMD_ADD_HTLC(500000000, "11" * 32, cltvExpiry = 300000)
sender.send(alice, add)
val error = ChannelUnavailable(channelId(alice))
sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), None, Some(add))))

View file

@@ -69,7 +69,7 @@ class NegotiatingStateSpec extends TestkitBaseClass with StateTestsHelperMethods
import f._
alice2bob.expectMsgType[ClosingSigned]
val sender = TestProbe()
val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = 300000)
val add = CMD_ADD_HTLC(500000000, "11" * 32, cltvExpiry = 300000)
sender.send(alice, add)
val error = ChannelUnavailable(channelId(alice))
sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), None, Some(add))))

View file

@@ -110,7 +110,7 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
// actual test starts here
val sender = TestProbe()
val add = CMD_ADD_HTLC(500000000, "11" * 32, expiry = 300000)
val add = CMD_ADD_HTLC(500000000, "11" * 32, cltvExpiry = 300000)
sender.send(alice, add)
val error = ChannelUnavailable(channelId(alice))
sender.expectMsg(Failure(AddHtlcFailed(channelId(alice), add.paymentHash, error, Local(Some(sender.ref)), None, Some(add))))

View file

@@ -85,7 +85,7 @@ class SqliteNetworkDbSpec extends FunSuite {
val channel_update_1 = Announcements.makeChannelUpdate(Block.RegtestGenesisBlock.hash, randomKey, randomKey.publicKey, ShortChannelId(42), 5, 7000000, 50000, 100, 500000000L, true)
val channel_update_2 = Announcements.makeChannelUpdate(Block.RegtestGenesisBlock.hash, randomKey, randomKey.publicKey, ShortChannelId(43), 5, 7000000, 50000, 100, 500000000L, true)
val channel_update_3 = ChannelUpdate(BinaryData.empty, Block.RegtestGenesisBlock.hash, ShortChannelId(44), 123456789, Announcements.makeMessageFlags(hasOptionChannelHtlcMax = false), Announcements.makeChannelFlags(isNode1 = true, enable = true), 5, 7000000, 50000, 100, None)
val channel_update_3 = Announcements.makeChannelUpdate(Block.RegtestGenesisBlock.hash, randomKey, randomKey.publicKey, ShortChannelId(44), 5, 7000000, 50000, 100, 500000000L, true)
assert(db.listChannelUpdates().toSet === Set.empty)
db.addChannelUpdate(channel_update_1)

View file

@@ -19,7 +19,8 @@ package fr.acinq.eclair.integration
import java.io.{File, PrintWriter}
import java.util.Properties
import akka.actor.{ActorRef, ActorSystem}
import akka.actor.{ActorRef, ActorSystem, Terminated}
import akka.pattern.pipe
import akka.testkit.{TestKit, TestProbe}
import com.google.common.net.HostAndPort
import com.typesafe.config.{Config, ConfigFactory}
@@ -108,7 +109,7 @@ class IntegrationSpec extends TestKit(ActorSystem("test")) with BitcoindService
test("starting eclair nodes") {
import collection.JavaConversions._
val commonConfig = ConfigFactory.parseMap(Map("eclair.chain" -> "regtest", "eclair.spv" -> false, "eclair.server.public-ips.1" -> "127.0.0.1", "eclair.bitcoind.port" -> 28333, "eclair.bitcoind.rpcport" -> 28332, "eclair.bitcoind.zmq" -> "tcp://127.0.0.1:28334", "eclair.mindepth-blocks" -> 2, "eclair.max-htlc-value-in-flight-msat" -> 100000000000L, "eclair.router-broadcast-interval" -> "2 second", "eclair.auto-reconnect" -> false))
val commonConfig = ConfigFactory.parseMap(Map("eclair.chain" -> "regtest", "eclair.spv" -> false, "eclair.server.public-ips.1" -> "127.0.0.1", "eclair.bitcoind.port" -> 28333, "eclair.bitcoind.rpcport" -> 28332, "eclair.bitcoind.zmqblock" -> "tcp://127.0.0.1:28334", "eclair.bitcoind.zmqtx" -> "tcp://127.0.0.1:28335", "eclair.mindepth-blocks" -> 2, "eclair.max-htlc-value-in-flight-msat" -> 100000000000L, "eclair.router-broadcast-interval" -> "2 second", "eclair.auto-reconnect" -> false))
instantiateEclairNode("A", ConfigFactory.parseMap(Map("eclair.node-alias" -> "A", "eclair.delay-blocks" -> 130, "eclair.server.port" -> 29730, "eclair.api.port" -> 28080, "eclair.channel-flags" -> 0)).withFallback(commonConfig)) // A's channels are private
instantiateEclairNode("B", ConfigFactory.parseMap(Map("eclair.node-alias" -> "B", "eclair.delay-blocks" -> 131, "eclair.server.port" -> 29731, "eclair.api.port" -> 28081)).withFallback(commonConfig))
instantiateEclairNode("C", ConfigFactory.parseMap(Map("eclair.node-alias" -> "C", "eclair.delay-blocks" -> 132, "eclair.server.port" -> 29732, "eclair.api.port" -> 28082, "eclair.payment-handler" -> "noop")).withFallback(commonConfig))

View file

@@ -4,14 +4,15 @@ import java.net.InetSocketAddress
import akka.actor.ActorRef
import akka.testkit.TestProbe
import fr.acinq.eclair.randomBytes
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.eclair.TestConstants._
import fr.acinq.eclair.blockchain.EclairWallet
import fr.acinq.eclair.crypto.TransportHandler
import fr.acinq.eclair.io.Peer.{CHANNELID_ZERO, ResumeAnnouncements}
import fr.acinq.eclair.io.Peer.{CHANNELID_ZERO, ResumeAnnouncements, SendPing}
import fr.acinq.eclair.router.RoutingSyncSpec.makeFakeRoutingInfo
import fr.acinq.eclair.router.{ChannelRangeQueries, ChannelRangeQueriesSpec, Rebroadcast}
import fr.acinq.eclair.wire.Error
import fr.acinq.eclair.wire.{Error, Ping, Pong}
import fr.acinq.eclair.{ShortChannelId, TestkitBaseClass, wire}
import org.scalatest.Outcome
@@ -54,6 +55,39 @@ class PeerSpec extends TestkitBaseClass {
assert(probe.expectMsgType[Peer.PeerInfo].state == "CONNECTED")
}
test("reply to ping") { f =>
import f._
val probe = TestProbe()
connect(remoteNodeId, authenticator, watcher, router, relayer, connection, transport, peer)
val ping = Ping(42, randomBytes(127))
probe.send(peer, ping)
transport.expectMsg(TransportHandler.ReadAck(ping))
assert(transport.expectMsgType[Pong].data.size === ping.pongLength)
}
test("ignore malicious ping") { f =>
import f._
val probe = TestProbe()
connect(remoteNodeId, authenticator, watcher, router, relayer, connection, transport, peer)
// huge requested pong length
val ping = Ping(Int.MaxValue, randomBytes(127))
probe.send(peer, ping)
transport.expectMsg(TransportHandler.ReadAck(ping))
transport.expectNoMsg()
}
test("disconnect if no reply to ping") { f =>
import f._
val sender = TestProbe()
val deathWatcher = TestProbe()
connect(remoteNodeId, authenticator, watcher, router, relayer, connection, transport, peer)
// we manually trigger a ping because we don't want to wait too long in tests
sender.send(peer, SendPing)
transport.expectMsgType[Ping]
deathWatcher.watch(transport.ref)
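// the transport probe never sends a Pong back, so the peer should close the connection (terminating the transport) once the ping goes unanswered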
deathWatcher.expectTerminated(transport.ref, max = 11 seconds)
}
test("filter gossip message (no filtering)") { f =>
import f._
val probe = TestProbe()
@@ -165,4 +199,5 @@ class PeerSpec extends TestkitBaseClass {
assert(error.channelId === CHANNELID_ZERO)
assert(new String(error.data).startsWith("bad announcement sig! bin=0100"))
}
}

View file

@@ -99,7 +99,7 @@ class HtlcGenerationSpec extends FunSuite {
val (add, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops)
assert(add.amountMsat > finalAmountMsat)
assert(add.expiry === finalExpiry + channelUpdate_de.cltvExpiryDelta + channelUpdate_cd.cltvExpiryDelta + channelUpdate_bc.cltvExpiryDelta)
assert(add.cltvExpiry === finalExpiry + channelUpdate_de.cltvExpiryDelta + channelUpdate_cd.cltvExpiryDelta + channelUpdate_bc.cltvExpiryDelta)
assert(add.paymentHash === paymentHash)
assert(add.onion.length === Sphinx.PacketLength)
@@ -133,7 +133,7 @@ class HtlcGenerationSpec extends FunSuite {
val (add, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops.take(1))
assert(add.amountMsat === finalAmountMsat)
assert(add.expiry === finalExpiry)
assert(add.cltvExpiry === finalExpiry)
assert(add.paymentHash === paymentHash)
assert(add.onion.size === Sphinx.PacketLength)

View file

@@ -18,13 +18,14 @@ package fr.acinq.eclair.payment
import akka.actor.Status.Failure
import akka.actor.{ActorSystem, Status}
import akka.testkit.{TestKit, TestProbe}
import fr.acinq.bitcoin.{MilliSatoshi, Satoshi}
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import fr.acinq.bitcoin.{BinaryData, MilliSatoshi, Satoshi}
import fr.acinq.eclair.TestConstants.Alice
import fr.acinq.eclair.channel.{CMD_FAIL_HTLC, CMD_FULFILL_HTLC}
import fr.acinq.eclair.payment.LocalPaymentHandler.PendingPaymentRequest
import fr.acinq.eclair.payment.PaymentLifecycle.{CheckPayment, ReceivePayment}
import fr.acinq.eclair.payment.PaymentRequest.ExtraHop
import fr.acinq.eclair.wire.{FinalExpiryTooSoon, UpdateAddHtlc}
import fr.acinq.eclair.wire.{FinalExpiryTooSoon, UnknownPaymentHash, UpdateAddHtlc}
import fr.acinq.eclair.{Globals, ShortChannelId, randomKey}
import org.scalatest.FunSuiteLike
@@ -38,7 +39,7 @@ class PaymentHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLike
test("LocalPaymentHandler should reply with a fulfill/fail, emit a PaymentReceived and adds payment in DB") {
val nodeParams = Alice.nodeParams
val handler = system.actorOf(LocalPaymentHandler.props(nodeParams))
val handler = TestActorRef[LocalPaymentHandler](LocalPaymentHandler.props(nodeParams))
val sender = TestProbe()
val eventListener = TestProbe()
system.eventStream.subscribe(eventListener.ref, classOf[PaymentReceived])
@@ -55,7 +56,7 @@ class PaymentHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLike
sender.send(handler, add)
sender.expectMsgType[CMD_FULFILL_HTLC]
val paymentRelayed = eventListener.expectMsgType[PaymentReceived]
assert(paymentRelayed.copy(timestamp = 0) === PaymentReceived(amountMsat,add.paymentHash, add.channelId, timestamp = 0))
assert(paymentRelayed.copy(timestamp = 0) === PaymentReceived(amountMsat, add.paymentHash, add.channelId, timestamp = 0))
sender.send(handler, CheckPayment(pr.paymentHash))
assert(sender.expectMsgType[Boolean] === true)
}
@@ -69,7 +70,7 @@ class PaymentHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLike
sender.send(handler, add)
sender.expectMsgType[CMD_FULFILL_HTLC]
val paymentRelayed = eventListener.expectMsgType[PaymentReceived]
assert(paymentRelayed.copy(timestamp = 0) === PaymentReceived(amountMsat,add.paymentHash, add.channelId, timestamp = 0))
assert(paymentRelayed.copy(timestamp = 0) === PaymentReceived(amountMsat, add.paymentHash, add.channelId, timestamp = 0))
sender.send(handler, CheckPayment(pr.paymentHash))
assert(sender.expectMsgType[Boolean] === true)
}
@@ -79,13 +80,36 @@ class PaymentHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLike
val pr = sender.expectMsgType[PaymentRequest]
sender.send(handler, CheckPayment(pr.paymentHash))
assert(sender.expectMsgType[Boolean] === false)
val add = UpdateAddHtlc("11" * 32, 0, amountMsat.amount, pr.paymentHash, expiry = Globals.blockCount.get() + 3, "")
val add = UpdateAddHtlc("11" * 32, 0, amountMsat.amount, pr.paymentHash, cltvExpiry = Globals.blockCount.get() + 3, "")
sender.send(handler, add)
assert(sender.expectMsgType[CMD_FAIL_HTLC].reason == Right(FinalExpiryTooSoon))
eventListener.expectNoMsg(300 milliseconds)
sender.send(handler, CheckPayment(pr.paymentHash))
assert(sender.expectMsgType[Boolean] === false)
}
{
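// create a payment request with a very short expiry and let it expire: the incoming htlc must then be failed and the request eventually purged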
sender.send(handler, ReceivePayment(Some(amountMsat), "timeout expired", Some(1L)))
// allow the request to time out
Thread.sleep(1001)
val pr = sender.expectMsgType[PaymentRequest]
sender.send(handler, CheckPayment(pr.paymentHash))
assert(sender.expectMsgType[Boolean] === false)
val add = UpdateAddHtlc("11" * 32, 0, amountMsat.amount, pr.paymentHash, expiry, "")
sender.send(handler, add)
assert(sender.expectMsgType[CMD_FAIL_HTLC].reason == Right(UnknownPaymentHash))
// We chose UnknownPaymentHash on purpose: you get the same error message whether the request expired 1 second or 1 hour ago.
eventListener.expectNoMsg(300 milliseconds)
sender.send(handler, CheckPayment(pr.paymentHash))
assert(sender.expectMsgType[Boolean] === false)
// make sure that the request is indeed pruned
sender.send(handler, 'requests)
assert(sender.expectMsgType[Map[BinaryData, PendingPaymentRequest]].contains(pr.paymentHash))
sender.send(handler, LocalPaymentHandler.PurgeExpiredRequests)
awaitCond({
sender.send(handler, 'requests)
sender.expectMsgType[Map[BinaryData, PendingPaymentRequest]].contains(pr.paymentHash) == false
})
}
}
test("Payment request generation should fail when the amount asked in not valid") {

View file

@@ -66,7 +66,7 @@ class RelayerSpec extends TestkitBaseClass {
// we use this to build a valid onion
val (cmd, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops)
// and then manually build an htlc
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.expiry, cmd.onion)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry, cmd.onion)
relayer ! LocalChannelUpdate(null, channelId_bc, channelUpdate_bc.shortChannelId, c, None, channelUpdate_bc, makeCommitments(channelId_bc))
sender.send(relayer, ForwardAdd(add_ab))
@@ -86,7 +86,7 @@ class RelayerSpec extends TestkitBaseClass {
// we use this to build a valid onion
val (cmd, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops)
// and then manually build an htlc
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.expiry, cmd.onion)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry, cmd.onion)
sender.send(relayer, ForwardAdd(add_ab))
@@ -106,7 +106,7 @@ class RelayerSpec extends TestkitBaseClass {
// we use this to build a valid onion
val (cmd, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops)
// and then manually build an htlc
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.expiry, cmd.onion)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry, cmd.onion)
relayer ! LocalChannelUpdate(null, channelId_bc, channelUpdate_bc.shortChannelId, c, None, channelUpdate_bc, makeCommitments(channelId_bc))
sender.send(relayer, ForwardAdd(add_ab))
@@ -132,7 +132,7 @@ class RelayerSpec extends TestkitBaseClass {
// check that payments are sent properly
val (cmd, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.expiry, cmd.onion)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry, cmd.onion)
relayer ! LocalChannelUpdate(null, channelId_bc, channelUpdate_bc.shortChannelId, c, None, channelUpdate_bc, makeCommitments(channelId_bc))
sender.send(relayer, ForwardAdd(add_ab))
@@ -148,7 +148,7 @@ class RelayerSpec extends TestkitBaseClass {
relayer ! LocalChannelDown(sender.ref, channelId = channelId_bc, shortChannelId = channelUpdate_bc.shortChannelId, remoteNodeId = TestConstants.Bob.nodeParams.nodeId)
val (cmd1, _) = buildCommand(finalAmountMsat, finalExpiry, "02" * 32, hops)
val add_ab1 = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd1.amountMsat, cmd1.paymentHash, cmd1.expiry, cmd1.onion)
val add_ab1 = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd1.amountMsat, cmd1.paymentHash, cmd1.cltvExpiry, cmd1.onion)
sender.send(relayer, ForwardAdd(add_ab))
val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
@@ -166,7 +166,7 @@ class RelayerSpec extends TestkitBaseClass {
// we use this to build a valid onion
val (cmd, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops)
// and then manually build an htlc
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.expiry, cmd.onion)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry, cmd.onion)
val channelUpdate_bc_disabled = channelUpdate_bc.copy(channelFlags = Announcements.makeChannelFlags(Announcements.isNode1(channelUpdate_bc.channelFlags), enable = false))
relayer ! LocalChannelUpdate(null, channelId_bc, channelUpdate_bc.shortChannelId, c, None, channelUpdate_bc_disabled, makeCommitments(channelId_bc))
@@ -187,7 +187,7 @@ class RelayerSpec extends TestkitBaseClass {
// we use this to build a valid onion
val (cmd, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops)
// and then manually build an htlc
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.expiry, "00" * Sphinx.PacketLength)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry, "00" * Sphinx.PacketLength)
relayer ! LocalChannelUpdate(null, channelId_bc, channelUpdate_bc.shortChannelId, c, None, channelUpdate_bc, makeCommitments(channelId_bc))
sender.send(relayer, ForwardAdd(add_ab))
@@ -207,7 +207,7 @@ class RelayerSpec extends TestkitBaseClass {
// we use this to build a valid onion
val (cmd, _) = buildCommand(channelUpdate_bc.htlcMinimumMsat - 1, finalExpiry, paymentHash, hops.map(hop => hop.copy(lastUpdate = hop.lastUpdate.copy(feeBaseMsat = 0, feeProportionalMillionths = 0))))
// and then manually build an htlc
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.expiry, cmd.onion)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry, cmd.onion)
relayer ! LocalChannelUpdate(null, channelId_bc, channelUpdate_bc.shortChannelId, c, None, channelUpdate_bc, makeCommitments(channelId_bc))
sender.send(relayer, ForwardAdd(add_ab))
@@ -227,14 +227,14 @@ class RelayerSpec extends TestkitBaseClass {
val hops1 = hops.updated(1, hops(1).copy(lastUpdate = hops(1).lastUpdate.copy(cltvExpiryDelta = 0)))
val (cmd, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops1)
// and then manually build an htlc
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.expiry, cmd.onion)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry, cmd.onion)
relayer ! LocalChannelUpdate(null, channelId_bc, channelUpdate_bc.shortChannelId, c, None, channelUpdate_bc, makeCommitments(channelId_bc))
sender.send(relayer, ForwardAdd(add_ab))
val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
assert(fail.id === add_ab.id)
assert(fail.reason == Right(IncorrectCltvExpiry(cmd.expiry, channelUpdate_bc)))
assert(fail.reason == Right(IncorrectCltvExpiry(cmd.cltvExpiry, channelUpdate_bc)))
register.expectNoMsg(100 millis)
paymentHandler.expectNoMsg(100 millis)
@@ -247,7 +247,7 @@ class RelayerSpec extends TestkitBaseClass {
val hops1 = hops.updated(1, hops(1).copy(lastUpdate = hops(1).lastUpdate.copy(feeBaseMsat = hops(1).lastUpdate.feeBaseMsat / 2)))
val (cmd, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops1)
// and then manually build an htlc
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.expiry, cmd.onion)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry, cmd.onion)
relayer ! LocalChannelUpdate(null, channelId_bc, channelUpdate_bc.shortChannelId, c, None, channelUpdate_bc, makeCommitments(channelId_bc))
sender.send(relayer, ForwardAdd(add_ab))
@@ -268,7 +268,7 @@ class RelayerSpec extends TestkitBaseClass {
val hops1 = hops.head :: Nil
val (cmd, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops1)
// and then manually build an htlc with a wrong expiry
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat - 1, cmd.paymentHash, cmd.expiry, cmd.onion)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat - 1, cmd.paymentHash, cmd.cltvExpiry, cmd.onion)
relayer ! LocalChannelUpdate(null, channelId_bc, channelUpdate_bc.shortChannelId, c, None, channelUpdate_bc, makeCommitments(channelId_bc))
sender.send(relayer, ForwardAdd(add_ab))
@@ -289,14 +289,14 @@ class RelayerSpec extends TestkitBaseClass {
val hops1 = hops.head :: Nil
val (cmd, _) = buildCommand(finalAmountMsat, finalExpiry, paymentHash, hops1)
// and then manually build an htlc with a wrong expiry
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.expiry - 1, cmd.onion)
val add_ab = UpdateAddHtlc(channelId = channelId_ab, id = 123456, cmd.amountMsat, cmd.paymentHash, cmd.cltvExpiry - 1, cmd.onion)
relayer ! LocalChannelUpdate(null, channelId_bc, channelUpdate_bc.shortChannelId, c, None, channelUpdate_bc, makeCommitments(channelId_bc))
sender.send(relayer, ForwardAdd(add_ab))
val fail = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message
assert(fail.id === add_ab.id)
assert(fail.reason == Right(FinalIncorrectCltvExpiry(add_ab.expiry)))
assert(fail.reason == Right(FinalIncorrectCltvExpiry(add_ab.cltvExpiry)))
register.expectNoMsg(100 millis)
paymentHandler.expectNoMsg(100 millis)
@@ -340,7 +340,7 @@ class RelayerSpec extends TestkitBaseClass {
system.eventStream.subscribe(eventListener.ref, classOf[PaymentEvent])
// we build a fake htlc for the downstream channel
val add_bc = UpdateAddHtlc(channelId = channelId_bc, id = 72, amountMsat = 10000000L, paymentHash = "00" * 32, expiry = 4200, onionRoutingPacket = "")
val add_bc = UpdateAddHtlc(channelId = channelId_bc, id = 72, amountMsat = 10000000L, paymentHash = "00" * 32, cltvExpiry = 4200, onionRoutingPacket = "")
val fulfill_ba = UpdateFulfillHtlc(channelId = channelId_bc, id = 42, paymentPreimage = "00" * 32)
val origin = Relayed(channelId_ab, 150, 11000000L, 10000000L)
sender.send(relayer, ForwardFulfill(fulfill_ba, origin, add_bc))
@@ -358,7 +358,7 @@ class RelayerSpec extends TestkitBaseClass {
val sender = TestProbe()
// we build a fake htlc for the downstream channel
val add_bc = UpdateAddHtlc(channelId = channelId_bc, id = 72, amountMsat = 10000000L, paymentHash = "00" * 32, expiry = 4200, onionRoutingPacket = "")
val add_bc = UpdateAddHtlc(channelId = channelId_bc, id = 72, amountMsat = 10000000L, paymentHash = "00" * 32, cltvExpiry = 4200, onionRoutingPacket = "")
val fail_ba = UpdateFailHtlc(channelId = channelId_bc, id = 42, reason = Sphinx.createErrorPacket(BinaryData("01" * 32), TemporaryChannelFailure(channelUpdate_cd)))
val origin = Relayed(channelId_ab, 150, 11000000L, 10000000L)
sender.send(relayer, ForwardFail(fail_ba, origin, add_bc))

View file

@@ -19,6 +19,7 @@ package fr.acinq.eclair.router
import akka.actor.ActorSystem
import akka.pattern.pipe
import akka.testkit.TestProbe
import com.softwaremill.sttp.okhttp.OkHttpFutureBackend
import fr.acinq.bitcoin.Crypto.PrivateKey
import fr.acinq.bitcoin.{BinaryData, Block, Satoshi, Script, Transaction}
import fr.acinq.eclair.blockchain.ValidateResult
@@ -44,6 +45,7 @@ class AnnouncementsBatchValidationSpec extends FunSuite {
import scala.concurrent.ExecutionContext.Implicits.global
implicit val system = ActorSystem()
implicit val sttpBackend = OkHttpFutureBackend()
implicit val extendedBitcoinClient = new ExtendedBitcoinClient(new BasicBitcoinJsonRPCClient(user = "foo", password = "bar", host = "localhost", port = 18332))
val channels = for (i <- 0 until 50) yield {
@@ -76,7 +78,7 @@ object AnnouncementsBatchValidationSpec {
def generateBlocks(numBlocks: Int)(implicit extendedBitcoinClient: ExtendedBitcoinClient, ec: ExecutionContext) =
Await.result(extendedBitcoinClient.rpcClient.invoke("generate", numBlocks), 10 seconds)
def simulateChannel()(implicit extendedBitcoinClient: ExtendedBitcoinClient, ec: ExecutionContext, system: ActorSystem): SimulatedChannel = {
def simulateChannel()(implicit extendedBitcoinClient: ExtendedBitcoinClient, ec: ExecutionContext): SimulatedChannel = {
val node1Key = randomKey
val node2Key = randomKey
val node1BitcoinKey = randomKey

View file

@@ -160,7 +160,7 @@ class TestVectorsSpec extends FunSuite with Logging {
)
val htlcScripts = htlcs.map(htlc => htlc.direction match {
case OUT => Scripts.htlcOffered(Local.payment_privkey.publicKey, Remote.payment_privkey.publicKey, Local.revocation_pubkey, Crypto.ripemd160(htlc.add.paymentHash))
case IN => Scripts.htlcReceived(Local.payment_privkey.publicKey, Remote.payment_privkey.publicKey, Local.revocation_pubkey, Crypto.ripemd160(htlc.add.paymentHash), htlc.add.expiry)
case IN => Scripts.htlcReceived(Local.payment_privkey.publicKey, Remote.payment_privkey.publicKey, Local.revocation_pubkey, Crypto.ripemd160(htlc.add.paymentHash), htlc.add.cltvExpiry)
})
def dir2string(dir: Direction) = dir match {
@@ -171,7 +171,7 @@ class TestVectorsSpec extends FunSuite with Logging {
for (i <- 0 until htlcs.length) {
logger.info(s"htlc $i direction: ${dir2string(htlcs(i).direction)}")
logger.info(s"htlc $i amount_msat: ${htlcs(i).add.amountMsat}")
logger.info(s"htlc $i expiry: ${htlcs(i).add.expiry}")
logger.info(s"htlc $i expiry: ${htlcs(i).add.cltvExpiry}")
logger.info(s"htlc $i payment_preimage: ${paymentPreimages(i)}")
}

View file

@@ -124,8 +124,8 @@ class TransactionsSpec extends FunSuite with Logging {
// HtlcPenaltyTx
// first we create a fake commitTx tx, containing only the output that will be spent by the ClaimHtlcSuccessTx
val paymentPreimage = BinaryData("42" * 32)
val htlc = UpdateAddHtlc("00" * 32, 0, Satoshi(20000).amount * 1000, sha256(paymentPreimage), expiry = 400144, BinaryData.empty)
val redeemScript = htlcReceived(localHtlcPriv.publicKey, remoteHtlcPriv.publicKey, localRevocationPriv.publicKey, ripemd160(htlc.paymentHash), htlc.expiry)
val htlc = UpdateAddHtlc("00" * 32, 0, Satoshi(20000).amount * 1000, sha256(paymentPreimage), cltvExpiry = 400144, BinaryData.empty)
val redeemScript = htlcReceived(localHtlcPriv.publicKey, remoteHtlcPriv.publicKey, localRevocationPriv.publicKey, ripemd160(htlc.paymentHash), htlc.cltvExpiry)
val pubKeyScript = write(pay2wsh(redeemScript))
val commitTx = Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(htlc.amountMsat / 1000), pubKeyScript) :: Nil, lockTime = 0)
val htlcPenaltyTx = makeHtlcPenaltyTx(commitTx, outputsAlreadyUsed = Set.empty, Script.write(redeemScript), localDustLimit, finalPubKeyScript, feeratePerKw)
@@ -139,7 +139,7 @@ class TransactionsSpec extends FunSuite with Logging {
// ClaimHtlcSuccessTx
// first we create a fake commitTx tx, containing only the output that will be spent by the ClaimHtlcSuccessTx
val paymentPreimage = BinaryData("42" * 32)
val htlc = UpdateAddHtlc("00" * 32, 0, Satoshi(20000).amount * 1000, sha256(paymentPreimage), expiry = 400144, BinaryData.empty)
val htlc = UpdateAddHtlc("00" * 32, 0, Satoshi(20000).amount * 1000, sha256(paymentPreimage), cltvExpiry = 400144, BinaryData.empty)
val pubKeyScript = write(pay2wsh(htlcOffered(localHtlcPriv.publicKey, remoteHtlcPriv.publicKey, localRevocationPriv.publicKey, ripemd160(htlc.paymentHash))))
val commitTx = Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(htlc.amountMsat / 1000), pubKeyScript) :: Nil, lockTime = 0)
val claimHtlcSuccessTx = makeClaimHtlcSuccessTx(commitTx, outputsAlreadyUsed = Set.empty, localDustLimit, remoteHtlcPriv.publicKey, localHtlcPriv.publicKey, localRevocationPriv.publicKey, finalPubKeyScript, htlc, feeratePerKw)
@@ -153,8 +153,8 @@ class TransactionsSpec extends FunSuite with Logging {
// ClaimHtlcTimeoutTx
// first we create a fake commitTx tx, containing only the output that will be spent by the ClaimHtlcSuccessTx
val paymentPreimage = BinaryData("42" * 32)
val htlc = UpdateAddHtlc("00" * 32, 0, Satoshi(20000).amount * 1000, sha256(paymentPreimage), expiry = 400144, BinaryData.empty)
val pubKeyScript = write(pay2wsh(htlcReceived(localHtlcPriv.publicKey, remoteHtlcPriv.publicKey, localRevocationPriv.publicKey, ripemd160(htlc.paymentHash), htlc.expiry)))
val htlc = UpdateAddHtlc("00" * 32, 0, Satoshi(20000).amount * 1000, sha256(paymentPreimage), cltvExpiry = 400144, BinaryData.empty)
val pubKeyScript = write(pay2wsh(htlcReceived(localHtlcPriv.publicKey, remoteHtlcPriv.publicKey, localRevocationPriv.publicKey, ripemd160(htlc.paymentHash), htlc.cltvExpiry)))
val commitTx = Transaction(version = 0, txIn = Nil, txOut = TxOut(Satoshi(htlc.amountMsat / 1000), pubKeyScript) :: Nil, lockTime = 0)
val claimClaimHtlcTimeoutTx = makeClaimHtlcTimeoutTx(commitTx, outputsAlreadyUsed = Set.empty, localDustLimit, remoteHtlcPriv.publicKey, localHtlcPriv.publicKey, localRevocationPriv.publicKey, finalPubKeyScript, htlc, feeratePerKw)
// we use dummy signatures to compute the weight
@@ -304,7 +304,7 @@ class TransactionsSpec extends FunSuite with Logging {
{
// remote spends received HTLC output with revocation key
val script = Script.write(Scripts.htlcReceived(localHtlcPriv.publicKey, remoteHtlcPriv.publicKey, localRevocationPriv.publicKey, Crypto.ripemd160(htlc2.paymentHash), htlc2.expiry))
val script = Script.write(Scripts.htlcReceived(localHtlcPriv.publicKey, remoteHtlcPriv.publicKey, localRevocationPriv.publicKey, Crypto.ripemd160(htlc2.paymentHash), htlc2.cltvExpiry))
val htlcPenaltyTx = makeHtlcPenaltyTx(commitTx.tx, outputsAlreadyUsed = Set.empty, script, localDustLimit, finalPubKeyScript, feeratePerKw)
val sig = sign(htlcPenaltyTx, localRevocationPriv)
val signed = addSigs(htlcPenaltyTx, sig, localRevocationPriv.publicKey)

View file

@@ -104,7 +104,7 @@ class ChannelCodecsSpec extends FunSuite {
channelId = randomBytes(32),
id = Random.nextInt(Int.MaxValue),
amountMsat = Random.nextInt(Int.MaxValue),
expiry = Random.nextInt(Int.MaxValue),
cltvExpiry = Random.nextInt(Int.MaxValue),
paymentHash = randomBytes(32),
onionRoutingPacket = randomBytes(Sphinx.PacketLength))
val htlc1 = DirectedHtlc(direction = IN, add = add)
@@ -118,14 +118,14 @@ class ChannelCodecsSpec extends FunSuite {
channelId = randomBytes(32),
id = Random.nextInt(Int.MaxValue),
amountMsat = Random.nextInt(Int.MaxValue),
expiry = Random.nextInt(Int.MaxValue),
cltvExpiry = Random.nextInt(Int.MaxValue),
paymentHash = randomBytes(32),
onionRoutingPacket = randomBytes(Sphinx.PacketLength))
val add2 = UpdateAddHtlc(
channelId = randomBytes(32),
id = Random.nextInt(Int.MaxValue),
amountMsat = Random.nextInt(Int.MaxValue),
expiry = Random.nextInt(Int.MaxValue),
cltvExpiry = Random.nextInt(Int.MaxValue),
paymentHash = randomBytes(32),
onionRoutingPacket = randomBytes(Sphinx.PacketLength))
val htlc1 = DirectedHtlc(direction = IN, add = add1)

View file

@@ -65,6 +65,7 @@
<scala.version>2.11.11</scala.version>
<scala.version.short>2.11</scala.version.short>
<akka.version>2.3.14</akka.version>
<sttp.version>1.3.9</sttp.version>
<bitcoinlib.version>0.9.17</bitcoinlib.version>
<guava.version>24.0-android</guava.version>
</properties>