Fix typos (#417)
commit 7a6fa8a619
parent e55e1e0dab
LICENSE (2 lines changed)
@@ -186,7 +186,7 @@ Apache License
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

-Copyrigh 2014 ACINQ SAS
+Copyright 2014 ACINQ SAS

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -49,7 +49,7 @@ Eclair will use any BTC it finds in the Bitcoin Core wallet to fund any channels
 On **__testnet__**, you also need to make sure that all your UTXOs are `p2sh-of-p2wpkh`.
 To do this, use the debug console, create a new address with `getnewaddress`, import it as a witness address with `addwitnessaddress`, and
 send all your balance to this witness address.
-If you need to create and send funds manually, don't forget to create and specify a witness address for the change output (this option is avaliable on the GUI once you set the `Enable coin control features` wallet option).
+If you need to create and send funds manually, don't forget to create and specify a witness address for the change output (this option is available on the GUI once you set the `Enable coin control features` wallet option).


 ### Installing Eclair
@@ -76,7 +76,7 @@ class BitcoinCoreWallet(rpcClient: BitcoinJsonRPCClient)(implicit system: ActorS
 } yield MakeFundingTxResponse(fundingTx, outputIndex)

 override def commit(tx: Transaction): Future[Boolean] = publishTransaction(tx)
-.map(_ => true) // if bitcoind says OK, then we consider the tx succesfully published
+.map(_ => true) // if bitcoind says OK, then we consider the tx successfully published
 .recoverWith { case JsonRPCError(e) =>
 logger.warn(s"txid=${tx.txid} error=$e")
 getTransaction(tx.txid).map(_ => true).recover { case _ => false } // if we get a parseable error from bitcoind AND the tx is NOT in the mempool/blockchain, then we consider that the tx was not published
@@ -83,7 +83,7 @@ class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext =
 w match {
 case WatchSpentBasic(_, txid, outputIndex, _, _) =>
 // not: we assume parent tx was published, we just need to make sure this particular output has not been spent
-client.isTransactionOuputSpendable(txid.toString(), outputIndex, true).collect {
+client.isTransactionOutputSpendable(txid.toString(), outputIndex, true).collect {
 case false =>
 log.warning(s"output=$outputIndex of txid=$txid has already been spent")
 self ! TriggerEvent(w, WatchEventSpentBasic(w.event))
@@ -94,7 +94,7 @@ class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext =
 client.getTxConfirmations(txid.toString()).collect {
 case Some(_) =>
 // parent tx was published, we need to make sure this particular output has not been spent
-client.isTransactionOuputSpendable(txid.toString(), outputIndex, true).collect {
+client.isTransactionOutputSpendable(txid.toString(), outputIndex, true).collect {
 case false =>
 log.warning(s"output=$outputIndex of txid=$txid has already been spent")
 log.warning(s"looking first in the mempool")
@@ -94,9 +94,9 @@ class ExtendedBitcoinClient(val rpcClient: BitcoinJsonRPCClient) {
 tx <- getTransaction(txid)
 } yield tx

-def isTransactionOuputSpendable(txId: String, ouputIndex: Int, includeMempool: Boolean)(implicit ec: ExecutionContext): Future[Boolean] =
+def isTransactionOutputSpendable(txId: String, outputIndex: Int, includeMempool: Boolean)(implicit ec: ExecutionContext): Future[Boolean] =
 for {
-json <- rpcClient.invoke("gettxout", txId, ouputIndex, includeMempool)
+json <- rpcClient.invoke("gettxout", txId, outputIndex, includeMempool)
 } yield json != JNull

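Aside: the check renamed above relies on bitcoind's `gettxout` returning a null result when the output is spent or unknown, so "spendable" reduces to "the RPC returned a non-null result". A minimal sketch of that idea, independent of eclair's classes (`rpc` below is a hypothetical stand-in for the JSON-RPC call, yielding Some(result) for a non-null answer and None otherwise):

```scala
import scala.concurrent.{ExecutionContext, Future}

// `rpc(txid, outputIndex, includeMempool)` is a hypothetical helper, not eclair's
// BitcoinJsonRPCClient: it returns Some(json) when bitcoind answers with a result
// and None when the `gettxout` result is null.
def isOutputSpendable(rpc: (String, Int, Boolean) => Future[Option[String]],
                      txid: String, outputIndex: Int, includeMempool: Boolean)
                     (implicit ec: ExecutionContext): Future[Boolean] =
  rpc(txid, outputIndex, includeMempool).map(_.isDefined) // non-null result => output still unspent
```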
@@ -418,12 +418,12 @@ object ElectrumWallet {
 }

 /**
-* Wallet state, which stores data returned by EletrumX servers.
+* Wallet state, which stores data returned by ElectrumX servers.
 * Most items are indexed by script hash (i.e. by pubkey script sha256 hash).
 * Height follow ElectrumX's conventions:
 * - h > 0 means that the tx was confirmed at block #h
 * - 0 means unconfirmed, but all input are confirmed
-* < 0 means unconfirmed, and sonme inputs are unconfirmed as well
+* < 0 means unconfirmed, and some inputs are unconfirmed as well
 *
 * @param tip current blockchain tip
 * @param accountKeys account keys
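Aside: the height convention documented in that comment maps onto a small ADT. A sketch, for illustration only (not eclair code):

```scala
// Encode the ElectrumX height convention described in the comment above.
sealed trait TxStatus
case class ConfirmedAt(blockHeight: Int) extends TxStatus   // h > 0: confirmed at block #h
case object UnconfirmedInputsConfirmed extends TxStatus     // h == 0: unconfirmed, all inputs confirmed
case object UnconfirmedInputsUnconfirmed extends TxStatus   // h < 0: unconfirmed, some inputs unconfirmed

def statusFromHeight(h: Int): TxStatus =
  if (h > 0) ConfirmedAt(h)
  else if (h == 0) UnconfirmedInputsConfirmed
  else UnconfirmedInputsUnconfirmed
```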
@@ -184,7 +184,7 @@ class ElectrumWatcher(client: ActorRef) extends Actor with Stash with ActorLoggi

 case ElectrumClient.ElectrumDisconnected =>
 // we remember watches and keep track of tx that have not yet been published
-// we also re-send the txes that we previsouly sent but hadn't yet received the confirmation
+// we also re-send the txes that we previously sent but hadn't yet received the confirmation
 context become disconnected(watches, sent.map(PublishAsap(_)), block2tx)
 }

@@ -690,7 +690,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
 val closingSigned = Closing.makeFirstClosingTx(d.commitments, localShutdown.scriptPubKey, remoteShutdown.scriptPubKey)
 goto(NEGOTIATING) using store(DATA_NEGOTIATING(d.commitments, localShutdown, remoteShutdown, closingSigned :: Nil)) sending sendList :+ closingSigned
 } else {
-// there are some pending signed htlcs, we need to fail/fullfill them
+// there are some pending signed htlcs, we need to fail/fulfill them
 goto(SHUTDOWN) using store(DATA_SHUTDOWN(d.commitments, localShutdown, remoteShutdown)) sending sendList
 }
 }
@@ -1159,7 +1159,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
 wallet.rollback(fundingTx)
 stay

-case Event(INPUT_DISCONNECTED, _) => stay // we are disconnected, but it doesn't matter anymoer
+case Event(INPUT_DISCONNECTED, _) => stay // we are disconnected, but it doesn't matter anymore
 })

 when(OFFLINE)(handleExceptions {
@@ -1658,7 +1658,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu

 if (revWasSentLast) resendRevocation
 case Right(_) if commitments1.remoteCommit.index + 1 == channelReestablish.nextLocalCommitmentNumber =>
-// there wasn't any sig in-flight when the disconnection occured
+// there wasn't any sig in-flight when the disconnection occurred
 resendRevocation
 case _ => throw CommitmentSyncError(d.channelId)
 }
@@ -47,7 +47,7 @@ object Helpers {
 if (open.pushMsat > 1000 * open.fundingSatoshis) throw new InvalidPushAmount(open.temporaryChannelId, open.pushMsat, 1000 * open.fundingSatoshis)
 val localFeeratePerKw = Globals.feeratesPerKw.get.block_1
 if (isFeeDiffTooHigh(open.feeratePerKw, localFeeratePerKw, nodeParams.maxFeerateMismatch)) throw new FeerateTooDifferent(open.temporaryChannelId, localFeeratePerKw, open.feeratePerKw)
-// only enfore dust limit check on mainnet
+// only enforce dust limit check on mainnet
 if (nodeParams.chainHash == Block.LivenetGenesisBlock.hash) {
 if (open.dustLimitSatoshis < Channel.MIN_DUSTLIMIT) throw new InvalidDustLimit(open.temporaryChannelId, open.dustLimitSatoshis, Channel.MIN_DUSTLIMIT)
 }
@@ -60,7 +60,7 @@ object Helpers {
 */
 def validateParamsFunder(nodeParams: NodeParams, open: OpenChannel, accept: AcceptChannel): Unit = {
 if (accept.maxAcceptedHtlcs > Channel.MAX_ACCEPTED_HTLCS) throw new InvalidMaxAcceptedHtlcs(accept.temporaryChannelId, accept.maxAcceptedHtlcs, Channel.MAX_ACCEPTED_HTLCS)
-// only enfore dust limit check on mainnet
+// only enforce dust limit check on mainnet
 if (nodeParams.chainHash == Block.LivenetGenesisBlock.hash) {
 if (accept.dustLimitSatoshis < Channel.MIN_DUSTLIMIT) throw new InvalidDustLimit(accept.temporaryChannelId, accept.dustLimitSatoshis, Channel.MIN_DUSTLIMIT)
 }
@@ -519,7 +519,7 @@ object Helpers {
 def isLocalCommitDone(localCommitPublished: LocalCommitPublished) = {
 // is the commitment tx buried? (we need to check this because we may not have nay outputs)
 val isCommitTxConfirmed = localCommitPublished.irrevocablySpent.values.toSet.contains(localCommitPublished.commitTx.txid)
-// are there remaining spendable outputs from the commitment tx? we just substract all known spent outputs from the ones we control
+// are there remaining spendable outputs from the commitment tx? we just subtract all known spent outputs from the ones we control
 val commitOutputsSpendableByUs = (localCommitPublished.claimMainDelayedOutputTx.toSeq ++ localCommitPublished.htlcSuccessTxs ++ localCommitPublished.htlcTimeoutTxs)
 .flatMap(_.txIn.map(_.outPoint)).toSet -- localCommitPublished.irrevocablySpent.keys
 // which htlc delayed txes can we expect to be confirmed?
@@ -40,12 +40,12 @@ case class BitStream(bytes: Vector[Byte], offstart: Int, offend: Int) {
 * append bytes to a bitstream
 *
 * @param input bytes to append
-* @return an udpdate bitstream
+* @return an updated bitstream
 */
 def writeBytes(input: Seq[Byte]): BitStream = input.foldLeft(this) { case (bs, b) => bs.writeByte(b) }

 /**
-* append a bit to a bistream
+* append a bit to a bitstream
 *
 * @param bit bit to append
 * @return an update bitstream
@@ -63,7 +63,7 @@ case class BitStream(bytes: Vector[Byte], offstart: Int, offend: Int) {
 }

 /**
-* append bits to a bistream
+* append bits to a bitstream
 *
 * @param input bits to append
 * @return an update bitstream
@@ -168,7 +168,7 @@ object Noise {
 *
 * @param cipher cipher functions
 */
-case class UnitializedCipherState(cipher: CipherFunctions) extends CipherState {
+case class UninitializedCipherState(cipher: CipherFunctions) extends CipherState {
 override val hasKey = false

 override def encryptWithAd(ad: BinaryData, plaintext: BinaryData): (CipherState, BinaryData) = (this, plaintext)
@@ -197,11 +197,11 @@ object Noise {

 object CipherState {
 def apply(k: BinaryData, cipher: CipherFunctions): CipherState = k.length match {
-case 0 => UnitializedCipherState(cipher)
+case 0 => UninitializedCipherState(cipher)
 case 32 => InitializedCipherState(k, 0, cipher)
 }

-def apply(cipher: CipherFunctions): CipherState = UnitializedCipherState(cipher)
+def apply(cipher: CipherFunctions): CipherState = UninitializedCipherState(cipher)
 }

 /**
@@ -70,28 +70,28 @@ object Sphinx extends Logging {
 def blind(pub: PublicKey, blindingFactors: Seq[BinaryData]): PublicKey = blindingFactors.foldLeft(pub)(blind)

 /**
-* computes the ephemereal public keys and shared secrets for all nodes on the route.
+* computes the ephemeral public keys and shared secrets for all nodes on the route.
 *
 * @param sessionKey this node's session key
 * @param publicKeys public keys of each node on the route
-* @return a tuple (ephemereal public keys, shared secrets)
+* @return a tuple (ephemeral public keys, shared secrets)
 */
-def computeEphemerealPublicKeysAndSharedSecrets(sessionKey: PrivateKey, publicKeys: Seq[PublicKey]): (Seq[PublicKey], Seq[BinaryData]) = {
-val ephemerealPublicKey0 = blind(PublicKey(Crypto.curve.getG, compressed = true), sessionKey.value)
+def computeEphemeralPublicKeysAndSharedSecrets(sessionKey: PrivateKey, publicKeys: Seq[PublicKey]): (Seq[PublicKey], Seq[BinaryData]) = {
+val ephemeralPublicKey0 = blind(PublicKey(Crypto.curve.getG, compressed = true), sessionKey.value)
 val secret0 = computeSharedSecret(publicKeys(0), sessionKey)
-val blindingFactor0 = computeblindingFactor(ephemerealPublicKey0, secret0)
-computeEphemerealPublicKeysAndSharedSecrets(sessionKey, publicKeys.tail, Seq(ephemerealPublicKey0), Seq(blindingFactor0), Seq(secret0))
+val blindingFactor0 = computeblindingFactor(ephemeralPublicKey0, secret0)
+computeEphemeralPublicKeysAndSharedSecrets(sessionKey, publicKeys.tail, Seq(ephemeralPublicKey0), Seq(blindingFactor0), Seq(secret0))
 }

 @tailrec
-def computeEphemerealPublicKeysAndSharedSecrets(sessionKey: PrivateKey, publicKeys: Seq[PublicKey], ephemerealPublicKeys: Seq[PublicKey], blindingFactors: Seq[BinaryData], sharedSecrets: Seq[BinaryData]): (Seq[PublicKey], Seq[BinaryData]) = {
+def computeEphemeralPublicKeysAndSharedSecrets(sessionKey: PrivateKey, publicKeys: Seq[PublicKey], ephemeralPublicKeys: Seq[PublicKey], blindingFactors: Seq[BinaryData], sharedSecrets: Seq[BinaryData]): (Seq[PublicKey], Seq[BinaryData]) = {
 if (publicKeys.isEmpty)
-(ephemerealPublicKeys, sharedSecrets)
+(ephemeralPublicKeys, sharedSecrets)
 else {
-val ephemerealPublicKey = blind(ephemerealPublicKeys.last, blindingFactors.last)
+val ephemeralPublicKey = blind(ephemeralPublicKeys.last, blindingFactors.last)
 val secret = computeSharedSecret(blind(publicKeys.head, blindingFactors), sessionKey)
-val blindingFactor = computeblindingFactor(ephemerealPublicKey, secret)
-computeEphemerealPublicKeysAndSharedSecrets(sessionKey, publicKeys.tail, ephemerealPublicKeys :+ ephemerealPublicKey, blindingFactors :+ blindingFactor, sharedSecrets :+ secret)
+val blindingFactor = computeblindingFactor(ephemeralPublicKey, secret)
+computeEphemeralPublicKeysAndSharedSecrets(sessionKey, publicKeys.tail, ephemeralPublicKeys :+ ephemeralPublicKey, blindingFactors :+ blindingFactor, sharedSecrets :+ secret)
 }
 }

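Aside: the renamed helper chains each ephemeral key off the previous one. A dependency-free sketch of that chaining, with `Point`/`Scalar` and the three function parameters standing in for eclair's `PublicKey`/`BinaryData`, `blind`, `computeSharedSecret` and `computeblindingFactor`; only the structure of the recursion above is illustrated, not the cryptography:

```scala
// Sketch of the recursion above with placeholder types; firstEphemeralKey plays
// the role of ephemeralPublicKey0 = blind(G, sessionKey).
def ephemeralChain[Point, Scalar](
    firstEphemeralKey: Point,
    nodePublicKeys: Seq[Point],
    sharedSecretWith: (Point, Seq[Scalar]) => Scalar, // ECDH with pk_i blinded by all previous factors
    blindingFactor: (Point, Scalar) => Scalar,        // factor derived from (ephemeral key, shared secret)
    blind: (Point, Scalar) => Point                   // next ephemeral key = blind(previous, last factor)
): (Seq[Point], Seq[Scalar]) = {
  val (ephKeys, _, secrets) =
    nodePublicKeys.foldLeft((Seq(firstEphemeralKey), Seq.empty[Scalar], Seq.empty[Scalar])) {
      case ((es, bs, ss), pk) =>
        val e = if (bs.isEmpty) es.last else blind(es.last, bs.last) // first hop reuses firstEphemeralKey
        val s = sharedSecretWith(pk, bs)
        val b = blindingFactor(e, s)
        (if (bs.isEmpty) es else es :+ e, bs :+ b, ss :+ s)
    }
  (ephKeys, secrets)
}
```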
@@ -176,11 +176,11 @@ object Sphinx extends Logging {
 val bin = xor(packet.routingInfo ++ zeroes(PayloadLength + MacLength), generateStream(rho, PayloadLength + MacLength + MaxHops * (PayloadLength + MacLength)))
 val payload = bin.take(PayloadLength)
 val hmac = bin.slice(PayloadLength, PayloadLength + MacLength)
-val nextRoutinfo = bin.drop(PayloadLength + MacLength)
+val nextRouteInfo = bin.drop(PayloadLength + MacLength)

 val nextPubKey = blind(PublicKey(packet.publicKey), computeblindingFactor(PublicKey(packet.publicKey), sharedSecret))

-ParsedPacket(payload, Packet(Version, nextPubKey, hmac, nextRoutinfo), sharedSecret)
+ParsedPacket(payload, Packet(Version, nextPubKey, hmac, nextRouteInfo), sharedSecret)
 }

 @tailrec
@@ -201,13 +201,13 @@ object Sphinx extends Logging {
 *
 * @param payload payload for this packed
 * @param associatedData associated data
-* @param ephemerealPublicKey ephemereal key for this packed
+* @param ephemeralPublicKey ephemeral key for this packed
 * @param sharedSecret shared secret
 * @param packet current packet (1 + all zeroes if this is the last packet)
 * @param routingInfoFiller optional routing info filler, needed only when you're constructing the last packet
 * @return the next packet
 */
-private def makeNextPacket(payload: BinaryData, associatedData: BinaryData, ephemerealPublicKey: BinaryData, sharedSecret: BinaryData, packet: Packet, routingInfoFiller: BinaryData = BinaryData.empty): Packet = {
+private def makeNextPacket(payload: BinaryData, associatedData: BinaryData, ephemeralPublicKey: BinaryData, sharedSecret: BinaryData, packet: Packet, routingInfoFiller: BinaryData = BinaryData.empty): Packet = {
 require(payload.length == PayloadLength)

 val nextRoutingInfo = {
@@ -217,7 +217,7 @@ object Sphinx extends Logging {
 }

 val nextHmac: BinaryData = mac(generateKey("mu", sharedSecret), nextRoutingInfo ++ associatedData)
-val nextPacket = Packet(Version, ephemerealPublicKey, nextHmac, nextRoutingInfo)
+val nextPacket = Packet(Version, ephemeralPublicKey, nextHmac, nextRoutingInfo)
 nextPacket
 }

@@ -249,10 +249,10 @@ object Sphinx extends Logging {
 * shared secrets (one per node) can be used to parse returned error messages if needed
 */
 def makePacket(sessionKey: PrivateKey, publicKeys: Seq[PublicKey], payloads: Seq[BinaryData], associatedData: BinaryData): PacketAndSecrets = {
-val (ephemerealPublicKeys, sharedsecrets) = computeEphemerealPublicKeysAndSharedSecrets(sessionKey, publicKeys)
+val (ephemeralPublicKeys, sharedsecrets) = computeEphemeralPublicKeysAndSharedSecrets(sessionKey, publicKeys)
 val filler = generateFiller("rho", sharedsecrets.dropRight(1), PayloadLength + MacLength, MaxHops)

-val lastPacket = makeNextPacket(payloads.last, associatedData, ephemerealPublicKeys.last, sharedsecrets.last, LAST_PACKET, filler)
+val lastPacket = makeNextPacket(payloads.last, associatedData, ephemeralPublicKeys.last, sharedsecrets.last, LAST_PACKET, filler)

 @tailrec
 def loop(hoppayloads: Seq[BinaryData], ephkeys: Seq[PublicKey], sharedSecrets: Seq[BinaryData], packet: Packet): Packet = {
@@ -262,7 +262,7 @@ object Sphinx extends Logging {
 }
 }

-val packet = loop(payloads.dropRight(1), ephemerealPublicKeys.dropRight(1), sharedsecrets.dropRight(1), lastPacket)
+val packet = loop(payloads.dropRight(1), ephemeralPublicKeys.dropRight(1), sharedsecrets.dropRight(1), lastPacket)
 PacketAndSecrets(packet, sharedsecrets.zip(publicKeys))
 }

@@ -108,24 +108,24 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
 stay using currentStateData.copy(buffer = buffer ++ data)

 case Event(Listener(listener), WaitingForListenerData(enc, dec, buffer)) =>
-val (nextStateData, plaintextMessages) = WaitingForCyphertextData(enc, dec, None, buffer, listener).decrypt
+val (nextStateData, plaintextMessages) = WaitingForCiphertextData(enc, dec, None, buffer, listener).decrypt
 context.watch(listener)
 sendToListener(listener, plaintextMessages)
-goto(WaitingForCyphertext) using nextStateData
+goto(WaitingForCiphertext) using nextStateData

 }

-when(WaitingForCyphertext) {
-case Event(Tcp.Received(data), currentStateData@WaitingForCyphertextData(enc, dec, length, buffer, listener)) =>
-val (nextStateData, plaintextMessages) = WaitingForCyphertextData.decrypt(currentStateData.copy(buffer = buffer ++ data))
+when(WaitingForCiphertext) {
+case Event(Tcp.Received(data), currentStateData@WaitingForCiphertextData(enc, dec, length, buffer, listener)) =>
+val (nextStateData, plaintextMessages) = WaitingForCiphertextData.decrypt(currentStateData.copy(buffer = buffer ++ data))
 sendToListener(listener, plaintextMessages)
 stay using nextStateData

-case Event(t: T, WaitingForCyphertextData(enc, dec, length, buffer, listener)) =>
+case Event(t: T, WaitingForCiphertextData(enc, dec, length, buffer, listener)) =>
 val blob = codec.encode(t).require.toByteArray
 val (enc1, ciphertext) = TransportHandler.encrypt(enc, blob)
 out ! buf(ciphertext)
-stay using WaitingForCyphertextData(enc1, dec, length, buffer, listener)
+stay using WaitingForCiphertextData(enc1, dec, length, buffer, listener)
 }

 whenUnhandled {
@@ -208,7 +208,7 @@ object TransportHandler {
 sealed trait State
 case object Handshake extends State
 case object WaitingForListener extends State
-case object WaitingForCyphertext extends State
+case object WaitingForCiphertext extends State
 // @formatter:on

 case class Listener(listener: ActorRef)
@@ -232,7 +232,7 @@ object TransportHandler {

 override def encryptWithAd(ad: BinaryData, plaintext: BinaryData): (CipherState, BinaryData) = {
 cs match {
-case UnitializedCipherState(_) => (this, plaintext)
+case UninitializedCipherState(_) => (this, plaintext)
 case InitializedCipherState(k, n, _) if n == 999 => {
 val (_, ciphertext) = cs.encryptWithAd(ad, plaintext)
 val (ck1, k1) = SHA256HashFunctions.hkdf(ck, k)
@@ -247,7 +247,7 @@ object TransportHandler {

 override def decryptWithAd(ad: BinaryData, ciphertext: BinaryData): (CipherState, BinaryData) = {
 cs match {
-case UnitializedCipherState(_) => (this, ciphertext)
+case UninitializedCipherState(_) => (this, ciphertext)
 case InitializedCipherState(k, n, _) if n == 999 => {
 val (_, plaintext) = cs.decryptWithAd(ad, ciphertext)
 val (ck1, k1) = SHA256HashFunctions.hkdf(ck, k)
@@ -263,13 +263,13 @@ object TransportHandler {

 case class WaitingForListenerData(enc: CipherState, dec: CipherState, buffer: ByteString) extends Data

-case class WaitingForCyphertextData(enc: CipherState, dec: CipherState, ciphertextLength: Option[Int], buffer: ByteString, listener: ActorRef) extends Data {
-def decrypt: (WaitingForCyphertextData, Seq[BinaryData]) = WaitingForCyphertextData.decrypt(this)
+case class WaitingForCiphertextData(enc: CipherState, dec: CipherState, ciphertextLength: Option[Int], buffer: ByteString, listener: ActorRef) extends Data {
+def decrypt: (WaitingForCiphertextData, Seq[BinaryData]) = WaitingForCiphertextData.decrypt(this)
 }

-object WaitingForCyphertextData {
+object WaitingForCiphertextData {
 @tailrec
-def decrypt(state: WaitingForCyphertextData, acc: Seq[BinaryData] = Nil): (WaitingForCyphertextData, Seq[BinaryData]) = {
+def decrypt(state: WaitingForCiphertextData, acc: Seq[BinaryData] = Nil): (WaitingForCiphertextData, Seq[BinaryData]) = {
 (state.ciphertextLength, state.buffer.length) match {
 case (None, length) if length < 18 => (state, acc)
 case (None, _) =>
@@ -365,7 +365,7 @@ object PaymentRequest {
 object Signature {
 /**
 *
-* @param signature 65-bytes signatyre: r (32 bytes) | s (32 bytes) | recid (1 bytes)
+* @param signature 65-bytes signature: r (32 bytes) | s (32 bytes) | recid (1 bytes)
 * @return a (r, s, recoveryId)
 */
 def decode(signature: BinaryData): (BigInteger, BigInteger, Byte) = {
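Aside: the corrected comment describes a 65-byte layout of r (32 bytes) | s (32 bytes) | recid (1 byte). A stand-alone sketch of splitting such a signature (eclair's `decode` operates on BinaryData; this version uses a plain Array[Byte] purely for illustration):

```scala
import java.math.BigInteger

// Split a 65-byte signature laid out as r | s | recid, per the comment above.
def decodeCompactSig(signature: Array[Byte]): (BigInteger, BigInteger, Byte) = {
  require(signature.length == 65, "expected r (32 bytes) | s (32 bytes) | recid (1 byte)")
  val r = new BigInteger(1, signature.slice(0, 32))  // interpret as unsigned big-endian
  val s = new BigInteger(1, signature.slice(32, 64))
  (r, s, signature(64))
}
```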
@@ -392,7 +392,7 @@ object PaymentRequest {
 *
 * @param stream stream to write to
 * @param value a 5bits value
-* @return an upated stream
+* @return an updated stream
 */
 def write5(stream: BitStream, value: Int5): BitStream = stream.writeBits(toBits(value))

@@ -159,7 +159,7 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]

 // in case we just validated our first local channel, we announce the local node
 // note that this will also make sure we always update our node announcement on restart (eg: alias, color), because
-// even if we had stored a previous announcement, it would be overriden by this more recent one
+// even if we had stored a previous announcement, it would be overridden by this more recent one
 if (!d.nodes.contains(nodeParams.nodeId) && validated.exists(isRelatedTo(_, nodeParams.nodeId))) {
 log.info(s"first local channel validated, announcing local node")
 val nodeAnn = Announcements.makeNodeAnnouncement(nodeParams.privateKey, nodeParams.alias, nodeParams.color, nodeParams.publicAddresses)
@@ -46,7 +46,7 @@ object TestConstants {
 smartfeeNBlocks = 3,
 feeBaseMsat = 546000,
 feeProportionalMillionth = 10,
-reserveToFundingRatio = 0.01, // note: not used (overriden below)
+reserveToFundingRatio = 0.01, // note: not used (overridden below)
 maxReserveToFundingRatio = 0.05,
 channelsDb = new SqliteChannelsDb(sqlite),
 peersDb = new SqlitePeersDb(sqlite),
@@ -102,7 +102,7 @@ object TestConstants {
 smartfeeNBlocks = 3,
 feeBaseMsat = 546000,
 feeProportionalMillionth = 10,
-reserveToFundingRatio = 0.01, // note: not used (overriden below)
+reserveToFundingRatio = 0.01, // note: not used (overridden below)
 maxReserveToFundingRatio = 0.05,
 channelsDb = new SqliteChannelsDb(sqlite),
 peersDb = new SqlitePeersDb(sqlite),
@@ -1630,7 +1630,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
 within(30 seconds) {
 val sender = TestProbe()

-// initally we have :
+// initially we have :
 // alice = 800 000
 // bob = 200 000
 def send(): Transaction = {
@@ -17,7 +17,7 @@ object NoiseDemo extends App {

 def receive = ???

-val handskaheState = if (isWriter) {
+val handshakeState = if (isWriter) {
 val state = Noise.HandshakeState.initializeWriter(
 Noise.handshakePatternXK,
 "lightning".getBytes(),
@@ -35,7 +35,7 @@ object NoiseDemo extends App {
 state
 }

-context become handshake(handskaheState)
+context become handshake(handshakeState)

 def toNormal(enc: CipherState, dec: CipherState) = {
 unstashAll()
@@ -39,8 +39,8 @@ class SphinxSpec extends FunSuite {
 hop_blinding_factor[4] = 0xc96e00dddaf57e7edcd4fb5954be5b65b09f17cb6d20651b4e90315be5779205
 hop_ephemeral_pubkey[4] = 0x03a214ebd875aab6ddfd77f22c5e7311d7f77f17a169e599f157bbcdae8bf071f4
 */
-test("generate ephemereal keys and secrets") {
-val (ephkeys, sharedsecrets) = computeEphemerealPublicKeysAndSharedSecrets(sessionKey, publicKeys)
+test("generate ephemeral keys and secrets") {
+val (ephkeys, sharedsecrets) = computeEphemeralPublicKeysAndSharedSecrets(sessionKey, publicKeys)
 assert(ephkeys(0) == PublicKey(BinaryData("0x02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619")))
 assert(sharedsecrets(0) == BinaryData("0x53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66"))
 assert(ephkeys(1) == PublicKey(BinaryData("0x028f9438bfbf7feac2e108d677e3a82da596be706cc1cf342b75c7b7e22bf4e6e2")))
@@ -57,7 +57,7 @@ class SphinxSpec extends FunSuite {
 filler = 0xc6b008cf6414ed6e4c42c291eb505e9f22f5fe7d0ecdd15a833f4d016ac974d33adc6ea3293e20859e87ebfb937ba406abd025d14af692b12e9c9c2adbe307a679779259676211c071e614fdb386d1ff02db223a5b2fae03df68d321c7b29f7c7240edd3fa1b7cb6903f89dc01abf41b2eb0b49b6b8d73bb0774b58204c0d0e96d3cce45ad75406be0bc009e327b3e712a4bd178609c00b41da2daf8a4b0e1319f07a492ab4efb056f0f599f75e6dc7e0d10ce1cf59088ab6e873de377343880f7a24f0e36731a0b72092f8d5bc8cd346762e93b2bf203d00264e4bc136fc142de8f7b69154deb05854ea88e2d7506222c95ba1aab065c8a851391377d3406a35a9af3ac
 */
 test("generate filler") {
-val (_, sharedsecrets) = computeEphemerealPublicKeysAndSharedSecrets(sessionKey, publicKeys)
+val (_, sharedsecrets) = computeEphemeralPublicKeysAndSharedSecrets(sessionKey, publicKeys)
 val filler = generateFiller("rho", sharedsecrets.dropRight(1), PayloadLength + MacLength, 20)
 assert(filler == BinaryData("0xc6b008cf6414ed6e4c42c291eb505e9f22f5fe7d0ecdd15a833f4d016ac974d33adc6ea3293e20859e87ebfb937ba406abd025d14af692b12e9c9c2adbe307a679779259676211c071e614fdb386d1ff02db223a5b2fae03df68d321c7b29f7c7240edd3fa1b7cb6903f89dc01abf41b2eb0b49b6b8d73bb0774b58204c0d0e96d3cce45ad75406be0bc009e327b3e712a4bd178609c00b41da2daf8a4b0e1319f07a492ab4efb056f0f599f75e6dc7e0d10ce1cf59088ab6e873de377343880f7a24f0e36731a0b72092f8d5bc8cd346762e93b2bf203d00264e4bc136fc142de8f7b69154deb05854ea88e2d7506222c95ba1aab065c8a851391377d3406a35a9af3ac"))
 }
@@ -45,8 +45,8 @@ class TransportHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLik
 initiator ! Listener(probe1.ref)
 responder ! Listener(probe2.ref)

-awaitCond(initiator.stateName == TransportHandler.WaitingForCyphertext)
-awaitCond(responder.stateName == TransportHandler.WaitingForCyphertext)
+awaitCond(initiator.stateName == TransportHandler.WaitingForCiphertext)
+awaitCond(responder.stateName == TransportHandler.WaitingForCiphertext)

 initiator.tell(BinaryData("hello".getBytes), probe1.ref)
 probe2.expectMsg(BinaryData("hello".getBytes))
@@ -77,8 +77,8 @@ class TransportHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLik
 initiator ! Listener(probe1.ref)
 responder ! Listener(probe2.ref)

-awaitCond(initiator.stateName == TransportHandler.WaitingForCyphertext)
-awaitCond(responder.stateName == TransportHandler.WaitingForCyphertext)
+awaitCond(initiator.stateName == TransportHandler.WaitingForCiphertext)
+awaitCond(responder.stateName == TransportHandler.WaitingForCiphertext)

 initiator.tell(MyMessage("hello"), probe1.ref)
 probe2.expectMsg(MyMessage("hello"))
@@ -107,8 +107,8 @@ class TransportHandlerSpec extends TestKit(ActorSystem("test")) with FunSuiteLik
 initiator ! Listener(probe1.ref)
 responder ! Listener(probe2.ref)

-awaitCond(initiator.stateName == TransportHandler.WaitingForCyphertext)
-awaitCond(responder.stateName == TransportHandler.WaitingForCyphertext)
+awaitCond(initiator.stateName == TransportHandler.WaitingForCiphertext)
+awaitCond(responder.stateName == TransportHandler.WaitingForCiphertext)

 initiator.tell(BinaryData("hello".getBytes), probe1.ref)
 probe2.expectMsg(BinaryData("hello".getBytes))
@@ -36,7 +36,7 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
 sender.expectMsg(PaymentFailed(request.paymentHash, LocalFailure(RouteNotFound) :: Nil))
 }

-test("payment failed (unparseable failure)") { case (router, _) =>
+test("payment failed (unparsable failure)") { case (router, _) =>
 val relayer = TestProbe()
 val routerForwarder = TestProbe()
 val paymentFSM = TestFSMRef(new PaymentLifecycle(a, routerForwarder.ref, relayer.ref))
@@ -56,7 +56,7 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
 val WaitingForComplete(_, _, cmd1, Nil, _, _, _, hops) = paymentFSM.stateData

 relayer.expectMsg(ForwardShortId(channelId_ab, cmd1))
-sender.send(paymentFSM, UpdateFailHtlc("00" * 32, 0, "42" * 32)) // unparseable message
+sender.send(paymentFSM, UpdateFailHtlc("00" * 32, 0, "42" * 32)) // unparsable message

 // then the payment lifecycle will ask for a new route excluding all intermediate nodes
 routerForwarder.expectMsg(RouteRequest(a, d, ignoreNodes = Set(c), ignoreChannels = Set.empty))
@@ -65,9 +65,9 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
 sender.send(paymentFSM, RouteResponse(hops, Set(c), Set.empty))
 awaitCond(paymentFSM.stateName == WAITING_FOR_PAYMENT_COMPLETE)
 val WaitingForComplete(_, _, cmd2, _, _, _, _, _) = paymentFSM.stateData
-// and reply a 2nd time with an unparseable failure
+// and reply a 2nd time with an unparsable failure
 relayer.expectMsg(ForwardShortId(channelId_ab, cmd2))
-sender.send(paymentFSM, UpdateFailHtlc("00" * 32, 0, "42" * 32)) // unparseable message
+sender.send(paymentFSM, UpdateFailHtlc("00" * 32, 0, "42" * 32)) // unparsable message

 // we allow 2 tries, so we send a 2nd request to the router
 sender.expectMsg(PaymentFailed(request.paymentHash, UnreadableRemoteFailure(hops) :: UnreadableRemoteFailure(hops) :: Nil))
@@ -33,7 +33,7 @@ object GUIValidators {
 /**
 * Displays a label with an error message.
 *
-* @param errorLabel JFX label containing an error messsage
+* @param errorLabel JFX label containing an error message
 * @param validCondition if true the label is hidden, else it is shown
 * @return true if field is valid, false otherwise
 */
@@ -11,7 +11,7 @@ cd libbase58
 cd
 git clone https://github.com/ElementsProject/lightning.git
 cd lightning
-git checkkout fce9ee29e3c37b4291ebb050e6a687cfaa7df95a
+git checkout fce9ee29e3c37b4291ebb050e6a687cfaa7df95a
 git submodule init
 git submodule update
 make