
Improve AddHtlcFailed (#1349)

* Refactor timed-out HTLC helpers: directly take a DATA_CLOSING
and extract the relevant parts.

* ClosingStateSpec: test dust HTLCs

* Improve ClosingStateSpec

* Clean up usage of AddHtlcFailed

We were abusing AddHtlcFailed in some cases where an outgoing HTLC
was correctly added, but was later settled on-chain (fulfilled, timed
out or overridden by a different commit transaction).

These cases are now specifically handled with new Relayer.ForwardMessage
types dedicated to on-chain settlement.

* Refactor Relayer's ForwardMessages

ForwardFail and ForwardFulfill are now traits that handle both on-chain
and remote fails/fulfills (the resulting hierarchy is sketched below,
after the commit metadata).
Bastien Teinturier 2020-03-20 12:11:33 +01:00 committed by GitHub
parent 68874c2d6d
commit eae113f098
19 changed files with 418 additions and 338 deletions
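
For orientation, here is the resulting Relayer.ForwardMessage hierarchy, condensed from the Relayer diff further down. The type and field names are copied from the patch; this summary block itself is not part of the diff.

sealed trait ForwardMessage
case class ForwardAdd(add: UpdateAddHtlc, previousFailures: Seq[AddHtlcFailed] = Seq.empty) extends ForwardMessage
// fulfills: either relayed by the remote peer, or extracted from an on-chain transaction
sealed trait ForwardFulfill extends ForwardMessage { val paymentPreimage: ByteVector32; val to: Origin; val htlc: UpdateAddHtlc }
case class ForwardRemoteFulfill(fulfill: UpdateFulfillHtlc, to: Origin, htlc: UpdateAddHtlc) extends ForwardFulfill { override val paymentPreimage = fulfill.paymentPreimage }
case class ForwardOnChainFulfill(paymentPreimage: ByteVector32, to: Origin, htlc: UpdateAddHtlc) extends ForwardFulfill
// fails: remote fail, remote malformed fail, or an HTLC settled against us on-chain (timed out / overridden)
sealed trait ForwardFail extends ForwardMessage { val to: Origin; val htlc: UpdateAddHtlc }
case class ForwardRemoteFail(fail: UpdateFailHtlc, to: Origin, htlc: UpdateAddHtlc) extends ForwardFail
case class ForwardRemoteFailMalformed(fail: UpdateFailMalformedHtlc, to: Origin, htlc: UpdateAddHtlc) extends ForwardFail
case class ForwardOnChainFail(cause: ChannelException, to: Origin, htlc: UpdateAddHtlc) extends ForwardFail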

View File

@ -646,7 +646,7 @@ class Channel(val nodeParams: NodeParams, val wallet: EclairWallet, remoteNodeId
Commitments.receiveFulfill(d.commitments, fulfill) match {
case Success((commitments1, origin, htlc)) =>
// we forward preimages as soon as possible to the upstream channel because it allows us to pull funds
relayer ! Relayer.ForwardFulfill(fulfill, origin, htlc)
relayer ! Relayer.ForwardRemoteFulfill(fulfill, origin, htlc)
stay using d.copy(commitments = commitments1)
case Failure(cause) => handleLocalError(cause, d, Some(fulfill))
}
@ -990,7 +990,7 @@ class Channel(val nodeParams: NodeParams, val wallet: EclairWallet, remoteNodeId
Commitments.receiveFulfill(d.commitments, fulfill) match {
case Success((commitments1, origin, htlc)) =>
// we forward preimages as soon as possible to the upstream channel because it allows us to pull funds
relayer ! Relayer.ForwardFulfill(fulfill, origin, htlc)
relayer ! Relayer.ForwardRemoteFulfill(fulfill, origin, htlc)
stay using d.copy(commitments = commitments1)
case Failure(cause) => handleLocalError(cause, d, Some(fulfill))
}
@ -1260,15 +1260,15 @@ class Channel(val nodeParams: NodeParams, val wallet: EclairWallet, remoteNodeId
// we can then use these preimages to fulfill origin htlcs
log.info(s"processing BITCOIN_OUTPUT_SPENT with txid=${tx.txid} tx=$tx")
val extracted = Closing.extractPreimages(d.commitments.localCommit, tx)
extracted foreach { case (htlc, fulfill) =>
d.commitments.originChannels.get(fulfill.id) match {
extracted foreach { case (htlc, preimage) =>
d.commitments.originChannels.get(htlc.id) match {
case Some(origin) =>
log.info(s"fulfilling htlc #${fulfill.id} paymentHash=${sha256(fulfill.paymentPreimage)} origin=$origin")
relayer ! Relayer.ForwardFulfill(fulfill, origin, htlc)
log.info(s"fulfilling htlc #${htlc.id} paymentHash=${htlc.paymentHash} origin=$origin")
relayer ! Relayer.ForwardOnChainFulfill(preimage, origin, htlc)
case None =>
// if we don't have the origin, it means that we already have forwarded the fulfill so that's not a big deal.
// this can happen if they send a signature containing the fulfill, then fail the channel before we have time to sign it
log.info(s"cannot fulfill htlc #${fulfill.id} paymentHash=${sha256(fulfill.paymentPreimage)} (origin not found)")
log.info(s"cannot fulfill htlc #${htlc.id} paymentHash=${htlc.paymentHash} (origin not found)")
}
}
val revokedCommitPublished1 = d.revokedCommitPublished.map { rev =>
@ -1292,27 +1292,22 @@ class Channel(val nodeParams: NodeParams, val wallet: EclairWallet, remoteNodeId
context.system.eventStream.publish(LocalCommitConfirmed(self, remoteNodeId, d.channelId, blockHeight + d.commitments.remoteParams.toSelfDelay.toInt))
}
// we may need to fail some htlcs in case a commitment tx was published and they have reached the timeout threshold
val timedoutHtlcs =
Closing.timedoutHtlcs(d.commitments.localCommit, d.commitments.localParams.dustLimit, tx) ++
Closing.timedoutHtlcs(d.commitments.remoteCommit, d.commitments.remoteParams.dustLimit, tx) ++
d.commitments.remoteNextCommitInfo.left.toSeq.flatMap(r => Closing.timedoutHtlcs(r.nextRemoteCommit, d.commitments.remoteParams.dustLimit, tx))
timedoutHtlcs.foreach { add =>
Closing.timedoutHtlcs(d, tx).foreach { add =>
d.commitments.originChannels.get(add.id) match {
case Some(origin) =>
log.info(s"failing htlc #${add.id} paymentHash=${add.paymentHash} origin=$origin: htlc timed out")
relayer ! Status.Failure(AddHtlcFailed(d.channelId, add.paymentHash, HtlcTimedout(d.channelId, Set(add)), origin, None, None))
relayer ! Relayer.ForwardOnChainFail(HtlcsTimedoutDownstream(d.channelId, Set(add)), origin, add)
case None =>
// same as for fulfilling the htlc (no big deal)
log.info(s"cannot fail timedout htlc #${add.id} paymentHash=${add.paymentHash} (origin not found)")
}
}
// we also need to fail outgoing htlcs that we know will never reach the blockchain
val overriddenHtlcs = Closing.overriddenOutgoingHtlcs(d.commitments.localCommit, d.commitments.remoteCommit, d.commitments.remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit), tx)
overriddenHtlcs.foreach { add =>
Closing.overriddenOutgoingHtlcs(d, tx).foreach { add =>
d.commitments.originChannels.get(add.id) match {
case Some(origin) =>
log.info(s"failing htlc #${add.id} paymentHash=${add.paymentHash} origin=$origin: overridden by local commit")
relayer ! Status.Failure(AddHtlcFailed(d.channelId, add.paymentHash, HtlcOverridenByLocalCommit(d.channelId), origin, None, None))
relayer ! Relayer.ForwardOnChainFail(HtlcOverriddenByLocalCommit(d.channelId, add), origin, add)
case None =>
// same as for fulfilling the htlc (no big deal)
log.info(s"cannot fail overridden htlc #${add.id} paymentHash=${add.paymentHash} (origin not found)")
@ -1884,20 +1879,20 @@ class Channel(val nodeParams: NodeParams, val wallet: EclairWallet, remoteNodeId
val almostTimedOutIncoming = d.commitments.almostTimedOutIncomingHtlcs(c.blockCount, nodeParams.fulfillSafetyBeforeTimeoutBlocks)
if (timedOutOutgoing.nonEmpty) {
// Downstream timed out.
handleLocalError(HtlcTimedout(d.channelId, timedOutOutgoing), d, Some(c))
handleLocalError(HtlcsTimedoutDownstream(d.channelId, timedOutOutgoing), d, Some(c))
} else if (almostTimedOutIncoming.nonEmpty) {
// Upstream is close to timing out.
val relayedFulfills = d.commitments.localChanges.all.collect { case u: UpdateFulfillHtlc => u.id }.toSet
val offendingRelayedHtlcs = almostTimedOutIncoming.filter(htlc => relayedFulfills.contains(htlc.id))
if (offendingRelayedHtlcs.nonEmpty) {
handleLocalError(HtlcWillTimeoutUpstream(d.channelId, offendingRelayedHtlcs), d, Some(c))
handleLocalError(HtlcsWillTimeoutUpstream(d.channelId, offendingRelayedHtlcs), d, Some(c))
} else {
// There might be pending fulfill commands that we haven't relayed yet.
// Since this involves a DB call, we only want to check it if all the previous checks failed (this is the slow path).
val pendingRelayFulfills = nodeParams.db.pendingRelay.listPendingRelay(d.channelId).collect { case CMD_FULFILL_HTLC(id, r, _) => id }
val offendingPendingRelayFulfills = almostTimedOutIncoming.filter(htlc => pendingRelayFulfills.contains(htlc.id))
if (offendingPendingRelayFulfills.nonEmpty) {
handleLocalError(HtlcWillTimeoutUpstream(d.channelId, offendingPendingRelayFulfills), d, Some(c))
handleLocalError(HtlcsWillTimeoutUpstream(d.channelId, offendingPendingRelayFulfills), d, Some(c))
} else {
stay
}

View File

@ -55,9 +55,9 @@ case class ChannelUnavailable (override val channelId: ByteVect
case class InvalidFinalScript (override val channelId: ByteVector32) extends ChannelException(channelId, "invalid final script")
case class FundingTxTimedout (override val channelId: ByteVector32) extends ChannelException(channelId, "funding tx timed out")
case class FundingTxSpent (override val channelId: ByteVector32, spendingTx: Transaction) extends ChannelException(channelId, s"funding tx has been spent by txid=${spendingTx.txid}")
case class HtlcTimedout (override val channelId: ByteVector32, htlcs: Set[UpdateAddHtlc]) extends ChannelException(channelId, s"one or more htlcs timed out: ids=${htlcs.take(10).map(_.id).mkString(",")}") // we only display the first 10 ids
case class HtlcWillTimeoutUpstream (override val channelId: ByteVector32, htlcs: Set[UpdateAddHtlc]) extends ChannelException(channelId, s"one or more htlcs that should be fulfilled are close to timing out upstream: ids=${htlcs.take(10).map(_.id).mkString}") // we only display the first 10 ids
case class HtlcOverridenByLocalCommit (override val channelId: ByteVector32) extends ChannelException(channelId, "htlc was overriden by local commit")
case class HtlcsTimedoutDownstream (override val channelId: ByteVector32, htlcs: Set[UpdateAddHtlc]) extends ChannelException(channelId, s"one or more htlcs timed out downstream: ids=${htlcs.take(10).map(_.id).mkString(",")}") // we only display the first 10 ids
case class HtlcsWillTimeoutUpstream (override val channelId: ByteVector32, htlcs: Set[UpdateAddHtlc]) extends ChannelException(channelId, s"one or more htlcs that should be fulfilled are close to timing out upstream: ids=${htlcs.take(10).map(_.id).mkString}") // we only display the first 10 ids
case class HtlcOverriddenByLocalCommit (override val channelId: ByteVector32, htlc: UpdateAddHtlc) extends ChannelException(channelId, s"htlc ${htlc.id} was overridden by local commit")
case class FeerateTooSmall (override val channelId: ByteVector32, remoteFeeratePerKw: Long) extends ChannelException(channelId, s"remote fee rate is too small: remoteFeeratePerKw=$remoteFeeratePerKw")
case class FeerateTooDifferent (override val channelId: ByteVector32, localFeeratePerKw: Long, remoteFeeratePerKw: Long) extends ChannelException(channelId, s"local/remote feerates are too different: remoteFeeratePerKw=$remoteFeeratePerKw localFeeratePerKw=$localFeeratePerKw")
case class InvalidCommitmentSignature (override val channelId: ByteVector32, tx: Transaction) extends ChannelException(channelId, s"invalid commitment signature: tx=$tx")

View File

@ -544,12 +544,12 @@ object Commitments {
case fail: UpdateFailHtlc =>
val origin = commitments.originChannels(fail.id)
val add = commitments.remoteCommit.spec.findHtlcById(fail.id, IN).map(_.add).get
Relayer.ForwardFail(fail, origin, add)
Relayer.ForwardRemoteFail(fail, origin, add)
// same as above
case fail: UpdateFailMalformedHtlc =>
val origin = commitments.originChannels(fail.id)
val add = commitments.remoteCommit.spec.findHtlcById(fail.id, IN).map(_.add).get
Relayer.ForwardFailMalformed(fail, origin, add)
Relayer.ForwardRemoteFailMalformed(fail, origin, add)
}
// the outgoing following htlcs have been completed (fulfilled or failed) when we received this revocation
// they have been removed from both local and remote commitment

View File

@ -20,8 +20,7 @@ import akka.event.{DiagnosticLoggingAdapter, LoggingAdapter}
import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey, ripemd160, sha256}
import fr.acinq.bitcoin.Script._
import fr.acinq.bitcoin._
import fr.acinq.eclair.Features.Wumbo
import fr.acinq.eclair.Features.hasFeature
import fr.acinq.eclair.Features.{Wumbo, hasFeature}
import fr.acinq.eclair.blockchain.EclairWallet
import fr.acinq.eclair.blockchain.fee.{FeeEstimator, FeeTargets}
import fr.acinq.eclair.channel.Channel.REFRESH_CHANNEL_UPDATE_INTERVAL
@ -83,13 +82,13 @@ object Helpers {
}
/**
* Returns the number of confirmations needed to safely handle the funding transaction,
* we make sure the cumulative block reward largely exceeds the channel size.
*
* @param nodeParams
* @param fundingSatoshis funding amount of the channel
* @return number of confirmations needed
*/
* Returns the number of confirmations needed to safely handle the funding transaction,
* we make sure the cumulative block reward largely exceeds the channel size.
*
* @param nodeParams
* @param fundingSatoshis funding amount of the channel
* @return number of confirmations needed
*/
def minDepthForFunding(nodeParams: NodeParams, fundingSatoshis: Satoshi): Long = fundingSatoshis match {
case funding if funding <= Channel.MAX_FUNDING => nodeParams.minDepthBlocks
case funding if funding > Channel.MAX_FUNDING =>
@ -107,7 +106,7 @@ object Helpers {
// MUST reject the channel.
if (nodeParams.chainHash != open.chainHash) throw InvalidChainHash(open.temporaryChannelId, local = nodeParams.chainHash, remote = open.chainHash)
if(open.fundingSatoshis < nodeParams.minFundingSatoshis || open.fundingSatoshis > nodeParams.maxFundingSatoshis) throw InvalidFundingAmount(open.temporaryChannelId, open.fundingSatoshis, nodeParams.minFundingSatoshis, nodeParams.maxFundingSatoshis)
if (open.fundingSatoshis < nodeParams.minFundingSatoshis || open.fundingSatoshis > nodeParams.maxFundingSatoshis) throw InvalidFundingAmount(open.temporaryChannelId, open.fundingSatoshis, nodeParams.minFundingSatoshis, nodeParams.maxFundingSatoshis)
// BOLT #2: Channel funding limits
if (open.fundingSatoshis >= Channel.MAX_FUNDING && !hasFeature(nodeParams.features, Wumbo)) throw InvalidFundingAmount(open.temporaryChannelId, open.fundingSatoshis, nodeParams.minFundingSatoshis, Channel.MAX_FUNDING)
@ -602,7 +601,6 @@ object Helpers {
val channelKeyPath = keyManager.channelKeyPath(localParams, channelVersion)
val localHtlcPubkey = Generators.derivePubKey(keyManager.htlcPoint(channelKeyPath).publicKey, remoteCommit.remotePerCommitmentPoint)
val remoteHtlcPubkey = Generators.derivePubKey(remoteParams.htlcBasepoint, remoteCommit.remotePerCommitmentPoint)
val localPerCommitmentPoint = keyManager.commitmentPoint(channelKeyPath, commitments.localCommit.index.toInt)
val remoteRevocationPubkey = Generators.revocationPubKey(keyManager.revocationPoint(channelKeyPath).publicKey, remoteCommit.remotePerCommitmentPoint)
// we need to use a rather high fee for htlc-claim because we compete with the counterparty
@ -819,18 +817,16 @@ object Helpers {
* Not doing that would result in us losing money, because the downstream node would pull money from one side, and
* the upstream node would get refunded after a timeout.
*
* @param localCommit
* @param tx
* @return a set of pairs (add, fulfills) if extraction was successful:
* @return a set of pairs (add, preimage) if extraction was successful:
* - add is the htlc in the downstream channel from which we extracted the preimage
* - fulfill needs to be sent to the upstream channel
* - preimage needs to be sent to the upstream channel
*/
def extractPreimages(localCommit: LocalCommit, tx: Transaction)(implicit log: LoggingAdapter): Set[(UpdateAddHtlc, UpdateFulfillHtlc)] = {
def extractPreimages(localCommit: LocalCommit, tx: Transaction)(implicit log: LoggingAdapter): Set[(UpdateAddHtlc, ByteVector32)] = {
val paymentPreimages = tx.txIn.map(_.witness match {
case ScriptWitness(Seq(localSig, paymentPreimage, htlcOfferedScript)) if paymentPreimage.size == 32 =>
case ScriptWitness(Seq(_, paymentPreimage, _)) if paymentPreimage.size == 32 =>
log.info(s"extracted paymentPreimage=$paymentPreimage from tx=$tx (claim-htlc-success)")
Some(ByteVector32(paymentPreimage))
case ScriptWitness(Seq(ByteVector.empty, remoteSig, localSig, paymentPreimage, htlcReceivedScript)) if paymentPreimage.size == 32 =>
case ScriptWitness(Seq(ByteVector.empty, _, _, paymentPreimage, _)) if paymentPreimage.size == 32 =>
log.info(s"extracted paymentPreimage=$paymentPreimage from tx=$tx (htlc-success)")
Some(ByteVector32(paymentPreimage))
case _ => None
@ -842,9 +838,7 @@ object Helpers {
// - or we have already received the fulfill and forwarded it upstream
val outgoingHtlcs = localCommit.spec.htlcs.filter(_.direction == OUT).map(_.add)
outgoingHtlcs.collect {
case add if add.paymentHash == sha256(paymentPreimage) =>
// let's just pretend we received the preimage from the counterparty and build a fulfill message
(add, UpdateFulfillHtlc(add.channelId, add.id, paymentPreimage))
case add if add.paymentHash == sha256(paymentPreimage) => (add, paymentPreimage)
}
}
}
@ -853,8 +847,18 @@ object Helpers {
* In CLOSING state, when we are notified that a transaction has been confirmed, we analyze it to find out if one or
* more htlcs have timed out and need to be failed in an upstream channel.
*
* @param localCommit
* @param localDustLimit
* @param tx a tx that has reached mindepth
* @return a set of htlcs that need to be failed upstream
*/
def timedoutHtlcs(d: DATA_CLOSING, tx: Transaction)(implicit log: LoggingAdapter): Set[UpdateAddHtlc] =
timedoutHtlcs(d.commitments.localCommit, d.commitments.localParams.dustLimit, tx) ++
timedoutHtlcs(d.commitments.remoteCommit, d.commitments.remoteParams.dustLimit, tx) ++
d.commitments.remoteNextCommitInfo.left.toSeq.flatMap(r => timedoutHtlcs(r.nextRemoteCommit, d.commitments.remoteParams.dustLimit, tx))
/**
* In CLOSING state, when we are notified that a transaction has been confirmed, we analyze it to find out if one or
* more htlcs have timed out and need to be failed in an upstream channel.
*
* @param tx a tx that has reached mindepth
* @return a set of htlcs that need to be failed upstream
*/
@ -877,8 +881,6 @@ object Helpers {
* In CLOSING state, when we are notified that a transaction has been confirmed, we analyze it to find out if one or
* more htlcs have timed out and need to be failed in an upstream channel.
*
* @param remoteCommit
* @param remoteDustLimit
* @param tx a tx that has reached mindepth
* @return a set of htlcs that need to be failed upstream
*/
@ -901,9 +903,6 @@ object Helpers {
* As soon as a local or remote commitment reaches min_depth, we know which htlcs will be settled on-chain (whether
* or not they actually have an output in the commitment tx).
*
* @param localCommit
* @param remoteCommit
* @param nextRemoteCommit_opt
* @param tx a transaction that is sufficiently buried in the blockchain
*/
def onchainOutgoingHtlcs(localCommit: LocalCommit, remoteCommit: RemoteCommit, nextRemoteCommit_opt: Option[RemoteCommit], tx: Transaction): Set[UpdateAddHtlc] = {
@ -911,7 +910,7 @@ object Helpers {
localCommit.spec.htlcs.filter(_.direction == OUT).map(_.add)
} else if (remoteCommit.txid == tx.txid) {
remoteCommit.spec.htlcs.filter(_.direction == IN).map(_.add)
} else if (nextRemoteCommit_opt.map(_.txid) == Some(tx.txid)) {
} else if (nextRemoteCommit_opt.map(_.txid).contains(tx.txid)) {
nextRemoteCommit_opt.get.spec.htlcs.filter(_.direction == IN).map(_.add)
} else {
Set.empty
@ -923,14 +922,11 @@ object Helpers {
* they will never reach the blockchain.
*
* Those are only present in the remote's commitment.
*
* @param localCommit
* @param remoteCommit
* @param tx
* @param log
* @return
*/
def overriddenOutgoingHtlcs(localCommit: LocalCommit, remoteCommit: RemoteCommit, nextRemoteCommit_opt: Option[RemoteCommit], tx: Transaction)(implicit log: LoggingAdapter): Set[UpdateAddHtlc] =
def overriddenOutgoingHtlcs(d: DATA_CLOSING, tx: Transaction)(implicit log: LoggingAdapter): Set[UpdateAddHtlc] = {
val localCommit = d.commitments.localCommit
val remoteCommit = d.commitments.remoteCommit
val nextRemoteCommit_opt = d.commitments.remoteNextCommitInfo.left.toOption.map(_.nextRemoteCommit)
if (localCommit.publishableTxs.commitTx.tx.txid == tx.txid) {
// our commit got confirmed, so any htlc that we signed but they didn't sign will never reach the chain
val mostRecentRemoteCommit = nextRemoteCommit_opt.getOrElse(remoteCommit)
@ -944,13 +940,16 @@ object Helpers {
// any htlc that we signed in the new commitment that they didn't sign will never reach the chain
nextRemoteCommit.spec.htlcs.filter(_.direction == IN).map(_.add) -- localCommit.spec.htlcs.filter(_.direction == OUT).map(_.add)
case None =>
// their last commitment got confirmed, so no htlcs will be overriden, they will timeout or be fulfilled on chain
// their last commitment got confirmed, so no htlcs will be overridden, they will timeout or be fulfilled on chain
Set.empty
}
} else if (nextRemoteCommit_opt.map(_.txid) == Some(tx.txid)) {
// their last commitment got confirmed, so no htlcs will be overriden, they will timeout or be fulfilled on chain
} else if (nextRemoteCommit_opt.map(_.txid).contains(tx.txid)) {
// their last commitment got confirmed, so no htlcs will be overridden, they will timeout or be fulfilled on chain
Set.empty
} else Set.empty
} else {
Set.empty
}
}
/**
* In CLOSING state, when we are notified that a transaction has been confirmed, we check if this tx belongs in the

View File

@ -221,7 +221,6 @@ object ChannelRelayer {
case (_: TooManyAcceptedHtlcs, Some(channelUpdate)) => TemporaryChannelFailure(channelUpdate)
case (_: ChannelUnavailable, Some(channelUpdate)) if !Announcements.isEnabled(channelUpdate.channelFlags) => ChannelDisabled(channelUpdate.messageFlags, channelUpdate.channelFlags, channelUpdate)
case (_: ChannelUnavailable, None) => PermanentChannelFailure
case (_: HtlcTimedout, _) => PermanentChannelFailure
case _ => TemporaryNodeFailure
}
}

View File

@ -107,15 +107,18 @@ class NodeRelayer(nodeParams: NodeParams, relayer: ActorRef, router: ActorRef, c
case None => log.error("could not find pending incoming payment: payment will not be relayed: please investigate")
}
case Relayer.ForwardFulfill(fulfill, Origin.TrampolineRelayed(_, Some(paymentSender)), _) =>
paymentSender ! fulfill
val paymentHash = Crypto.sha256(fulfill.paymentPreimage)
pendingOutgoing.get(paymentHash).foreach(p => if (!p.fulfilledUpstream) {
// We want to fulfill upstream as soon as we receive the preimage (even if not all HTLCs have fulfilled downstream).
log.debug("trampoline payment successfully relayed")
fulfillPayment(p.upstream, fulfill.paymentPreimage)
context become main(pendingIncoming, pendingOutgoing + (paymentHash -> p.copy(fulfilledUpstream = true)))
})
case ff: Relayer.ForwardFulfill => ff.to match {
case Origin.TrampolineRelayed(_, Some(paymentSender)) =>
paymentSender ! ff
val paymentHash = Crypto.sha256(ff.paymentPreimage)
pendingOutgoing.get(paymentHash).foreach(p => if (!p.fulfilledUpstream) {
// We want to fulfill upstream as soon as we receive the preimage (even if not all HTLCs have fulfilled downstream).
log.debug("trampoline payment successfully relayed")
fulfillPayment(p.upstream, ff.paymentPreimage)
context become main(pendingIncoming, pendingOutgoing + (paymentHash -> p.copy(fulfilledUpstream = true)))
})
case _ => log.error(s"unexpected non-trampoline fulfill: $ff")
}
case PaymentSent(id, paymentHash, paymentPreimage, _, _, parts) =>
// We may have already fulfilled upstream, but we can now emit an accurate relayed event and clean-up resources.

View File

@ -104,17 +104,13 @@ class PostRestartHtlcCleaner(nodeParams: NodeParams, commandBuffer: ActorRef, in
Metrics.PendingNotRelayed.update(notRelayed1.size)
context become main(brokenHtlcs.copy(notRelayed = notRelayed1))
case Relayer.ForwardFulfill(fulfill, to, add) =>
log.info("htlc fulfilled downstream: ({},{})", fulfill.channelId, fulfill.id)
handleDownstreamFulfill(brokenHtlcs, to, add, fulfill.paymentPreimage)
case ff: Relayer.ForwardFulfill =>
log.info("htlc fulfilled downstream: ({},{})", ff.htlc.channelId, ff.htlc.id)
handleDownstreamFulfill(brokenHtlcs, ff.to, ff.htlc, ff.paymentPreimage)
case Relayer.ForwardFail(_, to, add) =>
log.info("htlc failed downstream: ({},{})", add.channelId, add.id)
handleDownstreamFailure(brokenHtlcs, to, add)
case Relayer.ForwardFailMalformed(_, to, add) =>
log.info("htlc failed (malformed) downstream: ({},{})", add.channelId, add.id)
handleDownstreamFailure(brokenHtlcs, to, add)
case ff: Relayer.ForwardFail =>
log.info("htlc failed downstream: ({},{},{})", ff.htlc.channelId, ff.htlc.id, ff.getClass.getSimpleName)
handleDownstreamFailure(brokenHtlcs, ff.to, ff.htlc)
case ack: CommandBuffer.CommandAck => commandBuffer forward ack

View File

@ -147,50 +147,39 @@ class Relayer(nodeParams: NodeParams, router: ActorRef, register: ActorRef, comm
commandBuffer ! CommandBuffer.CommandSend(add.channelId, cmdFail)
}
case Status.Failure(addFailed: AddHtlcFailed) =>
addFailed.origin match {
case Origin.Local(id, None) => log.error(s"received unexpected add failed with no sender (paymentId=$id)")
case Origin.Local(_, Some(sender)) => sender ! Status.Failure(addFailed)
case _: Origin.Relayed => channelRelayer forward Status.Failure(addFailed)
case Origin.TrampolineRelayed(htlcs, None) => log.error(s"received unexpected add failed with no sender (upstream=${htlcs.mkString(", ")}")
case Origin.TrampolineRelayed(_, Some(paymentSender)) => paymentSender ! Status.Failure(addFailed)
}
case Status.Failure(addFailed: AddHtlcFailed) => addFailed.origin match {
case Origin.Local(id, None) => log.error(s"received unexpected add failed with no sender (paymentId=$id)")
case Origin.Local(_, Some(sender)) => sender ! Status.Failure(addFailed)
case _: Origin.Relayed => channelRelayer forward Status.Failure(addFailed)
case Origin.TrampolineRelayed(htlcs, None) => log.error(s"received unexpected add failed with no sender (upstream=${htlcs.mkString(", ")}")
case Origin.TrampolineRelayed(_, Some(paymentSender)) => paymentSender ! Status.Failure(addFailed)
}
case ff@ForwardFulfill(fulfill, to, add) =>
to match {
case Origin.Local(_, None) => postRestartCleaner forward ff
case Origin.Local(_, Some(sender)) => sender ! fulfill
case Origin.Relayed(originChannelId, originHtlcId, amountIn, amountOut) =>
val cmd = CMD_FULFILL_HTLC(originHtlcId, fulfill.paymentPreimage, commit = true)
commandBuffer ! CommandBuffer.CommandSend(originChannelId, cmd)
context.system.eventStream.publish(ChannelPaymentRelayed(amountIn, amountOut, add.paymentHash, originChannelId, fulfill.channelId))
case Origin.TrampolineRelayed(_, None) => postRestartCleaner forward ff
case Origin.TrampolineRelayed(_, Some(_)) => nodeRelayer forward ff
}
case ff: ForwardFulfill => ff.to match {
case Origin.Local(_, None) => postRestartCleaner forward ff
case Origin.Local(_, Some(sender)) => sender ! ff
case Origin.Relayed(originChannelId, originHtlcId, amountIn, amountOut) =>
val cmd = CMD_FULFILL_HTLC(originHtlcId, ff.paymentPreimage, commit = true)
commandBuffer ! CommandBuffer.CommandSend(originChannelId, cmd)
context.system.eventStream.publish(ChannelPaymentRelayed(amountIn, amountOut, ff.htlc.paymentHash, originChannelId, ff.htlc.channelId))
case Origin.TrampolineRelayed(_, None) => postRestartCleaner forward ff
case Origin.TrampolineRelayed(_, Some(_)) => nodeRelayer forward ff
}
case ff@ForwardFail(fail, to, _) =>
to match {
case Origin.Local(_, None) => postRestartCleaner forward ff
case Origin.Local(_, Some(sender)) => sender ! fail
case Origin.Relayed(originChannelId, originHtlcId, _, _) =>
Metrics.recordPaymentRelayFailed(Tags.FailureType.Remote, Tags.RelayType.Channel)
val cmd = CMD_FAIL_HTLC(originHtlcId, Left(fail.reason), commit = true)
commandBuffer ! CommandBuffer.CommandSend(originChannelId, cmd)
case Origin.TrampolineRelayed(_, None) => postRestartCleaner forward ff
case Origin.TrampolineRelayed(_, Some(paymentSender)) => paymentSender ! fail
}
case ff@ForwardFailMalformed(fail, to, _) =>
to match {
case Origin.Local(_, None) => postRestartCleaner forward ff
case Origin.Local(_, Some(sender)) => sender ! fail
case Origin.Relayed(originChannelId, originHtlcId, _, _) =>
Metrics.recordPaymentRelayFailed(Tags.FailureType.Malformed, Tags.RelayType.Channel)
val cmd = CMD_FAIL_MALFORMED_HTLC(originHtlcId, fail.onionHash, fail.failureCode, commit = true)
commandBuffer ! CommandBuffer.CommandSend(originChannelId, cmd)
case Origin.TrampolineRelayed(_, None) => postRestartCleaner forward ff
case Origin.TrampolineRelayed(_, Some(paymentSender)) => paymentSender ! fail
}
case ff: ForwardFail => ff.to match {
case Origin.Local(_, None) => postRestartCleaner forward ff
case Origin.Local(_, Some(sender)) => sender ! ff
case Origin.Relayed(originChannelId, originHtlcId, _, _) =>
Metrics.recordPaymentRelayFailed(Tags.FailureType.Remote, Tags.RelayType.Channel)
val cmd = ff match {
case ForwardRemoteFail(fail, _, _) => CMD_FAIL_HTLC(originHtlcId, Left(fail.reason), commit = true)
case ForwardRemoteFailMalformed(fail, _, _) => CMD_FAIL_MALFORMED_HTLC(originHtlcId, fail.onionHash, fail.failureCode, commit = true)
case _: ForwardOnChainFail => CMD_FAIL_HTLC(originHtlcId, Right(PermanentChannelFailure), commit = true)
}
commandBuffer ! CommandBuffer.CommandSend(originChannelId, cmd)
case Origin.TrampolineRelayed(_, None) => postRestartCleaner forward ff
case Origin.TrampolineRelayed(_, Some(paymentSender)) => paymentSender ! ff
}
case ack: CommandBuffer.CommandAck => commandBuffer forward ack
@ -201,9 +190,8 @@ class Relayer(nodeParams: NodeParams, router: ActorRef, register: ActorRef, comm
val paymentHash_opt = currentMessage match {
case ForwardAdd(add, _) => Some(add.paymentHash)
case Status.Failure(addFailed: AddHtlcFailed) => Some(addFailed.paymentHash)
case ForwardFulfill(_, _, add) => Some(add.paymentHash)
case ForwardFail(_, _, add) => Some(add.paymentHash)
case ForwardFailMalformed(_, _, add) => Some(add.paymentHash)
case ff: ForwardFulfill => Some(ff.htlc.paymentHash)
case ff: ForwardFail => Some(ff.htlc.paymentHash)
case _ => None
}
Logs.mdc(category_opt = Some(Logs.LogCategory.PAYMENT), paymentHash_opt = paymentHash_opt)
@ -222,9 +210,13 @@ object Relayer extends Logging {
// @formatter:off
sealed trait ForwardMessage
case class ForwardAdd(add: UpdateAddHtlc, previousFailures: Seq[AddHtlcFailed] = Seq.empty) extends ForwardMessage
case class ForwardFulfill(fulfill: UpdateFulfillHtlc, to: Origin, htlc: UpdateAddHtlc) extends ForwardMessage
case class ForwardFail(fail: UpdateFailHtlc, to: Origin, htlc: UpdateAddHtlc) extends ForwardMessage
case class ForwardFailMalformed(fail: UpdateFailMalformedHtlc, to: Origin, htlc: UpdateAddHtlc) extends ForwardMessage
sealed trait ForwardFulfill extends ForwardMessage { val paymentPreimage: ByteVector32; val to: Origin; val htlc: UpdateAddHtlc }
case class ForwardRemoteFulfill(fulfill: UpdateFulfillHtlc, to: Origin, htlc: UpdateAddHtlc) extends ForwardFulfill { override val paymentPreimage = fulfill.paymentPreimage }
case class ForwardOnChainFulfill(paymentPreimage: ByteVector32, to: Origin, htlc: UpdateAddHtlc) extends ForwardFulfill
sealed trait ForwardFail extends ForwardMessage { val to: Origin; val htlc: UpdateAddHtlc }
case class ForwardRemoteFail(fail: UpdateFailHtlc, to: Origin, htlc: UpdateAddHtlc) extends ForwardFail
case class ForwardRemoteFailMalformed(fail: UpdateFailMalformedHtlc, to: Origin, htlc: UpdateAddHtlc) extends ForwardFail
case class ForwardOnChainFail(cause: ChannelException, to: Origin, htlc: UpdateAddHtlc) extends ForwardFail
case class UsableBalance(remoteNodeId: PublicKey, shortChannelId: ShortChannelId, canSend: MilliSatoshi, canReceive: MilliSatoshi, isPublic: Boolean)
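
A quick usage note (a hypothetical helper, not part of the patch): because paymentPreimage, to and htlc are exposed on the ForwardFulfill trait itself, a consumer can settle upstream without caring whether the preimage came from a remote update_fulfill_htlc or was extracted on-chain. A minimal sketch, assuming a commandBuffer: ActorRef in scope as in the Relayer handler above:

def settleFulfillUpstream(ff: Relayer.ForwardFulfill): Unit = ff.to match {
  case Origin.Relayed(originChannelId, originHtlcId, _, _) =>
    // the upstream command is the same whether the preimage was learned remotely or on-chain
    commandBuffer ! CommandBuffer.CommandSend(originChannelId, CMD_FULFILL_HTLC(originHtlcId, ff.paymentPreimage, commit = true))
  case _ =>
    () // Local and TrampolineRelayed origins are routed to their respective senders, as in the handlers above
}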

View File

@ -23,13 +23,14 @@ import akka.event.Logging.MDC
import fr.acinq.bitcoin.ByteVector32
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.eclair._
import fr.acinq.eclair.channel.{CMD_ADD_HTLC, ChannelCommandResponse, Register}
import fr.acinq.eclair.channel.{CMD_ADD_HTLC, ChannelCommandResponse, HtlcsTimedoutDownstream, Register}
import fr.acinq.eclair.crypto.{Sphinx, TransportHandler}
import fr.acinq.eclair.db.{OutgoingPayment, OutgoingPaymentStatus, PaymentType}
import fr.acinq.eclair.payment.Monitoring.{Metrics, Tags}
import fr.acinq.eclair.payment.PaymentRequest.ExtraHop
import fr.acinq.eclair.payment.PaymentSent.PartialPayment
import fr.acinq.eclair.payment._
import fr.acinq.eclair.payment.relay.Relayer
import fr.acinq.eclair.payment.send.PaymentInitiator.SendPaymentConfig
import fr.acinq.eclair.payment.send.PaymentLifecycle._
import fr.acinq.eclair.router._
@ -120,11 +121,19 @@ class PaymentLifecycle(nodeParams: NodeParams, cfg: SendPaymentConfig, router: A
when(WAITING_FOR_PAYMENT_COMPLETE) {
case Event(ChannelCommandResponse.Ok, _) => stay
case Event(fulfill: UpdateFulfillHtlc, WaitingForComplete(s, c, cmd, _, _, _, _, route)) =>
val p = PartialPayment(id, c.finalPayload.amount, cmd.amount - c.finalPayload.amount, fulfill.channelId, Some(cfg.fullRoute(route)))
case Event(fulfill: Relayer.ForwardFulfill, WaitingForComplete(s, c, cmd, _, _, _, _, route)) =>
val p = PartialPayment(id, c.finalPayload.amount, cmd.amount - c.finalPayload.amount, fulfill.htlc.channelId, Some(cfg.fullRoute(route)))
onSuccess(s, cfg.createPaymentSent(fulfill.paymentPreimage, p :: Nil))
myStop()
case Event(forwardFail: Relayer.ForwardFail, _) =>
forwardFail match {
case Relayer.ForwardRemoteFail(fail, _, _) => self ! fail
case Relayer.ForwardRemoteFailMalformed(fail, _, _) => self ! fail
case Relayer.ForwardOnChainFail(cause, _, _) => self ! Status.Failure(cause)
}
stay
case Event(fail: UpdateFailHtlc, WaitingForComplete(s, c, _, failures, sharedSecrets, ignoreNodes, ignoreChannels, hops)) =>
(Sphinx.FailurePacket.decrypt(fail.reason, sharedSecrets) match {
case success@Success(e) =>
@ -225,9 +234,10 @@ class PaymentLifecycle(nodeParams: NodeParams, cfg: SendPaymentConfig, router: A
case Event(Status.Failure(t), WaitingForComplete(s, c, _, failures, _, ignoreNodes, ignoreChannels, hops)) =>
Metrics.PaymentError.withTag(Tags.Failure, Tags.FailureType(LocalFailure(t))).increment()
// If the first hop was selected by the sender (in routePrefix) and it failed, it doesn't make sense to retry (we
// will end up retrying over that same faulty channel).
if (failures.size + 1 >= c.maxAttempts || c.routePrefix.nonEmpty) {
val isFatal = failures.size + 1 >= c.maxAttempts || // retries exhausted
c.routePrefix.nonEmpty || // first hop was selected by the sender and failed, it doesn't make sense to retry
t.isInstanceOf[HtlcsTimedoutDownstream] // htlc timed out so retrying won't help, we need to re-compute cltvs
if (isFatal) {
onFailure(s, PaymentFailed(id, paymentHash, failures :+ LocalFailure(t)))
myStop()
} else {

View File

@ -131,11 +131,11 @@ class FuzzySpec extends TestkitBaseClass with StateTestsHelperMethods with Loggi
case req: PaymentRequest =>
sendChannel ! buildCmdAdd(req.paymentHash, req.nodeId)
context become {
case u: UpdateFulfillHtlc =>
log.info(s"successfully sent htlc #${u.id}")
case u: Relayer.ForwardFulfill =>
log.info(s"successfully sent htlc #${u.htlc.id}")
initiatePaymentOrStop(remaining - 1)
case u: UpdateFailHtlc =>
log.warning(s"htlc failed: ${u.id}")
case u: Relayer.ForwardFail =>
log.warning(s"htlc failed: ${u.htlc.id}")
initiatePaymentOrStop(remaining - 1)
case Status.Failure(t) =>
log.error(s"htlc error: ${t.getMessage}")

View File

@ -20,7 +20,7 @@ import java.util.UUID
import akka.testkit.{TestFSMRef, TestKitBase, TestProbe}
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.bitcoin.{ByteVector32, Crypto}
import fr.acinq.bitcoin.{ByteVector32, Crypto, ScriptFlags, Transaction}
import fr.acinq.eclair.TestConstants.{Alice, Bob, TestFeeEstimator}
import fr.acinq.eclair.blockchain._
import fr.acinq.eclair.blockchain.fee.FeeTargets
@ -33,6 +33,8 @@ import fr.acinq.eclair.wire._
import fr.acinq.eclair.{NodeParams, TestConstants, randomBytes32, _}
import org.scalatest.{ParallelTestExecution, fixture}
import scala.concurrent.duration._
/**
* Created by PM on 23/08/2016.
*/
@ -110,24 +112,29 @@ trait StateTestsHelperMethods extends TestKitBase with fixture.TestSuite with Pa
channelUpdateListener.expectMsgType[LocalChannelUpdate]
}
def makeCmdAdd(amount: MilliSatoshi, destination: PublicKey, currentBlockHeight: Long): (ByteVector32, CMD_ADD_HTLC) = {
def makeCmdAdd(amount: MilliSatoshi, destination: PublicKey, currentBlockHeight: Long, upstream: Upstream = Upstream.Local(UUID.randomUUID)): (ByteVector32, CMD_ADD_HTLC) = {
val payment_preimage: ByteVector32 = randomBytes32
val payment_hash: ByteVector32 = Crypto.sha256(payment_preimage)
val expiry = CltvExpiryDelta(144).toCltvExpiry(currentBlockHeight)
val cmd = OutgoingPacket.buildCommand(Upstream.Local(UUID.randomUUID), payment_hash, ChannelHop(null, destination, null) :: Nil, FinalLegacyPayload(amount, expiry))._1.copy(commit = false)
val cmd = OutgoingPacket.buildCommand(upstream, payment_hash, ChannelHop(null, destination, null) :: Nil, FinalLegacyPayload(amount, expiry))._1.copy(commit = false)
(payment_preimage, cmd)
}
def addHtlc(amount: MilliSatoshi, s: TestFSMRef[State, Data, Channel], r: TestFSMRef[State, Data, Channel], s2r: TestProbe, r2s: TestProbe): (ByteVector32, UpdateAddHtlc) = {
val sender = TestProbe()
val currentBlockHeight = s.underlyingActor.nodeParams.currentBlockHeight
val (payment_preimage, cmd) = makeCmdAdd(amount, r.underlyingActor.nodeParams.nodeId, currentBlockHeight)
sender.send(s, cmd)
val htlc = addHtlc(cmd, s, r, s2r, r2s)
(payment_preimage, htlc)
}
def addHtlc(cmdAdd: CMD_ADD_HTLC, s: TestFSMRef[State, Data, Channel], r: TestFSMRef[State, Data, Channel], s2r: TestProbe, r2s: TestProbe): UpdateAddHtlc = {
val sender = TestProbe()
sender.send(s, cmdAdd)
sender.expectMsg(ChannelCommandResponse.Ok)
val htlc = s2r.expectMsgType[UpdateAddHtlc]
s2r.forward(r)
awaitCond(r.stateData.asInstanceOf[HasCommitments].commitments.remoteChanges.proposed.contains(htlc))
(payment_preimage, htlc)
htlc
}
def fulfillHtlc(id: Long, R: ByteVector32, s: TestFSMRef[State, Data, Channel], r: TestFSMRef[State, Data, Channel], s2r: TestProbe, r2s: TestProbe): Unit = {
@ -169,23 +176,115 @@ trait StateTestsHelperMethods extends TestKitBase with fixture.TestSuite with Pa
awaitCond(r.stateData.asInstanceOf[HasCommitments].commitments.localCommit.index == rCommitIndex + 1)
awaitCond(r.stateData.asInstanceOf[HasCommitments].commitments.remoteCommit.index == rCommitIndex + 1)
}
}
def channelId(a: TestFSMRef[State, Data, Channel]) = Helpers.getChannelId(a.stateData)
def mutualClose(s: TestFSMRef[State, Data, Channel], r: TestFSMRef[State, Data, Channel], s2r: TestProbe, r2s: TestProbe, s2blockchain: TestProbe, r2blockchain: TestProbe): Unit = {
val sender = TestProbe()
// s initiates a closing
sender.send(s, CMD_CLOSE(None))
s2r.expectMsgType[Shutdown]
s2r.forward(r)
r2s.expectMsgType[Shutdown]
r2s.forward(s)
// agreeing on a closing fee
var sCloseFee, rCloseFee = 0.sat
do {
sCloseFee = s2r.expectMsgType[ClosingSigned].feeSatoshis
s2r.forward(r)
rCloseFee = r2s.expectMsgType[ClosingSigned].feeSatoshis
r2s.forward(s)
} while (sCloseFee != rCloseFee)
s2blockchain.expectMsgType[PublishAsap]
s2blockchain.expectMsgType[WatchConfirmed]
r2blockchain.expectMsgType[PublishAsap]
r2blockchain.expectMsgType[WatchConfirmed]
awaitCond(s.stateName == CLOSING)
awaitCond(r.stateName == CLOSING)
// both nodes are now in CLOSING state with a mutual close tx pending for confirmation
}
def localClose(s: TestFSMRef[State, Data, Channel], s2blockchain: TestProbe): LocalCommitPublished = {
// an error occurs and s publishes its commit tx
val commitTx = s.stateData.asInstanceOf[DATA_NORMAL].commitments.localCommit.publishableTxs.commitTx.tx
s ! Error(ByteVector32.Zeroes, "oops")
s2blockchain.expectMsg(PublishAsap(commitTx))
awaitCond(s.stateName == CLOSING)
val closingState = s.stateData.asInstanceOf[DATA_CLOSING]
assert(closingState.localCommitPublished.isDefined)
val localCommitPublished = closingState.localCommitPublished.get
// if s has a main output in the commit tx (when it has a non-dust balance), it should be claimed
localCommitPublished.claimMainDelayedOutputTx.foreach(tx => s2blockchain.expectMsg(PublishAsap(tx)))
// all htlcs success/timeout should be published
s2blockchain.expectMsgAllOf((localCommitPublished.htlcSuccessTxs ++ localCommitPublished.htlcTimeoutTxs).map(PublishAsap): _*)
// and their outputs should be claimed
s2blockchain.expectMsgAllOf(localCommitPublished.claimHtlcDelayedTxs.map(PublishAsap): _*)
// we watch the confirmation of the "final" transactions that send funds to our wallets (main delayed output and 2nd stage htlc transactions)
assert(s2blockchain.expectMsgType[WatchConfirmed].event === BITCOIN_TX_CONFIRMED(commitTx))
localCommitPublished.claimMainDelayedOutputTx.foreach(tx => assert(s2blockchain.expectMsgType[WatchConfirmed].event === BITCOIN_TX_CONFIRMED(tx)))
assert(localCommitPublished.claimHtlcDelayedTxs.map(_ => s2blockchain.expectMsgType[WatchConfirmed].event).toSet === localCommitPublished.claimHtlcDelayedTxs.map(BITCOIN_TX_CONFIRMED).toSet)
// we watch outputs of the commitment tx that both parties may spend
val htlcOutputIndexes = (localCommitPublished.htlcSuccessTxs ++ localCommitPublished.htlcTimeoutTxs).map(tx => tx.txIn.head.outPoint.index)
val spentWatches = htlcOutputIndexes.map(_ => s2blockchain.expectMsgType[WatchSpent])
spentWatches.foreach(ws => assert(ws.event === BITCOIN_OUTPUT_SPENT))
spentWatches.foreach(ws => assert(ws.txId === commitTx.txid))
assert(spentWatches.map(_.outputIndex).toSet === htlcOutputIndexes.toSet)
s2blockchain.expectNoMsg(1 second)
// s is now in CLOSING state with txes pending for confirmation before going in CLOSED state
s.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get
}
def remoteClose(rCommitTx: Transaction, s: TestFSMRef[State, Data, Channel], s2blockchain: TestProbe): RemoteCommitPublished = {
// we make s believe r unilaterally closed the channel
s ! WatchEventSpent(BITCOIN_FUNDING_SPENT, rCommitTx)
awaitCond(s.stateName == CLOSING)
val closingData = s.stateData.asInstanceOf[DATA_CLOSING]
def getRemoteCommitPublished(d: DATA_CLOSING): Option[RemoteCommitPublished] = d.remoteCommitPublished.orElse(d.nextRemoteCommitPublished).orElse(d.futureRemoteCommitPublished)
assert(getRemoteCommitPublished(closingData).isDefined)
assert(closingData.localCommitPublished.isEmpty)
val remoteCommitPublished = getRemoteCommitPublished(closingData).get
// if s has a main output in the commit tx (when it has a non-dust balance), it should be claimed
remoteCommitPublished.claimMainOutputTx.foreach(tx => {
Transaction.correctlySpends(tx, rCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
s2blockchain.expectMsg(PublishAsap(tx))
})
// all htlcs success/timeout should be claimed
val claimHtlcTxes = remoteCommitPublished.claimHtlcSuccessTxs ++ remoteCommitPublished.claimHtlcTimeoutTxs
claimHtlcTxes.foreach(tx => Transaction.correctlySpends(tx, rCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS))
s2blockchain.expectMsgAllOf(claimHtlcTxes.map(PublishAsap): _*)
// we watch the confirmation of the "final" transactions that send funds to our wallets (main delayed output and 2nd stage htlc transactions)
assert(s2blockchain.expectMsgType[WatchConfirmed].event === BITCOIN_TX_CONFIRMED(rCommitTx))
remoteCommitPublished.claimMainOutputTx.foreach(tx => assert(s2blockchain.expectMsgType[WatchConfirmed].event === BITCOIN_TX_CONFIRMED(tx)))
// we watch outputs of the commitment tx that both parties may spend
val htlcOutputIndexes = claimHtlcTxes.map(tx => tx.txIn.head.outPoint.index)
val spentWatches = htlcOutputIndexes.map(_ => s2blockchain.expectMsgType[WatchSpent])
spentWatches.foreach(ws => assert(ws.event === BITCOIN_OUTPUT_SPENT))
spentWatches.foreach(ws => assert(ws.txId === rCommitTx.txid))
assert(spentWatches.map(_.outputIndex).toSet === htlcOutputIndexes.toSet)
s2blockchain.expectNoMsg(1 second)
// s is now in CLOSING state with txes pending for confirmation before going in CLOSED state
getRemoteCommitPublished(s.stateData.asInstanceOf[DATA_CLOSING]).get
}
def channelId(a: TestFSMRef[State, Data, Channel]): ByteVector32 = Helpers.getChannelId(a.stateData)
// @formatter:off
implicit class ChannelWithTestFeeConf(a: TestFSMRef[State, Data, Channel]) {
def feeEstimator: TestFeeEstimator = a.underlyingActor.nodeParams.onChainFeeConf.feeEstimator.asInstanceOf[TestFeeEstimator]
def feeTargets: FeeTargets = a.underlyingActor.nodeParams.onChainFeeConf.feeTargets
}
implicit class PeerWithTestFeeConf(a: TestFSMRef[Peer.State, Peer.Data, Peer]) {
def feeEstimator: TestFeeEstimator = a.underlyingActor.nodeParams.onChainFeeConf.feeEstimator.asInstanceOf[TestFeeEstimator]
def feeTargets: FeeTargets = a.underlyingActor.nodeParams.onChainFeeConf.feeTargets
}
// @formatter:on
}

View File

@ -1067,7 +1067,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
bob2alice.expectMsgType[RevokeAndAck]
bob2alice.forward(alice)
// alice will forward the fail upstream
val forward = relayerA.expectMsgType[ForwardFail]
val forward = relayerA.expectMsgType[ForwardRemoteFail]
assert(forward.fail === fail)
assert(forward.htlc === htlc)
}
@ -1096,7 +1096,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
bob2alice.expectMsgType[RevokeAndAck]
bob2alice.forward(alice)
// alice will forward the fail upstream
val forward = relayerA.expectMsgType[ForwardFailMalformed]
val forward = relayerA.expectMsgType[ForwardRemoteFailMalformed]
assert(forward.fail === fail)
assert(forward.htlc === htlc)
}
@ -1183,7 +1183,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
awaitCond(alice.stateData == initialState.copy(
commitments = initialState.commitments.copy(remoteChanges = initialState.commitments.remoteChanges.copy(initialState.commitments.remoteChanges.proposed :+ fulfill))))
// alice immediately propagates the fulfill upstream
val forward = relayerA.expectMsgType[ForwardFulfill]
val forward = relayerA.expectMsgType[ForwardRemoteFulfill]
assert(forward.fulfill === fulfill)
assert(forward.htlc === htlc)
}
@ -1840,7 +1840,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
val ChannelErrorOccurred(_, _, _, _, LocalError(err), isFatal) = listener.expectMsgType[ChannelErrorOccurred]
assert(isFatal)
assert(err.isInstanceOf[HtlcWillTimeoutUpstream])
assert(err.isInstanceOf[HtlcsWillTimeoutUpstream])
bob2blockchain.expectMsg(PublishAsap(initialCommitTx))
bob2blockchain.expectMsgType[PublishAsap] // main delayed
@ -1875,7 +1875,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
val ChannelErrorOccurred(_, _, _, _, LocalError(err), isFatal) = listener.expectMsgType[ChannelErrorOccurred]
assert(isFatal)
assert(err.isInstanceOf[HtlcWillTimeoutUpstream])
assert(err.isInstanceOf[HtlcsWillTimeoutUpstream])
bob2blockchain.expectMsg(PublishAsap(initialCommitTx))
bob2blockchain.expectMsgType[PublishAsap] // main delayed
@ -1915,7 +1915,7 @@ class NormalStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
val ChannelErrorOccurred(_, _, _, _, LocalError(err), isFatal) = listener.expectMsgType[ChannelErrorOccurred]
assert(isFatal)
assert(err.isInstanceOf[HtlcWillTimeoutUpstream])
assert(err.isInstanceOf[HtlcsWillTimeoutUpstream])
bob2blockchain.expectMsg(PublishAsap(initialCommitTx))
bob2blockchain.expectMsgType[PublishAsap] // main delayed

View File

@ -434,7 +434,7 @@ class OfflineStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
val ChannelErrorOccurred(_, _, _, _, LocalError(err), isFatal) = listener.expectMsgType[ChannelErrorOccurred]
assert(isFatal)
assert(err.isInstanceOf[HtlcWillTimeoutUpstream])
assert(err.isInstanceOf[HtlcsWillTimeoutUpstream])
bob2blockchain.expectMsg(PublishAsap(initialCommitTx))
bob2blockchain.expectMsgType[PublishAsap] // main delayed

View File

@ -500,7 +500,7 @@ class ShutdownStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
bob2alice.expectMsgType[RevokeAndAck]
bob2alice.forward(alice)
// alice will forward the fail upstream
val forward = relayerA.expectMsgType[ForwardFail]
val forward = relayerA.expectMsgType[ForwardRemoteFail]
assert(forward.fail === fail)
}
@ -526,7 +526,7 @@ class ShutdownStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
bob2alice.expectMsgType[RevokeAndAck]
bob2alice.forward(alice)
// alice will forward the fail upstream
val forward = relayerA.expectMsgType[ForwardFailMalformed]
val forward = relayerA.expectMsgType[ForwardRemoteFailMalformed]
assert(forward.fail === fail)
}

View File

@ -18,7 +18,6 @@ package fr.acinq.eclair.channel.states.h
import java.util.UUID
import akka.actor.Status
import akka.actor.Status.Failure
import akka.testkit.{TestFSMRef, TestProbe}
import fr.acinq.bitcoin.Crypto.PrivateKey
@ -111,36 +110,6 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
}
}
def mutualClose(alice: TestFSMRef[State, Data, Channel],
bob: TestFSMRef[State, Data, Channel],
alice2bob: TestProbe,
bob2alice: TestProbe,
alice2blockchain: TestProbe,
bob2blockchain: TestProbe): Unit = {
val sender = TestProbe()
// alice initiates a closing
sender.send(alice, CMD_CLOSE(None))
alice2bob.expectMsgType[Shutdown]
alice2bob.forward(bob)
bob2alice.expectMsgType[Shutdown]
bob2alice.forward(alice)
// agreeing on a closing fee
var aliceCloseFee, bobCloseFee = 0.sat
do {
aliceCloseFee = alice2bob.expectMsgType[ClosingSigned].feeSatoshis
alice2bob.forward(bob)
bobCloseFee = bob2alice.expectMsgType[ClosingSigned].feeSatoshis
bob2alice.forward(alice)
} while (aliceCloseFee != bobCloseFee)
alice2blockchain.expectMsgType[PublishAsap]
alice2blockchain.expectMsgType[WatchConfirmed]
bob2blockchain.expectMsgType[PublishAsap]
bob2blockchain.expectMsgType[WatchConfirmed]
awaitCond(alice.stateName == CLOSING)
awaitCond(bob.stateName == CLOSING)
// both nodes are now in CLOSING state with a mutual close tx pending for confirmation
}
test("start fee negotiation from configured block target") { f =>
import f._
@ -364,11 +333,7 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
import f._
// an error occurs and alice publishes her commit tx
val aliceCommitTx = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.localCommit.publishableTxs.commitTx.tx
alice ! Error(ByteVector32.Zeroes, "oops")
alice2blockchain.expectMsg(PublishAsap(aliceCommitTx))
alice2blockchain.expectMsgType[PublishAsap]
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === aliceCommitTx.txid)
awaitCond(alice.stateName == CLOSING)
localClose(alice, alice2blockchain)
val initialState = alice.stateData.asInstanceOf[DATA_CLOSING]
assert(initialState.localCommitPublished.isDefined)
@ -384,18 +349,7 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
val (ra1, htlca1) = addHtlc(50000000 msat, alice, bob, alice2bob, bob2alice)
crossSign(alice, bob, alice2bob, bob2alice)
relayerB.expectMsgType[ForwardAdd]
// an error occurs and alice publishes her commit tx
val aliceCommitTx = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.localCommit.publishableTxs.commitTx.tx
alice ! Error(ByteVector32.Zeroes, "oops")
alice2blockchain.expectMsg(PublishAsap(aliceCommitTx)) // commit tx
alice2blockchain.expectMsgType[PublishAsap] // main-delayed-output
alice2blockchain.expectMsgType[PublishAsap] // htlc-timeout
alice2blockchain.expectMsgType[PublishAsap] // claim-delayed-output
assert(alice2blockchain.expectMsgType[WatchConfirmed].event === BITCOIN_TX_CONFIRMED(aliceCommitTx))
assert(alice2blockchain.expectMsgType[WatchConfirmed].event.isInstanceOf[BITCOIN_TX_CONFIRMED]) // main-delayed-output
assert(alice2blockchain.expectMsgType[WatchConfirmed].event.isInstanceOf[BITCOIN_TX_CONFIRMED]) // claim-delayed-output
assert(alice2blockchain.expectMsgType[WatchSpent].event === BITCOIN_OUTPUT_SPENT)
awaitCond(alice.stateName == CLOSING)
localClose(alice, alice2blockchain)
val initialState = alice.stateData.asInstanceOf[DATA_CLOSING]
assert(initialState.localCommitPublished.isDefined)
@ -405,46 +359,56 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
// scenario 1: bob claims the htlc output from the commit tx using its preimage
val claimHtlcSuccessFromCommitTx = Transaction(version = 0, txIn = TxIn(outPoint = OutPoint(randomBytes32, 0), signatureScript = ByteVector.empty, sequence = 0, witness = Scripts.witnessClaimHtlcSuccessFromCommitTx(Transactions.PlaceHolderSig, ra1, ByteVector.fill(130)(33))) :: Nil, txOut = Nil, lockTime = 0)
alice ! WatchEventSpent(BITCOIN_OUTPUT_SPENT, claimHtlcSuccessFromCommitTx)
assert(relayerA.expectMsgType[ForwardFulfill].fulfill === UpdateFulfillHtlc(htlca1.channelId, htlca1.id, ra1))
val fulfill1 = relayerA.expectMsgType[ForwardOnChainFulfill]
assert(fulfill1.htlc === htlca1)
assert(fulfill1.paymentPreimage === ra1)
// scenario 2: bob claims the htlc output from his own commit tx using its preimage (let's assume both parties had published their commitment tx)
val claimHtlcSuccessTx = Transaction(version = 0, txIn = TxIn(outPoint = OutPoint(randomBytes32, 0), signatureScript = ByteVector.empty, sequence = 0, witness = Scripts.witnessHtlcSuccess(Transactions.PlaceHolderSig, Transactions.PlaceHolderSig, ra1, ByteVector.fill(130)(33))) :: Nil, txOut = Nil, lockTime = 0)
alice ! WatchEventSpent(BITCOIN_OUTPUT_SPENT, claimHtlcSuccessTx)
assert(relayerA.expectMsgType[ForwardFulfill].fulfill === UpdateFulfillHtlc(htlca1.channelId, htlca1.id, ra1))
val fulfill2 = relayerA.expectMsgType[ForwardOnChainFulfill]
assert(fulfill2.htlc === htlca1)
assert(fulfill2.paymentPreimage === ra1)
assert(alice.stateData == initialState) // this was a no-op
}
test("recv BITCOIN_TX_CONFIRMED (local commit)") { f =>
import f._
val listener = TestProbe()
system.eventStream.subscribe(listener.ref, classOf[LocalCommitConfirmed])
system.eventStream.subscribe(listener.ref, classOf[PaymentSettlingOnChain])
// alice sends an htlc to bob
val (_, htlca1) = addHtlc(50000000 msat, alice, bob, alice2bob, bob2alice)
// alice sends an htlc below dust to bob
val amountBelowDust = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.localParams.dustLimit - 100.msat
val (_, htlca2) = addHtlc(amountBelowDust, alice, bob, alice2bob, bob2alice)
crossSign(alice, bob, alice2bob, bob2alice)
// an error occurs and alice publishes her commit tx
val aliceCommitTx = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.localCommit.publishableTxs.commitTx.tx
alice ! Error(ByteVector32.Zeroes, "oops")
alice2blockchain.expectMsg(PublishAsap(aliceCommitTx)) // commit tx
val claimMainDelayedTx = alice2blockchain.expectMsgType[PublishAsap].tx // main-delayed-output
val htlcTimeoutTx = alice2blockchain.expectMsgType[PublishAsap].tx // htlc-timeout
val claimDelayedTx = alice2blockchain.expectMsgType[PublishAsap].tx // claim-delayed-output
assert(alice2blockchain.expectMsgType[WatchConfirmed].event === BITCOIN_TX_CONFIRMED(aliceCommitTx))
assert(alice2blockchain.expectMsgType[WatchConfirmed].event.isInstanceOf[BITCOIN_TX_CONFIRMED]) // main-delayed-output
assert(alice2blockchain.expectMsgType[WatchConfirmed].event.isInstanceOf[BITCOIN_TX_CONFIRMED]) // claim-delayed-output
assert(alice2blockchain.expectMsgType[WatchSpent].event === BITCOIN_OUTPUT_SPENT)
awaitCond(alice.stateName == CLOSING)
val initialState = alice.stateData.asInstanceOf[DATA_CLOSING]
assert(initialState.localCommitPublished.isDefined)
val closingState = localClose(alice, alice2blockchain)
// actual test starts here
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(aliceCommitTx), 42, 0, aliceCommitTx)
assert(closingState.claimMainDelayedOutputTx.isDefined)
assert(closingState.htlcSuccessTxs.isEmpty)
assert(closingState.htlcTimeoutTxs.length === 1)
assert(closingState.claimHtlcDelayedTxs.length === 1)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(closingState.commitTx), 42, 0, closingState.commitTx)
assert(listener.expectMsgType[LocalCommitConfirmed].refundAtBlock == 42 + TestConstants.Bob.channelParams.toSelfDelay.toInt)
assert(listener.expectMsgType[PaymentSettlingOnChain].paymentHash == htlca1.paymentHash)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(claimMainDelayedTx), 200, 0, claimMainDelayedTx)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(htlcTimeoutTx), 201, 0, htlcTimeoutTx)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(claimDelayedTx), 202, 0, claimDelayedTx)
// htlcs below dust will never reach the chain, once the commit tx is confirmed we can consider them failed
assert(relayerA.expectMsgType[ForwardOnChainFail].htlc === htlca2)
relayerA.expectNoMsg(100 millis)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(closingState.claimMainDelayedOutputTx.get), 200, 0, closingState.claimMainDelayedOutputTx.get)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(closingState.htlcTimeoutTxs.head), 201, 0, closingState.htlcTimeoutTxs.head)
assert(alice.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get.irrevocablySpent.values.toSet === Set(
closingState.commitTx.txid,
closingState.claimMainDelayedOutputTx.get.txid,
closingState.htlcTimeoutTxs.head.txid
))
assert(relayerA.expectMsgType[ForwardOnChainFail].htlc === htlca1)
relayerA.expectNoMsg(100 millis)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(closingState.claimHtlcDelayedTxs.head), 202, 0, closingState.claimHtlcDelayedTxs.head)
awaitCond(alice.stateName == CLOSED)
}
@ -462,21 +426,19 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
alice2bob.expectMsgType[CommitSig]
// note that bob doesn't receive the new sig!
// then we make alice unilaterally close the channel
alice ! Error(ByteVector32.Zeroes, "oops")
alice2blockchain.expectMsg(PublishAsap(aliceCommitTx))
awaitCond(alice.stateName == CLOSING)
val aliceData = alice.stateData.asInstanceOf[DATA_CLOSING]
assert(aliceData.localCommitPublished.isDefined)
channelUpdateListener.expectMsgType[LocalChannelDown]
val closingState = localClose(alice, alice2blockchain)
// actual test starts here
channelUpdateListener.expectMsgType[LocalChannelDown]
assert(closingState.htlcSuccessTxs.isEmpty && closingState.htlcTimeoutTxs.isEmpty && closingState.claimHtlcDelayedTxs.isEmpty)
// when the commit tx is confirmed, alice knows that the htlc she sent right before the unilateral close will never reach the chain
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(aliceCommitTx), 0, 0, aliceCommitTx)
// so she fails it
val origin = alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id)
relayerA.expectMsg(Status.Failure(AddHtlcFailed(aliceData.channelId, htlc.paymentHash, HtlcOverridenByLocalCommit(aliceData.channelId), origin, None, None)))
relayerA.expectMsg(ForwardOnChainFail(HtlcOverriddenByLocalCommit(channelId(alice), htlc), origin, htlc))
// the htlc will not settle on chain
listener.expectNoMsg(2 seconds)
relayerA.expectNoMsg(100 millis)
}
test("recv BITCOIN_TX_CONFIRMED (remote commit with htlcs only signed by local in next remote commit)") { f =>
@ -491,19 +453,17 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
sender.send(alice, CMD_SIGN)
sender.expectMsg(ChannelCommandResponse.Ok)
alice2bob.expectMsgType[CommitSig]
// then we make alice believe that bob unilaterally closed the channel
alice ! WatchEventSpent(BITCOIN_FUNDING_SPENT, bobCommitTx)
awaitCond(alice.stateName == CLOSING)
val aliceData = alice.stateData.asInstanceOf[DATA_CLOSING]
assert(aliceData.remoteCommitPublished.isDefined)
channelUpdateListener.expectMsgType[LocalChannelDown]
val closingState = remoteClose(bobCommitTx, alice, alice2blockchain)
// actual test starts here
channelUpdateListener.expectMsgType[LocalChannelDown]
assert(closingState.claimMainOutputTx.nonEmpty)
assert(closingState.claimHtlcSuccessTxs.isEmpty && closingState.claimHtlcTimeoutTxs.isEmpty)
// when the commit tx is confirmed, alice knows that the htlc she sent right before the unilateral close will never reach the chain
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(bobCommitTx), 0, 0, bobCommitTx)
// so she fails it
val origin = alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id)
relayerA.expectMsg(Status.Failure(AddHtlcFailed(aliceData.channelId, htlc.paymentHash, HtlcOverridenByLocalCommit(aliceData.channelId), origin, None, None)))
relayerA.expectMsg(ForwardOnChainFail(HtlcOverriddenByLocalCommit(channelId(alice), htlc), origin, htlc))
// the htlc will not settle on chain
listener.expectNoMsg(2 seconds)
}
@ -515,12 +475,9 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
// bob publishes his last current commit tx, the one he had when entering the NEGOTIATING state
val bobCommitTx = bobCommitTxes.last.commitTx.tx
assert(bobCommitTx.txOut.size == 2) // two main outputs
alice ! WatchEventSpent(BITCOIN_FUNDING_SPENT, bobCommitTx)
alice2blockchain.expectMsgType[PublishAsap]
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === bobCommitTx.txid)
awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].remoteCommitPublished.isDefined)
val closingState = remoteClose(bobCommitTx, alice, alice2blockchain)
assert(closingState.claimMainOutputTx.nonEmpty)
assert(closingState.claimHtlcSuccessTxs.isEmpty && closingState.claimHtlcTimeoutTxs.isEmpty)
assert(alice.stateData.asInstanceOf[DATA_CLOSING].copy(remoteCommitPublished = None) == initialState)
}
@ -531,15 +488,14 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
// bob publishes his last current commit tx, the one he had when entering the NEGOTIATING state
val bobCommitTx = bobCommitTxes.last.commitTx.tx
assert(bobCommitTx.txOut.size == 2) // two main outputs
alice ! WatchEventSpent(BITCOIN_FUNDING_SPENT, bobCommitTx)
val claimMainTx = alice2blockchain.expectMsgType[PublishAsap].tx
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === bobCommitTx.txid)
awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].remoteCommitPublished.isDefined)
assert(alice.stateData.asInstanceOf[DATA_CLOSING].copy(remoteCommitPublished = None) == initialState)
val closingState = remoteClose(bobCommitTx, alice, alice2blockchain)
// actual test starts here
assert(closingState.claimMainOutputTx.nonEmpty)
assert(closingState.claimHtlcSuccessTxs.isEmpty && closingState.claimHtlcTimeoutTxs.isEmpty)
assert(alice.stateData.asInstanceOf[DATA_CLOSING].copy(remoteCommitPublished = None) == initialState)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(bobCommitTx), 0, 0, bobCommitTx)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(claimMainTx), 0, 0, claimMainTx)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(closingState.claimMainOutputTx.get), 0, 0, closingState.claimMainOutputTx.get)
awaitCond(alice.stateName == CLOSED)
}
@ -551,40 +507,46 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
relayerA.expectMsgType[ForwardAdd]
// An HTLC Alice -> Bob is only signed by Alice: Bob has two spendable commit txs.
addHtlc(95000000 msat, alice, bob, alice2bob, bob2alice)
val (_, htlc2) = addHtlc(95000000 msat, alice, bob, alice2bob, bob2alice)
alice ! CMD_SIGN
alice2bob.expectMsgType[CommitSig] // We stop here: Alice sent her CommitSig, but doesn't hear back from Bob.
// Now Bob publishes the first commit tx (force-close).
val bobCommitTx = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.localCommit.publishableTxs.commitTx.tx
assert(bobCommitTx.txOut.length === 3) // two main outputs + 1 HTLC
alice ! WatchEventSpent(BITCOIN_FUNDING_SPENT, bobCommitTx)
// Alice can claim her main output.
val claimMainTx = alice2blockchain.expectMsgType[PublishAsap].tx
Transaction.correctlySpends(claimMainTx, bobCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === bobCommitTx.txid)
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === claimMainTx.txid)
alice2blockchain.expectNoMsg(100 millis)
val closingState = remoteClose(bobCommitTx, alice, alice2blockchain)
assert(closingState.claimMainOutputTx.nonEmpty)
assert(closingState.claimHtlcSuccessTxs.isEmpty && closingState.claimHtlcTimeoutTxs.isEmpty) // we don't have the preimage to claim the htlc-success yet
// Alice receives the preimage for the first HTLC from downstream; she can now claim the corresponding HTLC output.
alice ! CMD_FULFILL_HTLC(htlc1.id, r1, commit = true)
assert(alice2blockchain.expectMsgType[PublishAsap].tx.txid === claimMainTx.txid)
val claimHtlcSuccessTx = alice2blockchain.expectMsgType[PublishAsap].tx
alice2blockchain.expectMsg(PublishAsap(closingState.claimMainOutputTx.get))
val claimHtlcSuccessTx = alice.stateData.asInstanceOf[DATA_CLOSING].remoteCommitPublished.get.claimHtlcSuccessTxs.head
Transaction.correctlySpends(claimHtlcSuccessTx, bobCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === bobCommitTx.txid)
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === claimMainTx.txid)
assert(alice2blockchain.expectMsgType[WatchSpent].txId === bobCommitTx.txid)
alice2blockchain.expectMsg(PublishAsap(claimHtlcSuccessTx))
// Alice resets watches on all relevant transactions.
assert(alice2blockchain.expectMsgType[WatchConfirmed].event === BITCOIN_TX_CONFIRMED(bobCommitTx))
assert(alice2blockchain.expectMsgType[WatchConfirmed].event === BITCOIN_TX_CONFIRMED(closingState.claimMainOutputTx.get))
val watchHtlcSuccess = alice2blockchain.expectMsgType[WatchSpent]
assert(watchHtlcSuccess.event === BITCOIN_OUTPUT_SPENT)
assert(watchHtlcSuccess.txId === bobCommitTx.txid)
assert(watchHtlcSuccess.outputIndex === claimHtlcSuccessTx.txIn.head.outPoint.index)
alice2blockchain.expectNoMsg(100 millis)
val claimedOutputs = (claimMainTx.txIn ++ claimHtlcSuccessTx.txIn).filter(_.outPoint.txid == bobCommitTx.txid).map(_.outPoint.index)
assert(claimedOutputs.length === 2)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(bobCommitTx), 0, 0, bobCommitTx)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(claimMainTx), 0, 0, claimMainTx)
// The second htlc was not included in the commit tx published on-chain, so we can consider it failed
assert(relayerA.expectMsgType[ForwardOnChainFail].htlc === htlc2)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(closingState.claimMainOutputTx.get), 0, 0, closingState.claimMainOutputTx.get)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(claimHtlcSuccessTx), 0, 0, claimHtlcSuccessTx)
// TODO: can we also verify that we correctly sweep the HTLC success after the delay?
assert(alice.stateData.asInstanceOf[DATA_CLOSING].remoteCommitPublished.get.irrevocablySpent.values.toSet === Set(
bobCommitTx.txid,
closingState.claimMainOutputTx.get.txid,
claimHtlcSuccessTx.txid
))
awaitCond(alice.stateName == CLOSED)
alice2blockchain.expectNoMsg(100 millis)
relayerA.expectNoMsg(100 millis)
}
test("recv BITCOIN_TX_CONFIRMED (next remote commit) followed by CMD_FULFILL_HTLC") { f =>
@ -595,7 +557,7 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
relayerA.expectMsgType[ForwardAdd]
// An HTLC Alice -> Bob is only signed by Alice: Bob has two spendable commit txs.
addHtlc(95000000 msat, alice, bob, alice2bob, bob2alice)
val (_, htlc2) = addHtlc(95000000 msat, alice, bob, alice2bob, bob2alice)
alice ! CMD_SIGN
alice2bob.expectMsgType[CommitSig]
alice2bob.forward(bob)
@ -605,39 +567,36 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
// Now Bob publishes the next commit tx (force-close).
val bobCommitTx = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.localCommit.publishableTxs.commitTx.tx
assert(bobCommitTx.txOut.length === 4) // two main outputs + 2 HTLCs
alice ! WatchEventSpent(BITCOIN_FUNDING_SPENT, bobCommitTx)
// Alice can claim her main output.
val claimMainTx = alice2blockchain.expectMsgType[PublishAsap].tx
Transaction.correctlySpends(claimMainTx, bobCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
val claimHtlcTimeoutTx = alice2blockchain.expectMsgType[PublishAsap].tx
Transaction.correctlySpends(claimHtlcTimeoutTx, bobCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === bobCommitTx.txid)
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === claimMainTx.txid)
assert(alice2blockchain.expectMsgType[WatchSpent].txId === bobCommitTx.txid)
alice2blockchain.expectNoMsg(100 millis)
val closingState = remoteClose(bobCommitTx, alice, alice2blockchain)
assert(closingState.claimMainOutputTx.nonEmpty)
assert(closingState.claimHtlcSuccessTxs.isEmpty) // we don't have the preimage to claim the htlc-success yet
assert(closingState.claimHtlcTimeoutTxs.length === 1)
val claimHtlcTimeoutTx = closingState.claimHtlcTimeoutTxs.head
// Alice receives the preimage for the first HTLC from downstream; she can now claim the corresponding HTLC output.
alice ! CMD_FULFILL_HTLC(htlc1.id, r1, commit = true)
assert(alice2blockchain.expectMsgType[PublishAsap].tx.txid === claimMainTx.txid)
val claimHtlcSuccessTx = alice2blockchain.expectMsgType[PublishAsap].tx
alice2blockchain.expectMsg(PublishAsap(closingState.claimMainOutputTx.get))
val claimHtlcSuccessTx = alice.stateData.asInstanceOf[DATA_CLOSING].nextRemoteCommitPublished.get.claimHtlcSuccessTxs.head
Transaction.correctlySpends(claimHtlcSuccessTx, bobCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
assert(alice2blockchain.expectMsgType[PublishAsap].tx.txid === claimHtlcTimeoutTx.txid)
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === bobCommitTx.txid)
assert(alice2blockchain.expectMsgType[WatchConfirmed].txId === claimMainTx.txid)
assert(alice2blockchain.expectMsgType[WatchSpent].txId === bobCommitTx.txid)
assert(alice2blockchain.expectMsgType[WatchSpent].txId === bobCommitTx.txid)
alice2blockchain.expectMsg(PublishAsap(claimHtlcSuccessTx))
alice2blockchain.expectMsg(PublishAsap(claimHtlcTimeoutTx))
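// Alice resets watches on all relevant transactions.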
assert(alice2blockchain.expectMsgType[WatchConfirmed].event === BITCOIN_TX_CONFIRMED(bobCommitTx))
assert(alice2blockchain.expectMsgType[WatchConfirmed].event === BITCOIN_TX_CONFIRMED(closingState.claimMainOutputTx.get))
val watchHtlcs = alice2blockchain.expectMsgType[WatchSpent] :: alice2blockchain.expectMsgType[WatchSpent] :: Nil
watchHtlcs.foreach(ws => assert(ws.event === BITCOIN_OUTPUT_SPENT))
watchHtlcs.foreach(ws => assert(ws.txId === bobCommitTx.txid))
assert(watchHtlcs.map(_.outputIndex).toSet === (claimHtlcSuccessTx :: closingState.claimHtlcTimeoutTxs).map(_.txIn.head.outPoint.index).toSet)
alice2blockchain.expectNoMsg(100 millis)
val claimedOutputs = (claimMainTx.txIn ++ claimHtlcSuccessTx.txIn ++ claimHtlcTimeoutTx.txIn).filter(_.outPoint.txid == bobCommitTx.txid).map(_.outPoint.index)
assert(claimedOutputs.length === 3)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(bobCommitTx), 0, 0, bobCommitTx)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(claimMainTx), 0, 0, claimMainTx)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(closingState.claimMainOutputTx.get), 0, 0, closingState.claimMainOutputTx.get)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(claimHtlcSuccessTx), 0, 0, claimHtlcSuccessTx)
alice ! WatchEventConfirmed(BITCOIN_TX_CONFIRMED(claimHtlcTimeoutTx), 0, 0, claimHtlcTimeoutTx)
// TODO: can we also verify that we correctly sweep the HTLC success and timeout after the delay?
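// the claim-htlc-timeout tx for the second htlc is confirmed: alice fails it back upstream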
assert(relayerA.expectMsgType[ForwardOnChainFail].htlc === htlc2)
awaitCond(alice.stateName == CLOSED)
alice2blockchain.expectNoMsg(100 millis)
relayerA.expectNoMsg(100 millis)
}
test("recv BITCOIN_TX_CONFIRMED (future remote commit)") { f =>
@ -711,7 +670,7 @@ class ClosingStateSpec extends TestkitBaseClass with StateTestsHelperMethods {
import f._
mutualClose(alice, bob, alice2bob, bob2alice, alice2blockchain, bob2blockchain)
// bob publishes multiple revoked txes (last one isn't revoked)
alice ! WatchEventSpent(BITCOIN_FUNDING_SPENT, bobCommitTxes(0).commitTx.tx)
alice ! WatchEventSpent(BITCOIN_FUNDING_SPENT, bobCommitTxes.head.commitTx.tx)
// alice publishes and watches the penalty tx
alice2blockchain.expectMsgType[PublishAsap] // claim-main
alice2blockchain.expectMsgType[PublishAsap] // main-penalty


@ -283,14 +283,14 @@ class NodeRelayerSpec extends TestkitBaseClass {
// A first downstream HTLC is fulfilled.
val ff1 = createDownstreamFulfill(outgoingPayFSM.ref)
relayer.send(nodeRelayer, ff1)
outgoingPayFSM.expectMsg(ff1.fulfill)
outgoingPayFSM.expectMsg(ff1)
// We should immediately forward the fulfill upstream.
incomingMultiPart.foreach(p => commandBuffer.expectMsg(CommandBuffer.CommandSend(p.add.channelId, CMD_FULFILL_HTLC(p.add.id, paymentPreimage, commit = true))))
// A second downstream HTLC is fulfilled.
val ff2 = createDownstreamFulfill(outgoingPayFSM.ref)
relayer.send(nodeRelayer, ff2)
outgoingPayFSM.expectMsg(ff2.fulfill)
outgoingPayFSM.expectMsg(ff2)
// We should not fulfill a second time upstream.
commandBuffer.expectNoMsg(100 millis)
@ -316,7 +316,7 @@ class NodeRelayerSpec extends TestkitBaseClass {
val ff = createDownstreamFulfill(outgoingPayFSM.ref)
relayer.send(nodeRelayer, ff)
outgoingPayFSM.expectMsg(ff.fulfill)
outgoingPayFSM.expectMsg(ff)
val incomingAdd = incomingSinglePart.add
commandBuffer.expectMsg(CommandBuffer.CommandSend(incomingAdd.channelId, CMD_FULFILL_HTLC(incomingAdd.id, paymentPreimage, commit = true)))
@ -353,7 +353,7 @@ class NodeRelayerSpec extends TestkitBaseClass {
val ff = createDownstreamFulfill(outgoingPayFSM.ref)
relayer.send(nodeRelayer, ff)
outgoingPayFSM.expectMsg(ff.fulfill)
outgoingPayFSM.expectMsg(ff)
incomingMultiPart.foreach(p => commandBuffer.expectMsg(CommandBuffer.CommandSend(p.add.channelId, CMD_FULFILL_HTLC(p.add.id, paymentPreimage, commit = true))))
outgoingPayFSM.send(nodeRelayer, createSuccessEvent(outgoingCfg.id))
@ -386,7 +386,7 @@ class NodeRelayerSpec extends TestkitBaseClass {
val ff = createDownstreamFulfill(outgoingPayFSM.ref)
relayer.send(nodeRelayer, ff)
outgoingPayFSM.expectMsg(ff.fulfill)
outgoingPayFSM.expectMsg(ff)
incomingMultiPart.foreach(p => commandBuffer.expectMsg(CommandBuffer.CommandSend(p.add.channelId, CMD_FULFILL_HTLC(p.add.id, paymentPreimage, commit = true))))
outgoingPayFSM.send(nodeRelayer, createSuccessEvent(outgoingCfg.id))
@ -450,7 +450,7 @@ object NodeRelayerSpec {
def createDownstreamFulfill(payFSM: ActorRef): Relayer.ForwardFulfill = {
val origin = Origin.TrampolineRelayed(null, Some(payFSM))
Relayer.ForwardFulfill(UpdateFulfillHtlc(randomBytes32, Random.nextInt(100), paymentPreimage), origin, null)
Relayer.ForwardRemoteFulfill(UpdateFulfillHtlc(randomBytes32, Random.nextInt(100), paymentPreimage), origin, null)
}
def createSuccessEvent(id: UUID): PaymentSent =


@ -33,6 +33,7 @@ import fr.acinq.eclair.io.Peer.PeerRoutingMessage
import fr.acinq.eclair.payment.PaymentRequest.ExtraHop
import fr.acinq.eclair.payment.PaymentSent.PartialPayment
import fr.acinq.eclair.payment.relay.Origin.Local
import fr.acinq.eclair.payment.relay.{Origin, Relayer}
import fr.acinq.eclair.payment.send.PaymentInitiator.{SendPaymentConfig, SendPaymentRequest}
import fr.acinq.eclair.payment.send.PaymentLifecycle
import fr.acinq.eclair.payment.send.PaymentLifecycle._
@ -55,6 +56,7 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
val defaultExpiry = Channel.MIN_CLTV_EXPIRY_DELTA.toCltvExpiry(40000)
val defaultPaymentPreimage = randomBytes32
val defaultPaymentHash = Crypto.sha256(defaultPaymentPreimage)
val defaultOrigin = Origin.Local(UUID.randomUUID(), None)
val defaultExternalId = UUID.randomUUID().toString
val defaultPaymentRequest = SendPaymentRequest(defaultAmountMsat, defaultPaymentHash, d, 1, externalId = Some(defaultExternalId))
@ -96,7 +98,7 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
awaitCond(nodeParams.db.payments.getOutgoingPayment(id).exists(_.status == OutgoingPaymentStatus.Pending))
val Some(outgoing) = nodeParams.db.payments.getOutgoingPayment(id)
assert(outgoing.copy(createdAt = 0) === OutgoingPayment(id, parentId, Some(defaultExternalId), defaultPaymentHash, PaymentType.Standard, defaultAmountMsat, defaultAmountMsat, d, 0, None, OutgoingPaymentStatus.Pending))
sender.send(paymentFSM, UpdateFulfillHtlc(ByteVector32.Zeroes, 0, defaultPaymentHash))
sender.send(paymentFSM, Relayer.ForwardRemoteFulfill(UpdateFulfillHtlc(ByteVector32.Zeroes, 0, defaultPaymentPreimage), defaultOrigin, UpdateAddHtlc(ByteVector32.Zeroes, 0, defaultAmountMsat, defaultPaymentHash, defaultExpiry, TestConstants.emptyOnionPacket)))
val ps = sender.expectMsgType[PaymentSent]
assert(ps.id === parentId)
@ -132,7 +134,7 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
val Transition(_, WAITING_FOR_ROUTE, WAITING_FOR_PAYMENT_COMPLETE) = monitor.expectMsgClass(classOf[Transition[_]])
// Payment accepted by the recipient.
sender.send(paymentFSM, UpdateFulfillHtlc(ByteVector32.Zeroes, 0, defaultPaymentHash))
sender.send(paymentFSM, Relayer.ForwardOnChainFulfill(defaultPaymentPreimage, defaultOrigin, UpdateAddHtlc(ByteVector32.Zeroes, 0, defaultAmountMsat, defaultPaymentHash, defaultExpiry, TestConstants.emptyOnionPacket)))
val ps = sender.expectMsgType[PaymentSent]
assert(ps.id === parentId)
@ -228,7 +230,7 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
val WaitingForComplete(_, _, cmd1, Nil, _, _, _, hops) = paymentFSM.stateData
register.expectMsg(ForwardShortId(channelId_ab, cmd1))
sender.send(paymentFSM, UpdateFailHtlc(ByteVector32.Zeroes, 0, defaultPaymentHash)) // unparsable message
sender.send(paymentFSM, Relayer.ForwardRemoteFail(UpdateFailHtlc(ByteVector32.Zeroes, 0, randomBytes32), defaultOrigin, UpdateAddHtlc(ByteVector32.Zeroes, 0, defaultAmountMsat, defaultPaymentHash, defaultExpiry, TestConstants.emptyOnionPacket))) // unparsable message
// then the payment lifecycle will ask for a new route excluding all intermediate nodes
routerForwarder.expectMsg(RouteRequest(nodeParams.nodeId, d, defaultAmountMsat, ignoreNodes = Set(c), ignoreChannels = Set.empty))
@ -465,7 +467,7 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
awaitCond(nodeParams.db.payments.getOutgoingPayment(id).exists(_.status === OutgoingPaymentStatus.Pending))
val Some(outgoing) = nodeParams.db.payments.getOutgoingPayment(id)
assert(outgoing.copy(createdAt = 0) === OutgoingPayment(id, parentId, Some(defaultExternalId), defaultPaymentHash, PaymentType.Standard, defaultAmountMsat, defaultAmountMsat, d, 0, None, OutgoingPaymentStatus.Pending))
sender.send(paymentFSM, UpdateFulfillHtlc(ByteVector32.Zeroes, 0, defaultPaymentPreimage))
sender.send(paymentFSM, Relayer.ForwardRemoteFulfill(UpdateFulfillHtlc(ByteVector32.Zeroes, 0, defaultPaymentPreimage), defaultOrigin, UpdateAddHtlc(ByteVector32.Zeroes, 0, defaultAmountMsat, defaultPaymentHash, defaultExpiry, TestConstants.emptyOnionPacket)))
val ps = eventListener.expectMsgType[PaymentSent]
assert(ps.id === parentId)
@ -513,7 +515,7 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
routerForwarder.forward(router)
val Transition(_, WAITING_FOR_ROUTE, WAITING_FOR_PAYMENT_COMPLETE) = monitor.expectMsgClass(classOf[Transition[_]])
sender.send(paymentFSM, UpdateFulfillHtlc(ByteVector32.Zeroes, 0, defaultPaymentHash))
sender.send(paymentFSM, Relayer.ForwardOnChainFulfill(defaultPaymentPreimage, defaultOrigin, UpdateAddHtlc(ByteVector32.Zeroes, 0, defaultAmountMsat, defaultPaymentHash, defaultExpiry, TestConstants.emptyOnionPacket)))
val paymentOK = sender.expectMsgType[PaymentSent]
val PaymentSent(_, _, paymentOK.paymentPreimage, finalAmount, _, PartialPayment(_, request.finalPayload.amount, fee, ByteVector32.Zeroes, _, _) :: Nil) = eventListener.expectMsgType[PaymentSent]
assert(finalAmount === defaultAmountMsat)
@ -543,7 +545,7 @@ class PaymentLifecycleSpec extends BaseRouterSpec {
val Transition(_, WAITING_FOR_ROUTE, WAITING_FOR_PAYMENT_COMPLETE) = monitor.expectMsgClass(classOf[Transition[_]])
assert(nodeParams.db.payments.getOutgoingPayment(id) === None)
sender.send(paymentFSM, UpdateFulfillHtlc(ByteVector32.Zeroes, 0, defaultPaymentPreimage))
sender.send(paymentFSM, Relayer.ForwardRemoteFulfill(UpdateFulfillHtlc(ByteVector32.Zeroes, 0, defaultPaymentPreimage), defaultOrigin, UpdateAddHtlc(ByteVector32.Zeroes, 0, defaultAmountMsat, defaultPaymentHash, defaultExpiry, TestConstants.emptyOnionPacket)))
sender.expectMsgType[PaymentSent]
assert(nodeParams.db.payments.getOutgoingPayment(id) === None)
eventListener.expectNoMsg(100 millis)


@ -25,7 +25,6 @@ import fr.acinq.eclair.channel._
import fr.acinq.eclair.db.{OutgoingPayment, OutgoingPaymentStatus, PaymentType}
import fr.acinq.eclair.payment.OutgoingPacket.buildCommand
import fr.acinq.eclair.payment.PaymentPacketSpec._
import fr.acinq.eclair.payment.relay.Relayer.{ForwardFail, ForwardFulfill}
import fr.acinq.eclair.payment.relay.{CommandBuffer, Origin, Relayer}
import fr.acinq.eclair.router.ChannelHop
import fr.acinq.eclair.transactions.{DirectedHtlc, Direction, IN, OUT}
@ -379,12 +378,12 @@ object PostRestartHtlcCleanerSpec {
}
def buildForwardFail(add: UpdateAddHtlc, origin: Origin) =
ForwardFail(UpdateFailHtlc(add.channelId, add.id, ByteVector.empty), origin, add)
Relayer.ForwardRemoteFail(UpdateFailHtlc(add.channelId, add.id, ByteVector.empty), origin, add)
def buildForwardFulfill(add: UpdateAddHtlc, origin: Origin, preimage: ByteVector32) =
ForwardFulfill(UpdateFulfillHtlc(add.channelId, add.id, preimage), origin, add)
Relayer.ForwardRemoteFulfill(UpdateFulfillHtlc(add.channelId, add.id, preimage), origin, add)
case class LocalPaymentTest(parentId: UUID, childIds: Seq[UUID], fails: Seq[ForwardFail], fulfills: Seq[ForwardFulfill])
case class LocalPaymentTest(parentId: UUID, childIds: Seq[UUID], fails: Seq[Relayer.ForwardFail], fulfills: Seq[Relayer.ForwardFulfill])
/**
* We set up two outgoing payments:


@ -197,7 +197,7 @@ class RelayerSpec extends TestkitBaseClass {
val origin2 = TrampolineRelayed((channelId_ab, 561L) :: (channelId_ab, 565L) :: Nil, Some(register.lastSender))
val add_bc = UpdateAddHtlc(channelId_bc, 72, cmd1.amount + cmd2.amount, paymentHash, cmd1.cltvExpiry, onionRoutingPacket = TestConstants.emptyOnionPacket)
val fulfill_ba = UpdateFulfillHtlc(channelId_bc, 72, paymentPreimage)
sender.send(relayer, ForwardFulfill(fulfill_ba, origin2, add_bc))
sender.send(relayer, ForwardRemoteFulfill(fulfill_ba, origin2, add_bc))
// it should trigger a fulfill on the upstream HTLCs
register.expectMsg(Register.Forward(channelId_ab, CMD_FULFILL_HTLC(561, paymentPreimage, commit = true)))
@ -468,10 +468,14 @@ class RelayerSpec extends TestkitBaseClass {
sender.send(relayer, Status.Failure(AddHtlcFailed(channelId_bc, paymentHash, ChannelUnavailable(channelId_bc), origin, Some(channelUpdate_bc_disabled), None)))
assert(register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message.reason === Right(ChannelDisabled(channelUpdate_bc_disabled.messageFlags, channelUpdate_bc_disabled.channelFlags, channelUpdate_bc_disabled)))
sender.send(relayer, Status.Failure(AddHtlcFailed(channelId_bc, paymentHash, HtlcTimedout(channelId_bc, Set.empty), origin, None, None)))
assert(register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message.reason === Right(PermanentChannelFailure))
val add = UpdateAddHtlc(channelId_bc, 7, 1090000 msat, paymentHash, CltvExpiry(42), TestConstants.emptyOnionPacket)
sender.send(relayer, ForwardRemoteFail(UpdateFailHtlc(channelId_bc, 7, ByteVector.fill(12)(3)), origin, add))
assert(register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message.reason === Left(ByteVector.fill(12)(3)))
sender.send(relayer, Status.Failure(AddHtlcFailed(channelId_bc, paymentHash, HtlcTimedout(channelId_bc, Set.empty), origin, Some(channelUpdate_bc), None)))
sender.send(relayer, ForwardRemoteFailMalformed(UpdateFailMalformedHtlc(channelId_bc, 7, ByteVector32.One, FailureMessageCodecs.BADONION), origin, add))
assert(register.expectMsgType[Register.Forward[CMD_FAIL_MALFORMED_HTLC]].message.onionHash === ByteVector32.One)
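// when the downstream htlc fails on-chain (here overridden by the local commit), there is no encrypted failure reason to relay, so we fail upstream with a permanent channel failure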
sender.send(relayer, ForwardOnChainFail(HtlcOverriddenByLocalCommit(channelId_bc, add), origin, add))
assert(register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]].message.reason === Right(PermanentChannelFailure))
register.expectNoMsg(50 millis)
@ -487,17 +491,26 @@ class RelayerSpec extends TestkitBaseClass {
val add_bc = UpdateAddHtlc(channelId = channelId_bc, id = 72, amountMsat = 10000000 msat, paymentHash = ByteVector32.Zeroes, CltvExpiry(4200), onionRoutingPacket = TestConstants.emptyOnionPacket)
val fulfill_ba = UpdateFulfillHtlc(channelId = channelId_bc, id = 72, paymentPreimage = ByteVector32.Zeroes)
val origin = Relayed(channelId_ab, 42, 11000000 msat, 10000000 msat)
sender.send(relayer, ForwardFulfill(fulfill_ba, origin, add_bc))
for (fulfill <- Seq(ForwardRemoteFulfill(fulfill_ba, origin, add_bc), ForwardOnChainFulfill(ByteVector32.Zeroes, origin, add_bc))) {
sender.send(relayer, fulfill)
val fwd = register.expectMsgType[Register.Forward[CMD_FULFILL_HTLC]]
assert(fwd.channelId === origin.originChannelId)
assert(fwd.message.id === origin.originHtlcId)
val fwd = register.expectMsgType[Register.Forward[CMD_FULFILL_HTLC]]
assert(fwd.channelId === origin.originChannelId)
assert(fwd.message.id === origin.originHtlcId)
val paymentRelayed = eventListener.expectMsgType[ChannelPaymentRelayed]
assert(paymentRelayed.copy(timestamp = 0) === ChannelPaymentRelayed(origin.amountIn, origin.amountOut, add_bc.paymentHash, channelId_ab, channelId_bc, timestamp = 0))
val paymentRelayed = eventListener.expectMsgType[ChannelPaymentRelayed]
assert(paymentRelayed.copy(timestamp = 0) === ChannelPaymentRelayed(origin.amountIn, origin.amountOut, add_bc.paymentHash, channelId_ab, channelId_bc, timestamp = 0))
}
}
test("relay a trampoline htlc-fulfill") { f =>
testRelayTrampolineHtlcFulfill(f, onChain = false)
}
test("relay a trampoline on-chain htlc-fulfill") { f =>
testRelayTrampolineHtlcFulfill(f, onChain = true)
}
def testRelayTrampolineHtlcFulfill(f: FixtureParam, onChain: Boolean): Unit = {
import f._
// A sends a multi-payment to trampoline node B.
@ -516,11 +529,16 @@ class RelayerSpec extends TestkitBaseClass {
// We simulate a fake htlc fulfill for the downstream channel.
val payFSM = TestProbe()
val fulfill_ba = UpdateFulfillHtlc(channelId_bc, 72, preimage)
sender.send(relayer, ForwardFulfill(fulfill_ba, TrampolineRelayed(null, Some(payFSM.ref)), UpdateAddHtlc(channelId_bc, 13, 1000 msat, paymentHash, CltvExpiry(1), null)))
val add_bc = UpdateAddHtlc(channelId_bc, 72, 1000 msat, paymentHash, CltvExpiry(1), null)
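// depending on the test variant, the downstream fulfill comes either from a remote update_fulfill_htlc or from a preimage extracted on-chain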
val forwardFulfill: ForwardFulfill = if (onChain) {
ForwardOnChainFulfill(preimage, TrampolineRelayed(null, Some(payFSM.ref)), add_bc)
} else {
ForwardRemoteFulfill(UpdateFulfillHtlc(add_bc.channelId, add_bc.id, preimage), TrampolineRelayed(null, Some(payFSM.ref)), add_bc)
}
sender.send(relayer, forwardFulfill)
// the FSM responsible for the payment should receive the fulfill.
payFSM.expectMsg(fulfill_ba)
payFSM.expectMsg(forwardFulfill)
// the payment should be immediately fulfilled upstream.
val upstream1 = register.expectMsgType[Register.Forward[CMD_FULFILL_HTLC]]
@ -541,11 +559,12 @@ class RelayerSpec extends TestkitBaseClass {
val add_bc = UpdateAddHtlc(channelId = channelId_bc, id = 72, amountMsat = 10000000 msat, paymentHash = ByteVector32.Zeroes, CltvExpiry(4200), onionRoutingPacket = TestConstants.emptyOnionPacket)
val fail_ba = UpdateFailHtlc(channelId = channelId_bc, id = 72, reason = Sphinx.FailurePacket.create(ByteVector32(ByteVector.fill(32)(1)), TemporaryChannelFailure(channelUpdate_cd)))
val origin = Relayed(channelId_ab, 42, 11000000 msat, 10000000 msat)
sender.send(relayer, ForwardFail(fail_ba, origin, add_bc))
val fwd = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]]
assert(fwd.channelId === origin.originChannelId)
assert(fwd.message.id === origin.originHtlcId)
for (fail <- Seq(ForwardRemoteFail(fail_ba, origin, add_bc), ForwardOnChainFail(HtlcOverriddenByLocalCommit(channelId_bc, add_bc), origin, add_bc))) {
sender.send(relayer, fail)
val fwd = register.expectMsgType[Register.Forward[CMD_FAIL_HTLC]]
assert(fwd.channelId === origin.originChannelId)
assert(fwd.message.id === origin.originHtlcId)
}
}
test("relay a trampoline htlc-fail") { f =>
@ -557,10 +576,18 @@ class RelayerSpec extends TestkitBaseClass {
val add_bc = UpdateAddHtlc(channelId = channelId_bc, id = 72, amountMsat = 10000000 msat, paymentHash = ByteVector32.Zeroes, CltvExpiry(4200), onionRoutingPacket = TestConstants.emptyOnionPacket)
val fail_ba = UpdateFailHtlc(channelId = channelId_bc, id = 72, reason = Sphinx.FailurePacket.create(ByteVector32(ByteVector.fill(32)(1)), TemporaryChannelFailure(channelUpdate_cd)))
val origin = TrampolineRelayed(List((channelId_ab, 42), (randomBytes32, 7)), Some(payFSM.ref))
sender.send(relayer, ForwardFail(fail_ba, origin, add_bc))
val remoteFailure = ForwardRemoteFail(fail_ba, origin, add_bc)
// we forward to the FSM responsible for the payment to trigger the retry mechanism.
payFSM.expectMsg(fail_ba)
// a remote failure should be forwarded to the FSM responsible for the payment to trigger the retry mechanism.
sender.send(relayer, remoteFailure)
payFSM.expectMsg(remoteFailure)
payFSM.expectNoMsg(100 millis)
// same for an on-chain downstream failure.
val onChainFailure = ForwardOnChainFail(HtlcsTimedoutDownstream(channelId_bc, Set(add_bc)), origin, add_bc)
sender.send(relayer, onChainFailure)
payFSM.expectMsg(onChainFailure)
payFSM.expectNoMsg(100 millis)
}
test("get outgoing channels") { f =>