mirror of
https://github.com/ACINQ/eclair.git
synced 2025-02-22 22:25:26 +01:00
Implement option_simple_close
(#2967)
We add support for the latest channel closing protocol described in https://github.com/lightning/bolts/pull/1205. This is a prerequisite for taproot channels. We introduce a new `NEGOTIATING_SIMPLE` state where we exchange the `closing_complete` and `closing_sig` messages, and allow RBF-ing previous transactions and updating our closing script. We stay in that state until one of the transactions confirms, or a force close is detected. This is important to ensure we're able to correctly reconnect and negotiate RBF candidates. We keep this separate from the previous `NEGOTIATING` state to make it easier to remove support for the older mutual close protocols once we're confident the network has been upgraded.
This commit is contained in:
parent
372222d9f8
commit
3aac8da146
50 changed files with 1361 additions and 308 deletions
|
@ -4,7 +4,17 @@
|
|||
|
||||
## Major changes
|
||||
|
||||
<insert changes>
|
||||
### Simplified mutual close
|
||||
|
||||
This release includes support for the latest [mutual close protocol](https://github.com/lightning/bolts/pull/1205).
|
||||
This protocol allows both channel participants to decide exactly how much fees they're willing to pay to close the channel.
|
||||
Each participant obtains a channel closing transaction in which they pay the fees.
|
||||
|
||||
Once closing transactions are broadcast, they can be RBF-ed by calling the `close` RPC again with a higher feerate:
|
||||
|
||||
```sh
|
||||
./eclair-cli close --channelId=<channel_id> --preferredFeerateSatByte=<rbf_feerate>
|
||||
```
|
||||
|
||||
### Peer storage
|
||||
|
||||
|
|
|
@ -84,6 +84,7 @@ eclair {
|
|||
// node that you trust using override-init-features (see below).
|
||||
option_zeroconf = disabled
|
||||
keysend = disabled
|
||||
option_simple_close = optional
|
||||
trampoline_payment_prototype = disabled
|
||||
async_payment_prototype = disabled
|
||||
on_the_fly_funding = disabled
|
||||
|
@ -132,8 +133,7 @@ eclair {
|
|||
|
||||
to-remote-delay-blocks = 720 // number of blocks that the other node's to-self outputs must be delayed (720 ~ 5 days)
|
||||
max-to-local-delay-blocks = 2016 // maximum number of blocks that we are ready to accept for our own delayed outputs (2016 ~ 2 weeks)
|
||||
min-depth-funding-blocks = 6 // minimum number of confirmations for funding transactions
|
||||
min-depth-closing-blocks = 3 // minimum number of confirmations for closing transactions
|
||||
min-depth-blocks = 6 // minimum number of confirmations for channel transactions, which we will additionally scale based on the amount at stake
|
||||
expiry-delta-blocks = 144
|
||||
max-expiry-delta-blocks = 2016 // we won't forward HTLCs with timeouts greater than this delta
|
||||
// When we receive the preimage for an HTLC and want to fulfill it but the upstream peer stops responding, we want to
|
||||
|
|
|
@ -305,6 +305,11 @@ object Features {
|
|||
val mandatory = 54
|
||||
}
|
||||
|
||||
case object SimpleClose extends Feature with InitFeature with NodeFeature {
|
||||
val rfcName = "option_simple_close"
|
||||
val mandatory = 60
|
||||
}
|
||||
|
||||
/** This feature bit indicates that the node is a mobile wallet that can be woken up via push notifications. */
|
||||
case object WakeUpNotificationClient extends Feature with InitFeature {
|
||||
val rfcName = "wake_up_notification_client"
|
||||
|
@ -375,6 +380,7 @@ object Features {
|
|||
PaymentMetadata,
|
||||
ZeroConf,
|
||||
KeySend,
|
||||
SimpleClose,
|
||||
WakeUpNotificationClient,
|
||||
TrampolinePaymentPrototype,
|
||||
AsyncPaymentPrototype,
|
||||
|
@ -393,6 +399,7 @@ object Features {
|
|||
RouteBlinding -> (VariableLengthOnion :: Nil),
|
||||
TrampolinePaymentPrototype -> (PaymentSecret :: Nil),
|
||||
KeySend -> (VariableLengthOnion :: Nil),
|
||||
SimpleClose -> (ShutdownAnySegwit :: Nil),
|
||||
AsyncPaymentPrototype -> (TrampolinePaymentPrototype :: Nil),
|
||||
OnTheFlyFunding -> (SplicePrototype :: Nil),
|
||||
FundingFeeCredit -> (OnTheFlyFunding :: Nil)
|
||||
|
|
|
@ -319,7 +319,7 @@ object NodeParams extends Logging {
|
|||
"on-chain-fees.target-blocks.safe-utxos-threshold" -> "on-chain-fees.safe-utxos-threshold",
|
||||
"on-chain-fees.target-blocks" -> "on-chain-fees.confirmation-priority",
|
||||
// v0.12.0
|
||||
"channel.mindepth-blocks" -> "channel.min-depth-funding-blocks",
|
||||
"channel.mindepth-blocks" -> "channel.min-depth-blocks",
|
||||
"sync-whitelist" -> "router.sync.whitelist",
|
||||
)
|
||||
deprecatedKeyPaths.foreach {
|
||||
|
@ -362,7 +362,7 @@ object NodeParams extends Logging {
|
|||
require(fulfillSafetyBeforeTimeout * 2 < expiryDelta, "channel.fulfill-safety-before-timeout-blocks must be smaller than channel.expiry-delta-blocks / 2 because it effectively reduces that delta; if you want to increase this value, you may want to increase expiry-delta-blocks as well")
|
||||
val minFinalExpiryDelta = CltvExpiryDelta(config.getInt("channel.min-final-expiry-delta-blocks"))
|
||||
require(minFinalExpiryDelta > fulfillSafetyBeforeTimeout, "channel.min-final-expiry-delta-blocks must be strictly greater than channel.fulfill-safety-before-timeout-blocks; otherwise it may lead to undesired channel closure")
|
||||
require(config.getInt("channel.min-depth-funding-blocks") >= 6, "channel.min-depth-funding-blocks must be at least 6 to ensure that channels are safe from reorgs, otherwise funds can be stolen")
|
||||
require(config.getInt("channel.min-depth-blocks") >= 6, "channel.min-depth-blocks must be at least 6 to ensure that channels are safe from reorgs, otherwise funds can be stolen")
|
||||
|
||||
val nodeAlias = config.getString("node-alias")
|
||||
require(nodeAlias.getBytes("UTF-8").length <= 32, "invalid alias, too long (max allowed 32 bytes)")
|
||||
|
@ -575,8 +575,7 @@ object NodeParams extends Logging {
|
|||
minFundingPrivateSatoshis = Satoshi(config.getLong("channel.min-private-funding-satoshis")),
|
||||
toRemoteDelay = offeredCLTV,
|
||||
maxToLocalDelay = maxToLocalCLTV,
|
||||
minDepthFunding = config.getInt("channel.min-depth-funding-blocks"),
|
||||
minDepthClosing = config.getInt("channel.min-depth-closing-blocks"),
|
||||
minDepth = config.getInt("channel.min-depth-blocks"),
|
||||
expiryDelta = expiryDelta,
|
||||
maxExpiryDelta = maxExpiryDelta,
|
||||
fulfillSafetyBeforeTimeout = fulfillSafetyBeforeTimeout,
|
||||
|
|
|
@ -196,6 +196,7 @@ object CheckBalance {
|
|||
case (r, d: DATA_NORMAL) => r.modify(_.normal).using(updateMainAndHtlcBalance(d.commitments, knownPreimages))
|
||||
case (r, d: DATA_SHUTDOWN) => r.modify(_.shutdown).using(updateMainAndHtlcBalance(d.commitments, knownPreimages))
|
||||
case (r, d: DATA_NEGOTIATING) => r.modify(_.negotiating).using(updateMainBalance(d.commitments.latest.localCommit))
|
||||
case (r, d: DATA_NEGOTIATING_SIMPLE) => r.modify(_.negotiating).using(updateMainBalance(d.commitments.latest.localCommit))
|
||||
case (r, d: DATA_CLOSING) =>
|
||||
Closing.isClosingTypeAlreadyKnown(d) match {
|
||||
case None if d.mutualClosePublished.nonEmpty && d.localCommitPublished.isEmpty && d.remoteCommitPublished.isEmpty && d.nextRemoteCommitPublished.isEmpty && d.revokedCommitPublished.isEmpty =>
|
||||
|
|
|
@ -133,8 +133,8 @@ object ZmqWatcher {
|
|||
case class WatchFundingSpent(replyTo: ActorRef[WatchFundingSpentTriggered], txId: TxId, outputIndex: Int, hints: Set[TxId]) extends WatchSpent[WatchFundingSpentTriggered]
|
||||
case class WatchFundingSpentTriggered(spendingTx: Transaction) extends WatchSpentTriggered
|
||||
|
||||
case class WatchOutputSpent(replyTo: ActorRef[WatchOutputSpentTriggered], txId: TxId, outputIndex: Int, hints: Set[TxId]) extends WatchSpent[WatchOutputSpentTriggered]
|
||||
case class WatchOutputSpentTriggered(spendingTx: Transaction) extends WatchSpentTriggered
|
||||
case class WatchOutputSpent(replyTo: ActorRef[WatchOutputSpentTriggered], txId: TxId, outputIndex: Int, amount: Satoshi, hints: Set[TxId]) extends WatchSpent[WatchOutputSpentTriggered]
|
||||
case class WatchOutputSpentTriggered(amount: Satoshi, spendingTx: Transaction) extends WatchSpentTriggered
|
||||
|
||||
/** Waiting for a wallet transaction to be published guarantees that bitcoind won't double-spend it in the future, unless we explicitly call abandontransaction. */
|
||||
case class WatchPublished(replyTo: ActorRef[WatchPublishedTriggered], txId: TxId) extends Watch[WatchPublishedTriggered]
|
||||
|
@ -233,7 +233,7 @@ private class ZmqWatcher(nodeParams: NodeParams, blockHeight: AtomicLong, client
|
|||
.foreach {
|
||||
case w: WatchExternalChannelSpent => context.self ! TriggerEvent(w.replyTo, w, WatchExternalChannelSpentTriggered(w.shortChannelId, tx))
|
||||
case w: WatchFundingSpent => context.self ! TriggerEvent(w.replyTo, w, WatchFundingSpentTriggered(tx))
|
||||
case w: WatchOutputSpent => context.self ! TriggerEvent(w.replyTo, w, WatchOutputSpentTriggered(tx))
|
||||
case w: WatchOutputSpent => context.self ! TriggerEvent(w.replyTo, w, WatchOutputSpentTriggered(w.amount, tx))
|
||||
case _: WatchPublished => // nothing to do
|
||||
case _: WatchConfirmed[_] => // nothing to do
|
||||
}
|
||||
|
|
|
@ -72,6 +72,7 @@ case object WAIT_FOR_DUAL_FUNDING_READY extends ChannelState
|
|||
case object NORMAL extends ChannelState
|
||||
case object SHUTDOWN extends ChannelState
|
||||
case object NEGOTIATING extends ChannelState
|
||||
case object NEGOTIATING_SIMPLE extends ChannelState
|
||||
case object CLOSING extends ChannelState
|
||||
case object CLOSED extends ChannelState
|
||||
case object OFFLINE extends ChannelState
|
||||
|
@ -643,6 +644,16 @@ final case class DATA_NEGOTIATING(commitments: Commitments,
|
|||
require(closingTxProposed.nonEmpty, "there must always be a list for the current negotiation")
|
||||
require(!commitments.params.localParams.paysClosingFees || closingTxProposed.forall(_.nonEmpty), "initiator must have at least one closing signature for every negotiation attempt because it initiates the closing")
|
||||
}
|
||||
final case class DATA_NEGOTIATING_SIMPLE(commitments: Commitments,
|
||||
lastClosingFeerate: FeeratePerKw,
|
||||
localScriptPubKey: ByteVector, remoteScriptPubKey: ByteVector,
|
||||
// Closing transactions we created, where we pay the fees (unsigned).
|
||||
proposedClosingTxs: List[ClosingTxs],
|
||||
// Closing transactions we published: this contains our local transactions for
|
||||
// which they sent a signature, and their closing transactions that we signed.
|
||||
publishedClosingTxs: List[ClosingTx]) extends ChannelDataWithCommitments {
|
||||
def findClosingTx(tx: Transaction): Option[ClosingTx] = publishedClosingTxs.find(_.tx.txid == tx.txid).orElse(proposedClosingTxs.flatMap(_.all).find(_.tx.txid == tx.txid))
|
||||
}
|
||||
final case class DATA_CLOSING(commitments: Commitments,
|
||||
waitingSince: BlockHeight, // how long since we initiated the closing
|
||||
finalScriptPubKey: ByteVector, // where to send all on-chain funds
|
||||
|
|
|
@ -116,7 +116,10 @@ case class FeerateTooDifferent (override val channelId: Byte
|
|||
case class InvalidAnnouncementSignatures (override val channelId: ByteVector32, annSigs: AnnouncementSignatures) extends ChannelException(channelId, s"invalid announcement signatures: $annSigs")
|
||||
case class InvalidCommitmentSignature (override val channelId: ByteVector32, fundingTxId: TxId, fundingTxIndex: Long, unsignedCommitTx: Transaction) extends ChannelException(channelId, s"invalid commitment signature: fundingTxId=$fundingTxId fundingTxIndex=$fundingTxIndex commitTxId=${unsignedCommitTx.txid} commitTx=$unsignedCommitTx")
|
||||
case class InvalidHtlcSignature (override val channelId: ByteVector32, txId: TxId) extends ChannelException(channelId, s"invalid htlc signature: txId=$txId")
|
||||
case class CannotGenerateClosingTx (override val channelId: ByteVector32) extends ChannelException(channelId, "failed to generate closing transaction: all outputs are trimmed")
|
||||
case class MissingCloseSignature (override val channelId: ByteVector32) extends ChannelException(channelId, "closing_complete is missing a signature for a closing transaction including our output")
|
||||
case class InvalidCloseSignature (override val channelId: ByteVector32, txId: TxId) extends ChannelException(channelId, s"invalid close signature: txId=$txId")
|
||||
case class InvalidCloseeScript (override val channelId: ByteVector32, received: ByteVector, expected: ByteVector) extends ChannelException(channelId, s"invalid closee script used in closing_complete: our latest script is $expected, you're using $received")
|
||||
case class InvalidCloseAmountBelowDust (override val channelId: ByteVector32, txId: TxId) extends ChannelException(channelId, s"invalid closing tx: some outputs are below dust: txId=$txId")
|
||||
case class CommitSigCountMismatch (override val channelId: ByteVector32, expected: Int, actual: Int) extends ChannelException(channelId, s"commit sig count mismatch: expected=$expected actual=$actual")
|
||||
case class HtlcSigCountMismatch (override val channelId: ByteVector32, expected: Int, actual: Int) extends ChannelException(channelId, s"htlc sig count mismatch: expected=$expected actual=$actual")
|
||||
|
|
|
@ -114,10 +114,11 @@ case class ChannelParams(channelId: ByteVector32,
|
|||
// README: if we set our bitcoin node to generate taproot addresses and our peer does not support option_shutdown_anysegwit, we will not be able to mutual-close
|
||||
// channels as the isValidFinalScriptPubkey() check would fail.
|
||||
val allowAnySegwit = Features.canUseFeature(localParams.initFeatures, remoteParams.initFeatures, Features.ShutdownAnySegwit)
|
||||
val allowOpReturn = Features.canUseFeature(localParams.initFeatures, remoteParams.initFeatures, Features.SimpleClose)
|
||||
val mustUseUpfrontShutdownScript = channelFeatures.hasFeature(Features.UpfrontShutdownScript)
|
||||
// we only enforce using the pre-generated shutdown script if option_upfront_shutdown_script is set
|
||||
if (mustUseUpfrontShutdownScript && localParams.upfrontShutdownScript_opt.exists(_ != localScriptPubKey)) Left(InvalidFinalScript(channelId))
|
||||
else if (!Closing.MutualClose.isValidFinalScriptPubkey(localScriptPubKey, allowAnySegwit)) Left(InvalidFinalScript(channelId))
|
||||
else if (!Closing.MutualClose.isValidFinalScriptPubkey(localScriptPubKey, allowAnySegwit, allowOpReturn)) Left(InvalidFinalScript(channelId))
|
||||
else Right(localScriptPubKey)
|
||||
}
|
||||
|
||||
|
@ -128,10 +129,11 @@ case class ChannelParams(channelId: ByteVector32,
|
|||
def validateRemoteShutdownScript(remoteScriptPubKey: ByteVector): Either[ChannelException, ByteVector] = {
|
||||
// to check whether shutdown_any_segwit is active we check features in local and remote parameters, which are negotiated each time we connect to our peer.
|
||||
val allowAnySegwit = Features.canUseFeature(localParams.initFeatures, remoteParams.initFeatures, Features.ShutdownAnySegwit)
|
||||
val allowOpReturn = Features.canUseFeature(localParams.initFeatures, remoteParams.initFeatures, Features.SimpleClose)
|
||||
val mustUseUpfrontShutdownScript = channelFeatures.hasFeature(Features.UpfrontShutdownScript)
|
||||
// we only enforce using the pre-generated shutdown script if option_upfront_shutdown_script is set
|
||||
if (mustUseUpfrontShutdownScript && remoteParams.upfrontShutdownScript_opt.exists(_ != remoteScriptPubKey)) Left(InvalidFinalScript(channelId))
|
||||
else if (!Closing.MutualClose.isValidFinalScriptPubkey(remoteScriptPubKey, allowAnySegwit)) Left(InvalidFinalScript(channelId))
|
||||
else if (!Closing.MutualClose.isValidFinalScriptPubkey(remoteScriptPubKey, allowAnySegwit, allowOpReturn)) Left(InvalidFinalScript(channelId))
|
||||
else Right(remoteScriptPubKey)
|
||||
}
|
||||
|
||||
|
|
|
@ -59,6 +59,7 @@ object Helpers {
|
|||
case d: DATA_NORMAL => d.modify(_.commitments.params).using(_.updateFeatures(localInit, remoteInit))
|
||||
case d: DATA_SHUTDOWN => d.modify(_.commitments.params).using(_.updateFeatures(localInit, remoteInit))
|
||||
case d: DATA_NEGOTIATING => d.modify(_.commitments.params).using(_.updateFeatures(localInit, remoteInit))
|
||||
case d: DATA_NEGOTIATING_SIMPLE => d.modify(_.commitments.params).using(_.updateFeatures(localInit, remoteInit))
|
||||
case d: DATA_CLOSING => d.modify(_.commitments.params).using(_.updateFeatures(localInit, remoteInit))
|
||||
case d: DATA_WAIT_FOR_REMOTE_PUBLISH_FUTURE_COMMITMENT => d.modify(_.commitments.params).using(_.updateFeatures(localInit, remoteInit))
|
||||
}
|
||||
|
@ -67,14 +68,15 @@ object Helpers {
|
|||
private def extractShutdownScript(channelId: ByteVector32, localFeatures: Features[InitFeature], remoteFeatures: Features[InitFeature], upfrontShutdownScript_opt: Option[ByteVector]): Either[ChannelException, Option[ByteVector]] = {
|
||||
val canUseUpfrontShutdownScript = Features.canUseFeature(localFeatures, remoteFeatures, Features.UpfrontShutdownScript)
|
||||
val canUseAnySegwit = Features.canUseFeature(localFeatures, remoteFeatures, Features.ShutdownAnySegwit)
|
||||
extractShutdownScript(channelId, canUseUpfrontShutdownScript, canUseAnySegwit, upfrontShutdownScript_opt)
|
||||
val canUseOpReturn = Features.canUseFeature(localFeatures, remoteFeatures, Features.SimpleClose)
|
||||
extractShutdownScript(channelId, canUseUpfrontShutdownScript, canUseAnySegwit, canUseOpReturn, upfrontShutdownScript_opt)
|
||||
}
|
||||
|
||||
private def extractShutdownScript(channelId: ByteVector32, hasOptionUpfrontShutdownScript: Boolean, allowAnySegwit: Boolean, upfrontShutdownScript_opt: Option[ByteVector]): Either[ChannelException, Option[ByteVector]] = {
|
||||
private def extractShutdownScript(channelId: ByteVector32, hasOptionUpfrontShutdownScript: Boolean, allowAnySegwit: Boolean, allowOpReturn: Boolean, upfrontShutdownScript_opt: Option[ByteVector]): Either[ChannelException, Option[ByteVector]] = {
|
||||
(hasOptionUpfrontShutdownScript, upfrontShutdownScript_opt) match {
|
||||
case (true, None) => Left(MissingUpfrontShutdownScript(channelId))
|
||||
case (true, Some(script)) if script.isEmpty => Right(None) // but the provided script can be empty
|
||||
case (true, Some(script)) if !Closing.MutualClose.isValidFinalScriptPubkey(script, allowAnySegwit) => Left(InvalidFinalScript(channelId))
|
||||
case (true, Some(script)) if !Closing.MutualClose.isValidFinalScriptPubkey(script, allowAnySegwit, allowOpReturn) => Left(InvalidFinalScript(channelId))
|
||||
case (true, Some(script)) => Right(Some(script))
|
||||
case (false, Some(_)) => Right(None) // they provided a script but the feature is not active, we just ignore it
|
||||
case _ => Right(None)
|
||||
|
@ -626,13 +628,14 @@ object Helpers {
|
|||
|
||||
object MutualClose {
|
||||
|
||||
def isValidFinalScriptPubkey(scriptPubKey: ByteVector, allowAnySegwit: Boolean): Boolean = {
|
||||
def isValidFinalScriptPubkey(scriptPubKey: ByteVector, allowAnySegwit: Boolean, allowOpReturn: Boolean): Boolean = {
|
||||
Try(Script.parse(scriptPubKey)) match {
|
||||
case Success(OP_DUP :: OP_HASH160 :: OP_PUSHDATA(pubkeyHash, _) :: OP_EQUALVERIFY :: OP_CHECKSIG :: Nil) if pubkeyHash.size == 20 => true
|
||||
case Success(OP_HASH160 :: OP_PUSHDATA(scriptHash, _) :: OP_EQUAL :: Nil) if scriptHash.size == 20 => true
|
||||
case Success(OP_0 :: OP_PUSHDATA(pubkeyHash, _) :: Nil) if pubkeyHash.size == 20 => true
|
||||
case Success(OP_0 :: OP_PUSHDATA(scriptHash, _) :: Nil) if scriptHash.size == 32 => true
|
||||
case Success((OP_1 | OP_2 | OP_3 | OP_4 | OP_5 | OP_6 | OP_7 | OP_8 | OP_9 | OP_10 | OP_11 | OP_12 | OP_13 | OP_14 | OP_15 | OP_16) :: OP_PUSHDATA(program, _) :: Nil) if allowAnySegwit && 2 <= program.length && program.length <= 40 => true
|
||||
case Success(OP_RETURN :: OP_PUSHDATA(data, code) :: Nil) if allowOpReturn => OP_PUSHDATA.isMinimal(data, code) && data.size >= 6 && data.size <= 80
|
||||
case _ => false
|
||||
}
|
||||
}
|
||||
|
@ -693,22 +696,102 @@ object Helpers {
|
|||
}
|
||||
}
|
||||
|
||||
/** We are the closer: we sign closing transactions for which we pay the fees. */
|
||||
def makeSimpleClosingTx(currentBlockHeight: BlockHeight, keyManager: ChannelKeyManager, commitment: FullCommitment, localScriptPubkey: ByteVector, remoteScriptPubkey: ByteVector, feerate: FeeratePerKw): Either[ChannelException, (ClosingTxs, ClosingComplete)] = {
|
||||
// We must convert the feerate to a fee: we must build dummy transactions to compute their weight.
|
||||
val closingFee = {
|
||||
val dummyClosingTxs = Transactions.makeSimpleClosingTxs(commitment.commitInput, commitment.localCommit.spec, SimpleClosingTxFee.PaidByUs(0 sat), currentBlockHeight.toLong, localScriptPubkey, remoteScriptPubkey)
|
||||
dummyClosingTxs.preferred_opt match {
|
||||
case Some(dummyTx) =>
|
||||
val dummySignedTx = Transactions.addSigs(dummyTx, Transactions.PlaceHolderPubKey, Transactions.PlaceHolderPubKey, Transactions.PlaceHolderSig, Transactions.PlaceHolderSig)
|
||||
SimpleClosingTxFee.PaidByUs(Transactions.weight2fee(feerate, dummySignedTx.tx.weight()))
|
||||
case None => return Left(CannotGenerateClosingTx(commitment.channelId))
|
||||
}
|
||||
}
|
||||
// Now that we know the fee we're ready to pay, we can create our closing transactions.
|
||||
val closingTxs = Transactions.makeSimpleClosingTxs(commitment.commitInput, commitment.localCommit.spec, closingFee, currentBlockHeight.toLong, localScriptPubkey, remoteScriptPubkey)
|
||||
closingTxs.preferred_opt match {
|
||||
case Some(closingTx) if closingTx.fee > 0.sat => ()
|
||||
case _ => return Left(CannotGenerateClosingTx(commitment.channelId))
|
||||
}
|
||||
val localFundingPubKey = keyManager.fundingPublicKey(commitment.localParams.fundingKeyPath, commitment.fundingTxIndex)
|
||||
val closingComplete = ClosingComplete(commitment.channelId, localScriptPubkey, remoteScriptPubkey, closingFee.fee, currentBlockHeight.toLong, TlvStream(Set(
|
||||
closingTxs.localAndRemote_opt.map(tx => ClosingTlv.CloserAndCloseeOutputs(keyManager.sign(tx, localFundingPubKey, TxOwner.Local, commitment.params.commitmentFormat))),
|
||||
closingTxs.localOnly_opt.map(tx => ClosingTlv.CloserOutputOnly(keyManager.sign(tx, localFundingPubKey, TxOwner.Local, commitment.params.commitmentFormat))),
|
||||
closingTxs.remoteOnly_opt.map(tx => ClosingTlv.CloseeOutputOnly(keyManager.sign(tx, localFundingPubKey, TxOwner.Local, commitment.params.commitmentFormat))),
|
||||
).flatten[ClosingTlv]))
|
||||
Right(closingTxs, closingComplete)
|
||||
}
|
||||
|
||||
/**
|
||||
* We are the closee: we choose one of the closer's transactions and sign it back.
|
||||
*
|
||||
* Callers should ignore failures: since the protocol is fully asynchronous, failures here simply mean that they
|
||||
* are not using our latest script (race condition between our closing_complete and theirs).
|
||||
*/
|
||||
def signSimpleClosingTx(keyManager: ChannelKeyManager, commitment: FullCommitment, localScriptPubkey: ByteVector, remoteScriptPubkey: ByteVector, closingComplete: ClosingComplete): Either[ChannelException, (ClosingTx, ClosingSig)] = {
|
||||
val closingFee = SimpleClosingTxFee.PaidByThem(closingComplete.fees)
|
||||
val closingTxs = Transactions.makeSimpleClosingTxs(commitment.commitInput, commitment.localCommit.spec, closingFee, closingComplete.lockTime, localScriptPubkey, remoteScriptPubkey)
|
||||
// If our output isn't dust, they must provide a signature for a transaction that includes it.
|
||||
// Note that we're the closee, so we look for signatures including the closee output.
|
||||
(closingTxs.localAndRemote_opt, closingTxs.localOnly_opt) match {
|
||||
case (Some(_), Some(_)) if closingComplete.closerAndCloseeOutputsSig_opt.isEmpty && closingComplete.closeeOutputOnlySig_opt.isEmpty => return Left(MissingCloseSignature(commitment.channelId))
|
||||
case (Some(_), None) if closingComplete.closerAndCloseeOutputsSig_opt.isEmpty => return Left(MissingCloseSignature(commitment.channelId))
|
||||
case (None, Some(_)) if closingComplete.closeeOutputOnlySig_opt.isEmpty => return Left(MissingCloseSignature(commitment.channelId))
|
||||
case _ => ()
|
||||
}
|
||||
// We choose the closing signature that matches our preferred closing transaction.
|
||||
val closingTxsWithSigs = Seq(
|
||||
closingComplete.closerAndCloseeOutputsSig_opt.flatMap(remoteSig => closingTxs.localAndRemote_opt.map(tx => (tx, remoteSig, localSig => ClosingTlv.CloserAndCloseeOutputs(localSig)))),
|
||||
closingComplete.closeeOutputOnlySig_opt.flatMap(remoteSig => closingTxs.localOnly_opt.map(tx => (tx, remoteSig, localSig => ClosingTlv.CloseeOutputOnly(localSig)))),
|
||||
closingComplete.closerOutputOnlySig_opt.flatMap(remoteSig => closingTxs.remoteOnly_opt.map(tx => (tx, remoteSig, localSig => ClosingTlv.CloserOutputOnly(localSig)))),
|
||||
).flatten
|
||||
closingTxsWithSigs.headOption match {
|
||||
case Some((closingTx, remoteSig, sigToTlv)) =>
|
||||
val localFundingPubKey = keyManager.fundingPublicKey(commitment.localParams.fundingKeyPath, commitment.fundingTxIndex)
|
||||
val localSig = keyManager.sign(closingTx, localFundingPubKey, TxOwner.Local, commitment.params.commitmentFormat)
|
||||
val signedClosingTx = Transactions.addSigs(closingTx, localFundingPubKey.publicKey, commitment.remoteFundingPubKey, localSig, remoteSig)
|
||||
Transactions.checkSpendable(signedClosingTx) match {
|
||||
case Failure(_) => Left(InvalidCloseSignature(commitment.channelId, signedClosingTx.tx.txid))
|
||||
case Success(_) => Right(signedClosingTx, ClosingSig(commitment.channelId, remoteScriptPubkey, localScriptPubkey, closingComplete.fees, closingComplete.lockTime, TlvStream(sigToTlv(localSig))))
|
||||
}
|
||||
case None => Left(MissingCloseSignature(commitment.channelId))
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* We are the closer: they sent us their signature so we should now have a fully signed closing transaction.
|
||||
*
|
||||
* Callers should ignore failures: since the protocol is fully asynchronous, failures here simply mean that we
|
||||
* sent another closing_complete before receiving their closing_sig, which is now obsolete: we ignore it and wait
|
||||
* for their next closing_sig that will match our latest closing_complete.
|
||||
*/
|
||||
def receiveSimpleClosingSig(keyManager: ChannelKeyManager, commitment: FullCommitment, closingTxs: ClosingTxs, closingSig: ClosingSig): Either[ChannelException, ClosingTx] = {
|
||||
val closingTxsWithSig = Seq(
|
||||
closingSig.closerAndCloseeOutputsSig_opt.flatMap(sig => closingTxs.localAndRemote_opt.map(tx => (tx, sig))),
|
||||
closingSig.closerOutputOnlySig_opt.flatMap(sig => closingTxs.localOnly_opt.map(tx => (tx, sig))),
|
||||
closingSig.closeeOutputOnlySig_opt.flatMap(sig => closingTxs.remoteOnly_opt.map(tx => (tx, sig))),
|
||||
).flatten
|
||||
closingTxsWithSig.headOption match {
|
||||
case Some((closingTx, remoteSig)) =>
|
||||
val localFundingPubKey = keyManager.fundingPublicKey(commitment.localParams.fundingKeyPath, commitment.fundingTxIndex)
|
||||
val localSig = keyManager.sign(closingTx, localFundingPubKey, TxOwner.Local, commitment.params.commitmentFormat)
|
||||
val signedClosingTx = Transactions.addSigs(closingTx, localFundingPubKey.publicKey, commitment.remoteFundingPubKey, localSig, remoteSig)
|
||||
Transactions.checkSpendable(signedClosingTx) match {
|
||||
case Failure(_) => Left(InvalidCloseSignature(commitment.channelId, signedClosingTx.tx.txid))
|
||||
case Success(_) => Right(signedClosingTx)
|
||||
}
|
||||
case None => Left(MissingCloseSignature(commitment.channelId))
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that all closing outputs are above bitcoin's dust limit for their script type, otherwise there is a risk
|
||||
* that the closing transaction will not be relayed to miners' mempool and will not confirm.
|
||||
* The various dust limits are detailed in https://github.com/lightningnetwork/lightning-rfc/blob/master/03-transactions.md#dust-limits
|
||||
*/
|
||||
def checkClosingDustAmounts(closingTx: ClosingTx): Boolean = {
|
||||
closingTx.tx.txOut.forall(txOut => {
|
||||
Try(Script.parse(txOut.publicKeyScript)) match {
|
||||
case Success(OP_DUP :: OP_HASH160 :: OP_PUSHDATA(pubkeyHash, _) :: OP_EQUALVERIFY :: OP_CHECKSIG :: Nil) if pubkeyHash.size == 20 => txOut.amount >= 546.sat
|
||||
case Success(OP_HASH160 :: OP_PUSHDATA(scriptHash, _) :: OP_EQUAL :: Nil) if scriptHash.size == 20 => txOut.amount >= 540.sat
|
||||
case Success(OP_0 :: OP_PUSHDATA(pubkeyHash, _) :: Nil) if pubkeyHash.size == 20 => txOut.amount >= 294.sat
|
||||
case Success(OP_0 :: OP_PUSHDATA(scriptHash, _) :: Nil) if scriptHash.size == 32 => txOut.amount >= 330.sat
|
||||
case Success((OP_1 | OP_2 | OP_3 | OP_4 | OP_5 | OP_6 | OP_7 | OP_8 | OP_9 | OP_10 | OP_11 | OP_12 | OP_13 | OP_14 | OP_15 | OP_16) :: OP_PUSHDATA(program, _) :: Nil) if 2 <= program.length && program.length <= 40 => txOut.amount >= 354.sat
|
||||
case _ => txOut.amount >= 546.sat
|
||||
}
|
||||
})
|
||||
closingTx.tx.txOut.forall(txOut => txOut.amount >= Transactions.dustLimit(txOut.publicKeyScript))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -86,8 +86,7 @@ object Channel {
|
|||
minFundingPrivateSatoshis: Satoshi,
|
||||
toRemoteDelay: CltvExpiryDelta,
|
||||
maxToLocalDelay: CltvExpiryDelta,
|
||||
minDepthFunding: Int,
|
||||
minDepthClosing: Int,
|
||||
minDepth: Int,
|
||||
expiryDelta: CltvExpiryDelta,
|
||||
maxExpiryDelta: CltvExpiryDelta,
|
||||
fulfillSafetyBeforeTimeout: CltvExpiryDelta,
|
||||
|
@ -113,6 +112,9 @@ object Channel {
|
|||
require(balanceThresholds.sortBy(_.available) == balanceThresholds, "channel-update.balance-thresholds must be sorted by available-sat")
|
||||
|
||||
def minFundingSatoshis(flags: ChannelFlags): Satoshi = if (flags.announceChannel) minFundingPublicSatoshis else minFundingPrivateSatoshis
|
||||
|
||||
/** The number of confirmations required to be safe from reorgs is always scaled based on the amount at risk. */
|
||||
def minDepthScaled(amount: Satoshi): Int = ChannelParams.minDepthScaled(minDepth, amount)
|
||||
}
|
||||
|
||||
trait TxPublisherFactory {
|
||||
|
@ -308,11 +310,11 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
watchFundingConfirmed(commitment.fundingTxId, Some(singleFundingMinDepth(data)), herdDelay_opt)
|
||||
case fundingTx: LocalFundingStatus.DualFundedUnconfirmedFundingTx =>
|
||||
publishFundingTx(fundingTx)
|
||||
val minDepth_opt = data.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepthFunding, fundingTx.sharedTx.tx)
|
||||
val minDepth_opt = data.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepth, fundingTx.sharedTx.tx)
|
||||
watchFundingConfirmed(fundingTx.sharedTx.txId, minDepth_opt, herdDelay_opt)
|
||||
case fundingTx: LocalFundingStatus.ZeroconfPublishedFundingTx =>
|
||||
// those are zero-conf channels, the min-depth isn't critical, we use the default
|
||||
watchFundingConfirmed(fundingTx.tx.txid, Some(nodeParams.channelConf.minDepthFunding.toLong), herdDelay_opt)
|
||||
// This is a zero-conf channel, the min-depth isn't critical: we use the default.
|
||||
watchFundingConfirmed(fundingTx.tx.txid, Some(nodeParams.channelConf.minDepth.toLong), herdDelay_opt)
|
||||
case _: LocalFundingStatus.ConfirmedFundingTx =>
|
||||
data match {
|
||||
case closing: DATA_CLOSING if Closing.nothingAtStake(closing) || Closing.isClosingTypeAlreadyKnown(closing).isDefined =>
|
||||
|
@ -321,6 +323,12 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
case closing: DATA_CLOSING =>
|
||||
// in all other cases we need to be ready for any type of closing
|
||||
watchFundingSpent(commitment, closing.spendingTxs.map(_.txid).toSet, herdDelay_opt)
|
||||
case negotiating: DATA_NEGOTIATING =>
|
||||
val closingTxs = negotiating.closingTxProposed.flatten.map(_.unsignedTx.tx.txid).toSet
|
||||
watchFundingSpent(commitment, additionalKnownSpendingTxs = closingTxs, herdDelay_opt)
|
||||
case negotiating: DATA_NEGOTIATING_SIMPLE =>
|
||||
val closingTxs = negotiating.proposedClosingTxs.flatMap(_.all).map(_.tx.txid).toSet ++ negotiating.publishedClosingTxs.map(_.tx.txid).toSet
|
||||
watchFundingSpent(commitment, additionalKnownSpendingTxs = closingTxs, herdDelay_opt)
|
||||
case _ =>
|
||||
// Children splice transactions may already spend that confirmed funding transaction.
|
||||
val spliceSpendingTxs = data.commitments.all.collect { case c if c.fundingTxIndex == commitment.fundingTxIndex + 1 => c.fundingTxId }
|
||||
|
@ -366,10 +374,8 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
}
|
||||
// no need to go OFFLINE, we can directly switch to CLOSING
|
||||
goto(CLOSING) using closing
|
||||
|
||||
case normal: DATA_NORMAL =>
|
||||
context.system.eventStream.publish(ShortChannelIdAssigned(self, normal.channelId, normal.lastAnnouncement_opt, normal.aliases, remoteNodeId))
|
||||
|
||||
// we check the configuration because the values for channel_update may have changed while eclair was down
|
||||
val fees = getRelayFees(nodeParams, remoteNodeId, normal.commitments.announceChannel)
|
||||
if (fees.feeBase != normal.channelUpdate.feeBaseMsat ||
|
||||
|
@ -382,9 +388,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
// we take into account the date of the last update so that we don't send superfluous updates when we restart the app
|
||||
val periodicRefreshInitialDelay = Helpers.nextChannelUpdateRefresh(normal.channelUpdate.timestamp)
|
||||
context.system.scheduler.scheduleWithFixedDelay(initialDelay = periodicRefreshInitialDelay, delay = REFRESH_CHANNEL_UPDATE_INTERVAL, receiver = self, message = BroadcastChannelUpdate(PeriodicRefresh))
|
||||
|
||||
goto(OFFLINE) using normal
|
||||
|
||||
case _ =>
|
||||
goto(OFFLINE) using data
|
||||
}
|
||||
|
@ -595,7 +599,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
// We don't have their tx_sigs, but they have ours, and could publish the funding tx without telling us.
|
||||
// That's why we move on immediately to the next step, and will update our unsigned funding tx when we
|
||||
// receive their tx_sigs.
|
||||
val minDepth_opt = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepthFunding, signingSession1.fundingTx.sharedTx.tx)
|
||||
val minDepth_opt = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepth, signingSession1.fundingTx.sharedTx.tx)
|
||||
watchFundingConfirmed(signingSession.fundingTx.txId, minDepth_opt, delay_opt = None)
|
||||
val commitments1 = d.commitments.add(signingSession1.commitment)
|
||||
val d1 = d.copy(commitments = commitments1, spliceStatus = SpliceStatus.NoSplice)
|
||||
|
@ -748,10 +752,13 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
}
|
||||
// are there pending signed changes on either side? we need to have received their last revocation!
|
||||
if (d.commitments.hasNoPendingHtlcsOrFeeUpdate) {
|
||||
// there are no pending signed changes, let's go directly to NEGOTIATING
|
||||
if (d.commitments.params.localParams.paysClosingFees) {
|
||||
// there are no pending signed changes, let's directly negotiate a closing transaction
|
||||
if (Features.canUseFeature(d.commitments.params.localParams.initFeatures, d.commitments.params.remoteParams.initFeatures, Features.SimpleClose)) {
|
||||
val (d1, closingComplete_opt) = startSimpleClose(d.commitments, localShutdown, remoteShutdown, d.closingFeerates)
|
||||
goto(NEGOTIATING_SIMPLE) using d1 storing() sending sendList ++ closingComplete_opt.toSeq
|
||||
} else if (d.commitments.params.localParams.paysClosingFees) {
|
||||
// we pay the closing fees, so we initiate the negotiation by sending the first closing_signed
|
||||
val (closingTx, closingSigned) = Closing.MutualClose.makeFirstClosingTx(keyManager, d.commitments.latest, localShutdown.scriptPubKey, remoteShutdownScript, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf, d.closingFeerates)
|
||||
val (closingTx, closingSigned) = MutualClose.makeFirstClosingTx(keyManager, d.commitments.latest, localShutdown.scriptPubKey, remoteShutdownScript, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf, d.closingFeerates)
|
||||
goto(NEGOTIATING) using DATA_NEGOTIATING(d.commitments, localShutdown, remoteShutdown, List(List(ClosingTxProposed(closingTx, closingSigned))), bestUnpublishedClosingTx_opt = None) storing() sending sendList :+ closingSigned
|
||||
} else {
|
||||
// we are not the channel initiator, will wait for their closing_signed
|
||||
|
@ -1316,7 +1323,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
rollbackFundingAttempt(signingSession.fundingTx.tx, previousTxs = Seq.empty) // no splice rbf yet
|
||||
stay() using d.copy(spliceStatus = SpliceStatus.SpliceAborted) sending TxAbort(d.channelId, f.getMessage)
|
||||
case Right(signingSession1) =>
|
||||
val minDepth_opt = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepthFunding, signingSession1.fundingTx.sharedTx.tx)
|
||||
val minDepth_opt = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepth, signingSession1.fundingTx.sharedTx.tx)
|
||||
watchFundingConfirmed(signingSession.fundingTx.txId, minDepth_opt, delay_opt = None)
|
||||
val commitments1 = d.commitments.add(signingSession1.commitment)
|
||||
val d1 = d.copy(commitments = commitments1, spliceStatus = SpliceStatus.NoSplice)
|
||||
|
@ -1335,7 +1342,8 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
val fundingStatus = LocalFundingStatus.ZeroconfPublishedFundingTx(w.tx, d.commitments.localFundingSigs(w.tx.txid), d.commitments.liquidityPurchase(w.tx.txid))
|
||||
d.commitments.updateLocalFundingStatus(w.tx.txid, fundingStatus, d.lastAnnouncedFundingTxId_opt) match {
|
||||
case Right((commitments1, _)) =>
|
||||
watchFundingConfirmed(w.tx.txid, Some(nodeParams.channelConf.minDepthFunding), delay_opt = None)
|
||||
// This is a zero-conf channel, the min-depth isn't critical: we use the default.
|
||||
watchFundingConfirmed(w.tx.txid, Some(nodeParams.channelConf.minDepth), delay_opt = None)
|
||||
maybeEmitEventsPostSplice(d.aliases, d.commitments, commitments1, d.lastAnnouncement_opt)
|
||||
maybeUpdateMaxHtlcAmount(d.channelUpdate.htlcMaximumMsat, commitments1)
|
||||
stay() using d.copy(commitments = commitments1) storing() sending SpliceLocked(d.channelId, w.tx.txid)
|
||||
|
@ -1531,9 +1539,12 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
log.debug("received a new sig:\n{}", commitments1.latest.specs2String)
|
||||
context.system.eventStream.publish(ChannelSignatureReceived(self, commitments1))
|
||||
if (commitments1.hasNoPendingHtlcsOrFeeUpdate) {
|
||||
if (d.commitments.params.localParams.paysClosingFees) {
|
||||
if (Features.canUseFeature(d.commitments.params.localParams.initFeatures, d.commitments.params.remoteParams.initFeatures, Features.SimpleClose)) {
|
||||
val (d1, closingComplete_opt) = startSimpleClose(d.commitments, localShutdown, remoteShutdown, d.closingFeerates)
|
||||
goto(NEGOTIATING_SIMPLE) using d1 storing() sending revocation +: closingComplete_opt.toSeq
|
||||
} else if (d.commitments.params.localParams.paysClosingFees) {
|
||||
// we pay the closing fees, so we initiate the negotiation by sending the first closing_signed
|
||||
val (closingTx, closingSigned) = Closing.MutualClose.makeFirstClosingTx(keyManager, commitments1.latest, localShutdown.scriptPubKey, remoteShutdown.scriptPubKey, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf, closingFeerates)
|
||||
val (closingTx, closingSigned) = MutualClose.makeFirstClosingTx(keyManager, commitments1.latest, localShutdown.scriptPubKey, remoteShutdown.scriptPubKey, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf, closingFeerates)
|
||||
goto(NEGOTIATING) using DATA_NEGOTIATING(commitments1, localShutdown, remoteShutdown, List(List(ClosingTxProposed(closingTx, closingSigned))), bestUnpublishedClosingTx_opt = None) storing() sending revocation :: closingSigned :: Nil
|
||||
} else {
|
||||
// we are not the channel initiator, will wait for their closing_signed
|
||||
|
@ -1573,9 +1584,12 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
}
|
||||
if (commitments1.hasNoPendingHtlcsOrFeeUpdate) {
|
||||
log.debug("switching to NEGOTIATING spec:\n{}", commitments1.latest.specs2String)
|
||||
if (d.commitments.params.localParams.paysClosingFees) {
|
||||
if (Features.canUseFeature(d.commitments.params.localParams.initFeatures, d.commitments.params.remoteParams.initFeatures, Features.SimpleClose)) {
|
||||
val (d1, closingComplete_opt) = startSimpleClose(d.commitments, localShutdown, remoteShutdown, d.closingFeerates)
|
||||
goto(NEGOTIATING_SIMPLE) using d1 storing() sending closingComplete_opt.toSeq
|
||||
} else if (d.commitments.params.localParams.paysClosingFees) {
|
||||
// we pay the closing fees, so we initiate the negotiation by sending the first closing_signed
|
||||
val (closingTx, closingSigned) = Closing.MutualClose.makeFirstClosingTx(keyManager, commitments1.latest, localShutdown.scriptPubKey, remoteShutdown.scriptPubKey, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf, closingFeerates)
|
||||
val (closingTx, closingSigned) = MutualClose.makeFirstClosingTx(keyManager, commitments1.latest, localShutdown.scriptPubKey, remoteShutdown.scriptPubKey, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf, closingFeerates)
|
||||
goto(NEGOTIATING) using DATA_NEGOTIATING(commitments1, localShutdown, remoteShutdown, List(List(ClosingTxProposed(closingTx, closingSigned))), bestUnpublishedClosingTx_opt = None) storing() sending closingSigned
|
||||
} else {
|
||||
// we are not the channel initiator, will wait for their closing_signed
|
||||
|
@ -1590,6 +1604,12 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
case Left(cause) => handleLocalError(cause, d, Some(revocation))
|
||||
}
|
||||
|
||||
case Event(shutdown: Shutdown, d: DATA_SHUTDOWN) =>
|
||||
if (shutdown.scriptPubKey != d.remoteShutdown.scriptPubKey) {
|
||||
log.debug("our peer updated their shutdown script (previous={}, current={})", d.remoteShutdown.scriptPubKey, shutdown.scriptPubKey)
|
||||
}
|
||||
stay() using d.copy(remoteShutdown = shutdown) storing()
|
||||
|
||||
case Event(r: RevocationTimeout, d: DATA_SHUTDOWN) => handleRevocationTimeout(r, d)
|
||||
|
||||
case Event(ProcessCurrentBlockHeight(c), d: DATA_SHUTDOWN) => handleNewBlock(c, d)
|
||||
|
@ -1597,17 +1617,18 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
case Event(c: CurrentFeerates.BitcoinCore, d: DATA_SHUTDOWN) => handleCurrentFeerate(c, d)
|
||||
|
||||
case Event(c: CMD_CLOSE, d: DATA_SHUTDOWN) =>
|
||||
c.feerates match {
|
||||
case Some(feerates) if c.feerates != d.closingFeerates =>
|
||||
if (c.scriptPubKey.nonEmpty && !c.scriptPubKey.contains(d.localShutdown.scriptPubKey)) {
|
||||
log.warning("cannot update closing script when closing is already in progress")
|
||||
handleCommandError(ClosingAlreadyInProgress(d.channelId), c)
|
||||
} else {
|
||||
log.info("updating our closing feerates: {}", feerates)
|
||||
handleCommandSuccess(c, d.copy(closingFeerates = c.feerates)) storing()
|
||||
}
|
||||
case _ =>
|
||||
handleCommandError(ClosingAlreadyInProgress(d.channelId), c)
|
||||
val useSimpleClose = Features.canUseFeature(d.commitments.params.localParams.initFeatures, d.commitments.params.remoteParams.initFeatures, Features.SimpleClose)
|
||||
val localShutdown_opt = c.scriptPubKey match {
|
||||
case Some(scriptPubKey) if scriptPubKey != d.localShutdown.scriptPubKey && useSimpleClose => Some(Shutdown(d.channelId, scriptPubKey))
|
||||
case _ => None
|
||||
}
|
||||
if (c.scriptPubKey.exists(_ != d.localShutdown.scriptPubKey) && !useSimpleClose) {
|
||||
handleCommandError(ClosingAlreadyInProgress(d.channelId), c)
|
||||
} else if (localShutdown_opt.nonEmpty || c.feerates.nonEmpty) {
|
||||
val d1 = d.copy(localShutdown = localShutdown_opt.getOrElse(d.localShutdown), closingFeerates = c.feerates.orElse(d.closingFeerates))
|
||||
handleCommandSuccess(c, d1) storing() sending localShutdown_opt.toSeq
|
||||
} else {
|
||||
handleCommandError(ClosingAlreadyInProgress(d.channelId), c)
|
||||
}
|
||||
|
||||
case Event(e: Error, d: DATA_SHUTDOWN) => handleRemoteError(e, d)
|
||||
|
@ -1615,17 +1636,18 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
})
|
||||
|
||||
when(NEGOTIATING)(handleExceptions {
|
||||
// Upon reconnection, nodes must re-transmit their shutdown message, so we may receive it now.
|
||||
case Event(remoteShutdown: Shutdown, d: DATA_NEGOTIATING) =>
|
||||
if (remoteShutdown != d.remoteShutdown) {
|
||||
// This is a spec violation: it will likely lead to a disagreement when exchanging closing_signed and a force-close.
|
||||
log.warning("received unexpected shutdown={} (previous={})", remoteShutdown, d.remoteShutdown)
|
||||
if (remoteShutdown.scriptPubKey != d.remoteShutdown.scriptPubKey) {
|
||||
// This may lead to a signature mismatch if our peer changed their script without using option_simple_close.
|
||||
log.warning("received shutdown changing remote script, this may lead to a signature mismatch: previous={}, current={}", d.remoteShutdown.scriptPubKey, remoteShutdown.scriptPubKey)
|
||||
stay() using d.copy(remoteShutdown = remoteShutdown) storing()
|
||||
} else {
|
||||
stay()
|
||||
}
|
||||
stay()
|
||||
|
||||
case Event(c: ClosingSigned, d: DATA_NEGOTIATING) =>
|
||||
val (remoteClosingFee, remoteSig) = (c.feeSatoshis, c.signature)
|
||||
Closing.MutualClose.checkClosingSignature(keyManager, d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, remoteClosingFee, remoteSig) match {
|
||||
MutualClose.checkClosingSignature(keyManager, d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, remoteClosingFee, remoteSig) match {
|
||||
case Right((signedClosingTx, closingSignedRemoteFees)) =>
|
||||
val lastLocalClosingSigned_opt = d.closingTxProposed.last.lastOption
|
||||
if (lastLocalClosingSigned_opt.exists(_.localClosingSigned.feeSatoshis == remoteClosingFee)) {
|
||||
|
@ -1648,7 +1670,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
case Some(ClosingSignedTlv.FeeRange(minFee, maxFee)) if !d.commitments.params.localParams.paysClosingFees =>
|
||||
// if we are not paying the closing fees and they proposed a fee range, we pick a value in that range and they should accept it without further negotiation
|
||||
// we don't care much about the closing fee since they're paying it (not us) and we can use CPFP if we want to speed up confirmation
|
||||
val localClosingFees = Closing.MutualClose.firstClosingFee(d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf)
|
||||
val localClosingFees = MutualClose.firstClosingFee(d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf)
|
||||
if (maxFee < localClosingFees.min) {
|
||||
log.warning("their highest closing fee is below our minimum fee: {} < {}", maxFee, localClosingFees.min)
|
||||
stay() sending Warning(d.channelId, s"closing fee range must not be below ${localClosingFees.min}")
|
||||
|
@ -1663,7 +1685,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
log.info("accepting their closing fee={}", remoteClosingFee)
|
||||
handleMutualClose(signedClosingTx, Left(d.copy(bestUnpublishedClosingTx_opt = Some(signedClosingTx)))) sending closingSignedRemoteFees
|
||||
} else {
|
||||
val (closingTx, closingSigned) = Closing.MutualClose.makeClosingTx(keyManager, d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, ClosingFees(closingFee, minFee, maxFee))
|
||||
val (closingTx, closingSigned) = MutualClose.makeClosingTx(keyManager, d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, ClosingFees(closingFee, minFee, maxFee))
|
||||
log.info("proposing closing fee={} in their fee range (min={} max={})", closingSigned.feeSatoshis, minFee, maxFee)
|
||||
val closingTxProposed1 = (d.closingTxProposed: @unchecked) match {
|
||||
case previousNegotiations :+ currentNegotiation => previousNegotiations :+ (currentNegotiation :+ ClosingTxProposed(closingTx, closingSigned))
|
||||
|
@ -1675,9 +1697,9 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
val lastLocalClosingFee_opt = lastLocalClosingSigned_opt.map(_.localClosingSigned.feeSatoshis)
|
||||
val (closingTx, closingSigned) = {
|
||||
// if we are not the channel initiator and we were waiting for them to send their first closing_signed, we don't have a lastLocalClosingFee, so we compute a firstClosingFee
|
||||
val localClosingFees = Closing.MutualClose.firstClosingFee(d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf)
|
||||
val nextPreferredFee = Closing.MutualClose.nextClosingFee(lastLocalClosingFee_opt.getOrElse(localClosingFees.preferred), remoteClosingFee)
|
||||
Closing.MutualClose.makeClosingTx(keyManager, d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, localClosingFees.copy(preferred = nextPreferredFee))
|
||||
val localClosingFees = MutualClose.firstClosingFee(d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf)
|
||||
val nextPreferredFee = MutualClose.nextClosingFee(lastLocalClosingFee_opt.getOrElse(localClosingFees.preferred), remoteClosingFee)
|
||||
MutualClose.makeClosingTx(keyManager, d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, localClosingFees.copy(preferred = nextPreferredFee))
|
||||
}
|
||||
val closingTxProposed1 = (d.closingTxProposed: @unchecked) match {
|
||||
case previousNegotiations :+ currentNegotiation => previousNegotiations :+ (currentNegotiation :+ ClosingTxProposed(closingTx, closingSigned))
|
||||
|
@ -1706,7 +1728,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
handleCommandError(ClosingAlreadyInProgress(d.channelId), c)
|
||||
} else {
|
||||
log.info("updating our closing feerates: {}", feerates)
|
||||
val (closingTx, closingSigned) = Closing.MutualClose.makeFirstClosingTx(keyManager, d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf, Some(feerates))
|
||||
val (closingTx, closingSigned) = MutualClose.makeFirstClosingTx(keyManager, d.commitments.latest, d.localShutdown.scriptPubKey, d.remoteShutdown.scriptPubKey, nodeParams.currentFeeratesForFundingClosing, nodeParams.onChainFeeConf, Some(feerates))
|
||||
val closingTxProposed1 = d.closingTxProposed match {
|
||||
case previousNegotiations :+ currentNegotiation => previousNegotiations :+ (currentNegotiation :+ ClosingTxProposed(closingTx, closingSigned))
|
||||
case previousNegotiations => previousNegotiations :+ List(ClosingTxProposed(closingTx, closingSigned))
|
||||
|
@ -1721,6 +1743,72 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
|
||||
})
|
||||
|
||||
when(NEGOTIATING_SIMPLE)(handleExceptions {
|
||||
case Event(shutdown: Shutdown, d: DATA_NEGOTIATING_SIMPLE) =>
|
||||
if (shutdown.scriptPubKey != d.remoteScriptPubKey) {
|
||||
// This may lead to a signature mismatch: peers must use closing_complete to update their closing script.
|
||||
log.warning("received shutdown changing remote script, this may lead to a signature mismatch: previous={}, current={}", d.remoteScriptPubKey, shutdown.scriptPubKey)
|
||||
stay() using d.copy(remoteScriptPubKey = shutdown.scriptPubKey) storing()
|
||||
} else {
|
||||
stay()
|
||||
}
|
||||
|
||||
case Event(c: CMD_CLOSE, d: DATA_NEGOTIATING_SIMPLE) =>
|
||||
val localScript = c.scriptPubKey.getOrElse(d.localScriptPubKey)
|
||||
val closingFeerate = c.feerates.map(_.preferred).getOrElse(nodeParams.onChainFeeConf.getClosingFeerate(nodeParams.currentBitcoinCoreFeerates))
|
||||
if (closingFeerate < d.lastClosingFeerate) {
|
||||
val err = InvalidRbfFeerate(d.channelId, closingFeerate, d.lastClosingFeerate * 1.2)
|
||||
handleCommandError(err, c)
|
||||
} else {
|
||||
MutualClose.makeSimpleClosingTx(nodeParams.currentBlockHeight, keyManager, d.commitments.latest, localScript, d.remoteScriptPubKey, closingFeerate) match {
|
||||
case Left(f) => handleCommandError(f, c)
|
||||
case Right((closingTxs, closingComplete)) =>
|
||||
log.debug("signing local mutual close transactions: {}", closingTxs)
|
||||
handleCommandSuccess(c, d.copy(lastClosingFeerate = closingFeerate, localScriptPubKey = localScript, proposedClosingTxs = d.proposedClosingTxs :+ closingTxs)) storing() sending closingComplete
|
||||
}
|
||||
}
|
||||
|
||||
case Event(closingComplete: ClosingComplete, d: DATA_NEGOTIATING_SIMPLE) =>
|
||||
// Note that if there is a failure here and we don't send our closing_sig, they may eventually disconnect.
|
||||
// On reconnection, we will retransmit shutdown with our latest scripts, so future signing attempts should work.
|
||||
if (closingComplete.closeeScriptPubKey != d.localScriptPubKey) {
|
||||
log.warning("their closing_complete is not using our latest script: this may happen if we changed our script while they were sending closing_complete")
|
||||
// No need to persist their latest script, they will re-sent it on reconnection.
|
||||
stay() using d.copy(remoteScriptPubKey = closingComplete.closerScriptPubKey) sending Warning(d.channelId, InvalidCloseeScript(d.channelId, closingComplete.closeeScriptPubKey, d.localScriptPubKey).getMessage)
|
||||
} else {
|
||||
MutualClose.signSimpleClosingTx(keyManager, d.commitments.latest, closingComplete.closeeScriptPubKey, closingComplete.closerScriptPubKey, closingComplete) match {
|
||||
case Left(f) =>
|
||||
log.warning("invalid closing_complete: {}", f.getMessage)
|
||||
stay() sending Warning(d.channelId, f.getMessage)
|
||||
case Right((signedClosingTx, closingSig)) =>
|
||||
log.debug("signing remote mutual close transaction: {}", signedClosingTx.tx)
|
||||
val d1 = d.copy(remoteScriptPubKey = closingComplete.closerScriptPubKey, publishedClosingTxs = d.publishedClosingTxs :+ signedClosingTx)
|
||||
stay() using d1 storing() calling doPublish(signedClosingTx, localPaysClosingFees = false) sending closingSig
|
||||
}
|
||||
}
|
||||
|
||||
case Event(closingSig: ClosingSig, d: DATA_NEGOTIATING_SIMPLE) =>
|
||||
// Note that if we sent two closing_complete in a row, without waiting for their closing_sig for the first one,
|
||||
// this will fail because we only care about our latest closing_complete. This is fine, we should receive their
|
||||
// closing_sig for the last closing_complete afterwards.
|
||||
MutualClose.receiveSimpleClosingSig(keyManager, d.commitments.latest, d.proposedClosingTxs.last, closingSig) match {
|
||||
case Left(f) =>
|
||||
log.warning("invalid closing_sig: {}", f.getMessage)
|
||||
stay() sending Warning(d.channelId, f.getMessage)
|
||||
case Right(signedClosingTx) =>
|
||||
log.debug("received signatures for local mutual close transaction: {}", signedClosingTx.tx)
|
||||
val d1 = d.copy(publishedClosingTxs = d.publishedClosingTxs :+ signedClosingTx)
|
||||
stay() using d1 storing() calling doPublish(signedClosingTx, localPaysClosingFees = true)
|
||||
}
|
||||
|
||||
case Event(_: AnnouncementSignatures, _: DATA_NEGOTIATING_SIMPLE) =>
|
||||
log.debug("ignoring announcement_signatures, we're negotiating closing transactions")
|
||||
stay()
|
||||
|
||||
case Event(e: Error, d: DATA_NEGOTIATING_SIMPLE) => handleRemoteError(e, d)
|
||||
|
||||
})
|
||||
|
||||
when(CLOSING)(handleExceptions {
|
||||
case Event(c: HtlcSettlementCommand, d: DATA_CLOSING) =>
|
||||
(c match {
|
||||
|
@ -1821,7 +1909,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
d.commitments.resolveCommitment(tx) match {
|
||||
case Some(commitment) =>
|
||||
log.warning("a commit tx for an older commitment has been published fundingTxId={} fundingTxIndex={}", tx.txid, commitment.fundingTxIndex)
|
||||
blockchain ! WatchAlternativeCommitTxConfirmed(self, tx.txid, nodeParams.channelConf.minDepthClosing)
|
||||
blockchain ! WatchAlternativeCommitTxConfirmed(self, tx.txid, nodeParams.channelConf.minDepthScaled(commitment.capacity))
|
||||
stay()
|
||||
case None =>
|
||||
// This must be a former funding tx that has already been pruned, because watches are unordered.
|
||||
|
@ -1887,10 +1975,10 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
stay()
|
||||
}
|
||||
|
||||
case Event(WatchOutputSpentTriggered(tx), d: DATA_CLOSING) =>
|
||||
case Event(WatchOutputSpentTriggered(amount, tx), d: DATA_CLOSING) =>
|
||||
// one of the outputs of the local/remote/revoked commit was spent
|
||||
// we just put a watch to be notified when it is confirmed
|
||||
blockchain ! WatchTxConfirmed(self, tx.txid, nodeParams.channelConf.minDepthClosing)
|
||||
blockchain ! WatchTxConfirmed(self, tx.txid, nodeParams.channelConf.minDepthScaled(amount))
|
||||
// when a remote or local commitment tx containing outgoing htlcs is published on the network,
|
||||
// we watch it in order to extract payment preimage if funds are pulled by the counterparty
|
||||
// we can then use these preimages to fulfill origin htlcs
|
||||
|
@ -1910,7 +1998,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
val revokedCommitPublished1 = d.revokedCommitPublished.map { rev =>
|
||||
val (rev1, penaltyTxs) = Closing.RevokedClose.claimHtlcTxOutputs(keyManager, d.commitments.params, d.commitments.remotePerCommitmentSecrets, rev, tx, nodeParams.currentBitcoinCoreFeerates, d.finalScriptPubKey)
|
||||
penaltyTxs.foreach(claimTx => txPublisher ! PublishFinalTx(claimTx, claimTx.fee, None))
|
||||
penaltyTxs.foreach(claimTx => blockchain ! WatchOutputSpent(self, tx.txid, claimTx.input.outPoint.index.toInt, hints = Set(claimTx.tx.txid)))
|
||||
penaltyTxs.foreach(claimTx => blockchain ! WatchOutputSpent(self, tx.txid, claimTx.input.outPoint.index.toInt, claimTx.amountIn, hints = Set(claimTx.tx.txid)))
|
||||
rev1
|
||||
}
|
||||
stay() using d.copy(revokedCommitPublished = revokedCommitPublished1) storing()
|
||||
|
@ -1925,7 +2013,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
val (localCommitPublished1, claimHtlcTx_opt) = Closing.LocalClose.claimHtlcDelayedOutput(localCommitPublished, keyManager, d.commitments.latest, tx, nodeParams.currentBitcoinCoreFeerates, nodeParams.onChainFeeConf, d.finalScriptPubKey)
|
||||
claimHtlcTx_opt.foreach(claimHtlcTx => {
|
||||
txPublisher ! PublishFinalTx(claimHtlcTx, claimHtlcTx.fee, None)
|
||||
blockchain ! WatchTxConfirmed(self, claimHtlcTx.tx.txid, nodeParams.channelConf.minDepthClosing, Some(RelativeDelay(tx.txid, d.commitments.params.remoteParams.toSelfDelay.toInt.toLong)))
|
||||
blockchain ! WatchTxConfirmed(self, claimHtlcTx.tx.txid, nodeParams.channelConf.minDepthScaled(claimHtlcTx.amountIn), Some(RelativeDelay(tx.txid, d.commitments.params.remoteParams.toSelfDelay.toInt.toLong)))
|
||||
})
|
||||
Closing.updateLocalCommitPublished(localCommitPublished1, tx)
|
||||
}),
|
||||
|
@ -1995,12 +2083,10 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
.onChainOutgoingHtlcs(d.commitments.latest.localCommit, d.commitments.latest.remoteCommit, d.commitments.latest.nextRemoteCommit_opt.map(_.commit), tx)
|
||||
.map(add => (add, d.commitments.originChannels.get(add.id).map(_.upstream).collect { case Upstream.Local(id) => id })) // we resolve the payment id if this was a local payment
|
||||
.collect { case (add, Some(id)) => context.system.eventStream.publish(PaymentSettlingOnChain(id, amount = add.amountMsat, add.paymentHash)) }
|
||||
// then let's see if any of the possible close scenarios can be considered done
|
||||
val closingType_opt = Closing.isClosed(d1, Some(tx))
|
||||
// finally, if one of the unilateral closes is done, we move to CLOSED state, otherwise we stay()
|
||||
closingType_opt match {
|
||||
Closing.isClosed(d1, Some(tx)) match {
|
||||
case Some(closingType) =>
|
||||
log.info(s"channel closed (type=${closingType_opt.map(c => EventType.Closed(c).label).getOrElse("UnknownYet")})")
|
||||
log.info("channel closed (type={})", EventType.Closed(closingType).label)
|
||||
context.system.eventStream.publish(ChannelClosed(self, d.channelId, closingType, d.commitments))
|
||||
goto(CLOSED) using d1 storing()
|
||||
case None =>
|
||||
|
@ -2405,6 +2491,11 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
goto(NEGOTIATING) using d.copy(closingTxProposed = closingTxProposed1) sending d.localShutdown
|
||||
}
|
||||
|
||||
case Event(_: ChannelReestablish, d: DATA_NEGOTIATING_SIMPLE) =>
|
||||
// We retransmit our shutdown: we may have updated our script and they may not have received it.
|
||||
val localShutdown = Shutdown(d.channelId, d.localScriptPubKey)
|
||||
goto(NEGOTIATING_SIMPLE) using d sending localShutdown
|
||||
|
||||
// This handler is a workaround for an issue in lnd: starting with versions 0.10 / 0.11, they sometimes fail to send
|
||||
// a channel_reestablish when reconnecting a channel that recently got confirmed, and instead send a channel_ready
|
||||
// first and then go silent. This is due to a race condition on their side, so we trigger a reconnection, hoping that
|
||||
|
@ -2545,7 +2636,8 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
d.commitments.updateLocalFundingStatus(w.tx.txid, fundingStatus, lastAnnouncedFundingTxId_opt) match {
|
||||
case Right((commitments1, _)) =>
|
||||
log.info("zero-conf funding txid={} has been published", w.tx.txid)
|
||||
watchFundingConfirmed(w.tx.txid, Some(nodeParams.channelConf.minDepthFunding), delay_opt = None)
|
||||
// This is a zero-conf channel, the min-depth isn't critical: we use the default.
|
||||
watchFundingConfirmed(w.tx.txid, Some(nodeParams.channelConf.minDepth), delay_opt = None)
|
||||
val d1 = d match {
|
||||
// NB: we discard remote's stashed channel_ready, they will send it back at reconnection
|
||||
case d: DATA_WAIT_FOR_FUNDING_CONFIRMED => DATA_WAIT_FOR_CHANNEL_READY(commitments1, aliases = createShortIdAliases(d.channelId))
|
||||
|
@ -2555,6 +2647,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
case d: DATA_NORMAL => d.copy(commitments = commitments1)
|
||||
case d: DATA_SHUTDOWN => d.copy(commitments = commitments1)
|
||||
case d: DATA_NEGOTIATING => d.copy(commitments = commitments1)
|
||||
case d: DATA_NEGOTIATING_SIMPLE => d.copy(commitments = commitments1)
|
||||
case d: DATA_WAIT_FOR_REMOTE_PUBLISH_FUTURE_COMMITMENT => d.copy(commitments = commitments1)
|
||||
case d: DATA_CLOSING => d.copy(commitments = commitments1)
|
||||
}
|
||||
|
@ -2576,6 +2669,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
case d: DATA_NORMAL => d.copy(commitments = commitments1)
|
||||
case d: DATA_SHUTDOWN => d.copy(commitments = commitments1)
|
||||
case d: DATA_NEGOTIATING => d.copy(commitments = commitments1)
|
||||
case d: DATA_NEGOTIATING_SIMPLE => d.copy(commitments = commitments1)
|
||||
case d: DATA_WAIT_FOR_REMOTE_PUBLISH_FUTURE_COMMITMENT => d.copy(commitments = commitments1)
|
||||
case d: DATA_CLOSING => d // there is a dedicated handler in CLOSING state
|
||||
}
|
||||
|
@ -2592,6 +2686,25 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
// if we were in the process of closing and already received a closing sig from the counterparty, it's always better to use that
|
||||
handleMutualClose(d.bestUnpublishedClosingTx_opt.get, Left(d))
|
||||
|
||||
case Event(WatchFundingSpentTriggered(tx), d: DATA_NEGOTIATING_SIMPLE) if d.findClosingTx(tx).nonEmpty =>
|
||||
if (!d.publishedClosingTxs.exists(_.tx.txid == tx.txid)) {
|
||||
// They published one of our closing transactions without sending us their signature (or we ignored them because
|
||||
// of a race with our closing_complete). We need to publish it ourselves to record the fees and watch for confirmation.
|
||||
val closingTx = d.findClosingTx(tx).get.copy(tx = tx)
|
||||
stay() using d.copy(publishedClosingTxs = d.publishedClosingTxs :+ closingTx) storing() calling doPublish(closingTx, localPaysClosingFees = true)
|
||||
} else {
|
||||
// This is one of the transactions we published.
|
||||
val closingTx = d.findClosingTx(tx).get
|
||||
blockchain ! WatchTxConfirmed(self, tx.txid, nodeParams.channelConf.minDepthScaled(closingTx.amountIn))
|
||||
stay()
|
||||
}
|
||||
|
||||
case Event(WatchTxConfirmedTriggered(_, _, tx), d: DATA_NEGOTIATING_SIMPLE) if d.findClosingTx(tx).nonEmpty =>
|
||||
val closingType = MutualClose(d.findClosingTx(tx).get)
|
||||
log.info("channel closed (type={})", EventType.Closed(closingType).label)
|
||||
context.system.eventStream.publish(ChannelClosed(self, d.channelId, closingType, d.commitments))
|
||||
goto(CLOSED) using d storing()
|
||||
|
||||
case Event(WatchFundingSpentTriggered(tx), d: ChannelDataWithCommitments) =>
|
||||
if (d.commitments.all.map(_.fundingTxId).contains(tx.txid)) {
|
||||
// if the spending tx is itself a funding tx, this is a splice and there is nothing to do
|
||||
|
@ -2609,8 +2722,8 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
d.commitments.resolveCommitment(tx) match {
|
||||
case Some(commitment) =>
|
||||
log.warning("a commit tx for an older commitment has been published fundingTxId={} fundingTxIndex={}", tx.txid, commitment.fundingTxIndex)
|
||||
// we watch the commitment tx, in the meantime we force close using the latest commitment
|
||||
blockchain ! WatchAlternativeCommitTxConfirmed(self, tx.txid, nodeParams.channelConf.minDepthClosing)
|
||||
// We watch the commitment tx, in the meantime we force close using the latest commitment.
|
||||
blockchain ! WatchAlternativeCommitTxConfirmed(self, tx.txid, nodeParams.channelConf.minDepthScaled(commitment.capacity))
|
||||
spendLocalCurrent(d)
|
||||
case None =>
|
||||
// This must be a former funding tx that has already been pruned, because watches are unordered.
|
||||
|
@ -2665,7 +2778,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
case (SYNCING, NORMAL, d1: DATA_NORMAL, d2: DATA_NORMAL) if d1.channelUpdate != d2.channelUpdate || d1.lastAnnouncement_opt != d2.lastAnnouncement_opt => Some(EmitLocalChannelUpdate("syncing->normal", d2, sendToPeer = d2.lastAnnouncement_opt.isEmpty))
|
||||
case (NORMAL, OFFLINE, d1: DATA_NORMAL, d2: DATA_NORMAL) if d1.channelUpdate != d2.channelUpdate || d1.lastAnnouncement_opt != d2.lastAnnouncement_opt => Some(EmitLocalChannelUpdate("normal->offline", d2, sendToPeer = false))
|
||||
case (OFFLINE, OFFLINE, d1: DATA_NORMAL, d2: DATA_NORMAL) if d1.channelUpdate != d2.channelUpdate || d1.lastAnnouncement_opt != d2.lastAnnouncement_opt => Some(EmitLocalChannelUpdate("offline->offline", d2, sendToPeer = false))
|
||||
case (NORMAL | SYNCING | OFFLINE, SHUTDOWN | NEGOTIATING | CLOSING | CLOSED | ERR_INFORMATION_LEAK | WAIT_FOR_REMOTE_PUBLISH_FUTURE_COMMITMENT, d: DATA_NORMAL, _) => Some(EmitLocalChannelDown(d))
|
||||
case (NORMAL | SYNCING | OFFLINE, SHUTDOWN | NEGOTIATING | NEGOTIATING_SIMPLE| CLOSING | CLOSED | ERR_INFORMATION_LEAK | WAIT_FOR_REMOTE_PUBLISH_FUTURE_COMMITMENT, d: DATA_NORMAL, _) => Some(EmitLocalChannelDown(d))
|
||||
case _ => None
|
||||
}
|
||||
emitEvent_opt.foreach {
|
||||
|
@ -3014,7 +3127,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
|
|||
if (fundingContribution < 0.sat && parentCommitment.localCommit.spec.toLocal + fundingContribution < parentCommitment.localChannelReserve(d.commitments.params).max(commitTxFees)) {
|
||||
log.warning(s"cannot do splice: insufficient funds (commitTxFees=$commitTxFees reserve=${parentCommitment.localChannelReserve(d.commitments.params)})")
|
||||
Left(InvalidSpliceRequest(d.channelId))
|
||||
} else if (cmd.spliceOut_opt.map(_.scriptPubKey).exists(!MutualClose.isValidFinalScriptPubkey(_, allowAnySegwit = true))) {
|
||||
} else if (cmd.spliceOut_opt.map(_.scriptPubKey).exists(!MutualClose.isValidFinalScriptPubkey(_, allowAnySegwit = true, allowOpReturn = false))) {
|
||||
log.warning("cannot do splice: invalid splice-out script")
|
||||
Left(InvalidSpliceRequest(d.channelId))
|
||||
} else {
|
||||
|
|
|
@ -173,7 +173,7 @@ trait ChannelOpenDualFunded extends DualFundingHandlers with ErrorHandlers {
|
|||
// At this point, the min_depth is an estimate and may change after we know exactly how our peer contributes
|
||||
// to the funding transaction. Maybe they will contribute 0 satoshis to the shared output, but still add inputs
|
||||
// and outputs.
|
||||
val minDepth_opt = channelParams.minDepthFundee(nodeParams.channelConf.minDepthFunding, localAmount + remoteAmount)
|
||||
val minDepth_opt = channelParams.minDepthFundee(nodeParams.channelConf.minDepth, localAmount + remoteAmount)
|
||||
val upfrontShutdownScript_opt = localParams.upfrontShutdownScript_opt.map(scriptPubKey => ChannelTlv.UpfrontShutdownScriptTlv(scriptPubKey))
|
||||
val tlvs: Set[AcceptDualFundedChannelTlv] = Set(
|
||||
upfrontShutdownScript_opt,
|
||||
|
@ -390,7 +390,7 @@ trait ChannelOpenDualFunded extends DualFundingHandlers with ErrorHandlers {
|
|||
// We don't have their tx_sigs, but they have ours, and could publish the funding tx without telling us.
|
||||
// That's why we move on immediately to the next step, and will update our unsigned funding tx when we
|
||||
// receive their tx_sigs.
|
||||
val minDepth_opt = d.channelParams.minDepthDualFunding(nodeParams.channelConf.minDepthFunding, signingSession1.fundingTx.sharedTx.tx)
|
||||
val minDepth_opt = d.channelParams.minDepthDualFunding(nodeParams.channelConf.minDepth, signingSession1.fundingTx.sharedTx.tx)
|
||||
watchFundingConfirmed(d.signingSession.fundingTx.txId, minDepth_opt, delay_opt = None)
|
||||
val commitments = Commitments(
|
||||
params = d.channelParams,
|
||||
|
@ -413,7 +413,7 @@ trait ChannelOpenDualFunded extends DualFundingHandlers with ErrorHandlers {
|
|||
rollbackFundingAttempt(d.signingSession.fundingTx.tx, Nil)
|
||||
goto(CLOSED) sending Error(d.channelId, f.getMessage)
|
||||
case Right(signingSession) =>
|
||||
val minDepth_opt = d.channelParams.minDepthDualFunding(nodeParams.channelConf.minDepthFunding, signingSession.fundingTx.sharedTx.tx)
|
||||
val minDepth_opt = d.channelParams.minDepthDualFunding(nodeParams.channelConf.minDepth, signingSession.fundingTx.sharedTx.tx)
|
||||
watchFundingConfirmed(d.signingSession.fundingTx.txId, minDepth_opt, delay_opt = None)
|
||||
val commitments = Commitments(
|
||||
params = d.channelParams,
|
||||
|
@ -478,7 +478,7 @@ trait ChannelOpenDualFunded extends DualFundingHandlers with ErrorHandlers {
|
|||
rollbackRbfAttempt(signingSession, d)
|
||||
stay() using d.copy(status = DualFundingStatus.RbfAborted) sending TxAbort(d.channelId, f.getMessage)
|
||||
case Right(signingSession1) =>
|
||||
val minDepth_opt = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepthFunding, signingSession1.fundingTx.sharedTx.tx)
|
||||
val minDepth_opt = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepth, signingSession1.fundingTx.sharedTx.tx)
|
||||
watchFundingConfirmed(signingSession.fundingTx.txId, minDepth_opt, delay_opt = None)
|
||||
val commitments1 = d.commitments.add(signingSession1.commitment)
|
||||
val d1 = DATA_WAIT_FOR_DUAL_FUNDING_CONFIRMED(commitments1, d.localPushAmount, d.remotePushAmount, d.waitingSince, d.lastChecked, DualFundingStatus.WaitingForConfirmations, d.deferred)
|
||||
|
@ -495,7 +495,7 @@ trait ChannelOpenDualFunded extends DualFundingHandlers with ErrorHandlers {
|
|||
}
|
||||
|
||||
case Event(cmd: CMD_BUMP_FUNDING_FEE, d: DATA_WAIT_FOR_DUAL_FUNDING_CONFIRMED) =>
|
||||
val zeroConf = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepthFunding, d.latestFundingTx.sharedTx.tx).isEmpty
|
||||
val zeroConf = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepth, d.latestFundingTx.sharedTx.tx).isEmpty
|
||||
if (!d.latestFundingTx.fundingParams.isInitiator) {
|
||||
cmd.replyTo ! RES_FAILURE(cmd, InvalidRbfNonInitiator(d.channelId))
|
||||
stay()
|
||||
|
@ -524,7 +524,7 @@ trait ChannelOpenDualFunded extends DualFundingHandlers with ErrorHandlers {
|
|||
}
|
||||
|
||||
case Event(msg: TxInitRbf, d: DATA_WAIT_FOR_DUAL_FUNDING_CONFIRMED) =>
|
||||
val zeroConf = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepthFunding, d.latestFundingTx.sharedTx.tx).isEmpty
|
||||
val zeroConf = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepth, d.latestFundingTx.sharedTx.tx).isEmpty
|
||||
if (d.latestFundingTx.fundingParams.isInitiator) {
|
||||
// Only the initiator is allowed to initiate RBF.
|
||||
log.info("rejecting tx_init_rbf, we're the initiator, not them!")
|
||||
|
@ -661,7 +661,7 @@ trait ChannelOpenDualFunded extends DualFundingHandlers with ErrorHandlers {
|
|||
// No need to store their commit_sig, they will re-send it if we disconnect.
|
||||
stay() using d.copy(status = DualFundingStatus.RbfWaitingForSigs(signingSession1))
|
||||
case signingSession1: InteractiveTxSigningSession.SendingSigs =>
|
||||
val minDepth_opt = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepthFunding, signingSession1.fundingTx.sharedTx.tx)
|
||||
val minDepth_opt = d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepth, signingSession1.fundingTx.sharedTx.tx)
|
||||
watchFundingConfirmed(signingSession.fundingTx.txId, minDepth_opt, delay_opt = None)
|
||||
val commitments1 = d.commitments.add(signingSession1.commitment)
|
||||
val d1 = DATA_WAIT_FOR_DUAL_FUNDING_CONFIRMED(commitments1, d.localPushAmount, d.remotePushAmount, d.waitingSince, d.lastChecked, DualFundingStatus.WaitingForConfirmations, d.deferred)
|
||||
|
@ -726,8 +726,9 @@ trait ChannelOpenDualFunded extends DualFundingHandlers with ErrorHandlers {
|
|||
val fundingStatus = LocalFundingStatus.ZeroconfPublishedFundingTx(w.tx, d.commitments.localFundingSigs(w.tx.txid), d.commitments.liquidityPurchase(w.tx.txid))
|
||||
d.commitments.updateLocalFundingStatus(w.tx.txid, fundingStatus, lastAnnouncedFundingTxId_opt = None) match {
|
||||
case Right((commitments1, _)) =>
|
||||
// we still watch the funding tx for confirmation even if we can use the zero-conf channel right away
|
||||
watchFundingConfirmed(w.tx.txid, Some(nodeParams.channelConf.minDepthFunding), delay_opt = None)
|
||||
// We still watch the funding tx for confirmation even if we can use the zero-conf channel right away.
|
||||
// But since this is a zero-conf channel, the minimum depth isn't critical: we use the default one.
|
||||
watchFundingConfirmed(w.tx.txid, Some(nodeParams.channelConf.minDepth), delay_opt = None)
|
||||
val shortIds = createShortIdAliases(d.channelId)
|
||||
val channelReady = createChannelReady(shortIds, d.commitments.params)
|
||||
d.deferred.foreach(self ! _)
|
||||
|
|
|
@ -128,7 +128,7 @@ trait ChannelOpenSingleFunded extends SingleFundingHandlers with ErrorHandlers {
|
|||
val fundingPubkey = keyManager.fundingPublicKey(d.initFundee.localParams.fundingKeyPath, fundingTxIndex = 0).publicKey
|
||||
val channelKeyPath = keyManager.keyPath(d.initFundee.localParams, d.initFundee.channelConfig)
|
||||
val params = ChannelParams(d.initFundee.temporaryChannelId, d.initFundee.channelConfig, channelFeatures, d.initFundee.localParams, remoteParams, open.channelFlags)
|
||||
val minimumDepth = params.minDepthFundee(nodeParams.channelConf.minDepthFunding, open.fundingSatoshis)
|
||||
val minimumDepth = params.minDepthFundee(nodeParams.channelConf.minDepth, open.fundingSatoshis)
|
||||
log.info("will use fundingMinDepth={}", minimumDepth)
|
||||
// In order to allow TLV extensions and keep backwards-compatibility, we include an empty upfront_shutdown_script if this feature is not used.
|
||||
// See https://github.com/lightningnetwork/lightning-rfc/pull/714.
|
||||
|
@ -297,7 +297,7 @@ trait ChannelOpenSingleFunded extends SingleFundingHandlers with ErrorHandlers {
|
|||
context.system.eventStream.publish(ChannelSignatureReceived(self, commitments))
|
||||
// NB: we don't send a ChannelSignatureSent for the first commit
|
||||
log.info("waiting for them to publish the funding tx for channelId={} fundingTxid={}", channelId, commitment.fundingTxId)
|
||||
watchFundingConfirmed(commitment.fundingTxId, params.minDepthFundee(nodeParams.channelConf.minDepthFunding, fundingAmount), delay_opt = None)
|
||||
watchFundingConfirmed(commitment.fundingTxId, params.minDepthFundee(nodeParams.channelConf.minDepth, fundingAmount), delay_opt = None)
|
||||
goto(WAIT_FOR_FUNDING_CONFIRMED) using DATA_WAIT_FOR_FUNDING_CONFIRMED(commitments, nodeParams.currentBlockHeight, None, Right(fundingSigned)) storing() sending fundingSigned
|
||||
}
|
||||
}
|
||||
|
@ -395,8 +395,9 @@ trait ChannelOpenSingleFunded extends SingleFundingHandlers with ErrorHandlers {
|
|||
d.commitments.updateLocalFundingStatus(w.tx.txid, fundingStatus, lastAnnouncedFundingTxId_opt = None) match {
|
||||
case Right((commitments1, _)) =>
|
||||
log.info("funding txid={} was successfully published for zero-conf channelId={}", w.tx.txid, d.channelId)
|
||||
// we still watch the funding tx for confirmation even if we can use the zero-conf channel right away
|
||||
watchFundingConfirmed(w.tx.txid, Some(nodeParams.channelConf.minDepthFunding), delay_opt = None)
|
||||
// We still watch the funding tx for confirmation even if we can use the zero-conf channel right away.
|
||||
// But since this is a zero-conf channel, the minimum depth isn't critical: we use the default one.
|
||||
watchFundingConfirmed(w.tx.txid, Some(nodeParams.channelConf.minDepth), delay_opt = None)
|
||||
val shortIds = createShortIdAliases(d.channelId)
|
||||
val channelReady = createChannelReady(shortIds, d.commitments.params)
|
||||
d.deferred.foreach(self ! _)
|
||||
|
|
|
@ -16,13 +16,14 @@
|
|||
|
||||
package fr.acinq.eclair.channel.fsm
|
||||
|
||||
import akka.actor.{ActorRef, FSM, Status}
|
||||
import akka.actor.FSM
|
||||
import fr.acinq.bitcoin.scalacompat.{ByteVector32, Script}
|
||||
import fr.acinq.eclair.Features
|
||||
import fr.acinq.eclair.channel.Helpers.Closing.MutualClose
|
||||
import fr.acinq.eclair.channel._
|
||||
import fr.acinq.eclair.db.PendingCommandsDb
|
||||
import fr.acinq.eclair.io.Peer
|
||||
import fr.acinq.eclair.wire.protocol.{HtlcSettlementMessage, LightningMessage, UpdateMessage}
|
||||
import fr.acinq.eclair.wire.protocol.{ClosingComplete, HtlcSettlementMessage, LightningMessage, Shutdown, UpdateMessage}
|
||||
import scodec.bits.ByteVector
|
||||
|
||||
import scala.concurrent.duration.DurationInt
|
||||
|
@ -106,6 +107,7 @@ trait CommonHandlers {
|
|||
case d: DATA_NORMAL if d.localShutdown.isDefined => d.localShutdown.get.scriptPubKey
|
||||
case d: DATA_SHUTDOWN => d.localShutdown.scriptPubKey
|
||||
case d: DATA_NEGOTIATING => d.localShutdown.scriptPubKey
|
||||
case d: DATA_NEGOTIATING_SIMPLE => d.localScriptPubKey
|
||||
case d: DATA_CLOSING => d.finalScriptPubKey
|
||||
case d =>
|
||||
d.commitments.params.localParams.upfrontShutdownScript_opt match {
|
||||
|
@ -130,4 +132,20 @@ trait CommonHandlers {
|
|||
finalScriptPubKey
|
||||
}
|
||||
|
||||
def startSimpleClose(commitments: Commitments, localShutdown: Shutdown, remoteShutdown: Shutdown, closingFeerates: Option[ClosingFeerates]): (DATA_NEGOTIATING_SIMPLE, Option[ClosingComplete]) = {
|
||||
val localScript = localShutdown.scriptPubKey
|
||||
val remoteScript = remoteShutdown.scriptPubKey
|
||||
val closingFeerate = closingFeerates.map(_.preferred).getOrElse(nodeParams.onChainFeeConf.getClosingFeerate(nodeParams.currentBitcoinCoreFeerates))
|
||||
MutualClose.makeSimpleClosingTx(nodeParams.currentBlockHeight, keyManager, commitments.latest, localScript, remoteScript, closingFeerate) match {
|
||||
case Left(f) =>
|
||||
log.warning("cannot create local closing txs, waiting for remote closing_complete: {}", f.getMessage)
|
||||
val d = DATA_NEGOTIATING_SIMPLE(commitments, closingFeerate, localScript, remoteScript, Nil, Nil)
|
||||
(d, None)
|
||||
case Right((closingTxs, closingComplete)) =>
|
||||
log.debug("signing local mutual close transactions: {}", closingTxs)
|
||||
val d = DATA_NEGOTIATING_SIMPLE(commitments, closingFeerate, localScript, remoteScript, closingTxs :: Nil, Nil)
|
||||
(d, Some(closingComplete))
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -58,7 +58,7 @@ trait DualFundingHandlers extends CommonFundingHandlers {
|
|||
|
||||
/** Return true if we should stop waiting for confirmations when receiving our peer's channel_ready. */
|
||||
def switchToZeroConf(remoteChannelReady: ChannelReady, d: DATA_WAIT_FOR_DUAL_FUNDING_CONFIRMED): Boolean = {
|
||||
if (d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepthFunding, d.latestFundingTx.sharedTx.tx).nonEmpty) {
|
||||
if (d.commitments.params.minDepthDualFunding(nodeParams.channelConf.minDepth, d.latestFundingTx.sharedTx.tx).nonEmpty) {
|
||||
// We're not using zero-conf, but our peer decided to trust us anyway. We can skip waiting for confirmations if:
|
||||
// - they provided a channel alias
|
||||
// - there is a single version of the funding tx (otherwise we don't know which one to use)
|
||||
|
|
|
@ -59,10 +59,9 @@ trait ErrorHandlers extends CommonHandlers {
|
|||
}
|
||||
|
||||
def doPublish(closingTx: ClosingTx, localPaysClosingFees: Boolean): Unit = {
|
||||
// the initiator pays the fee
|
||||
val fee = if (localPaysClosingFees) closingTx.fee else 0.sat
|
||||
txPublisher ! PublishFinalTx(closingTx, fee, None)
|
||||
blockchain ! WatchTxConfirmed(self, closingTx.tx.txid, nodeParams.channelConf.minDepthClosing)
|
||||
blockchain ! WatchTxConfirmed(self, closingTx.tx.txid, nodeParams.channelConf.minDepthScaled(closingTx.amountIn))
|
||||
}
|
||||
|
||||
def handleLocalError(cause: Throwable, d: ChannelData, msg: Option[Any]) = {
|
||||
|
@ -87,6 +86,10 @@ trait ErrorHandlers extends CommonHandlers {
|
|||
log.info(s"we have a valid closing tx, publishing it instead of our commitment: closingTxId=${bestUnpublishedClosingTx.tx.txid}")
|
||||
// if we were in the process of closing and already received a closing sig from the counterparty, it's always better to use that
|
||||
handleMutualClose(bestUnpublishedClosingTx, Left(negotiating))
|
||||
case negotiating: DATA_NEGOTIATING_SIMPLE if negotiating.publishedClosingTxs.nonEmpty =>
|
||||
// We have published at least one mutual close transaction, it's better to use it instead of our local commit.
|
||||
val closing = DATA_CLOSING(negotiating.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = negotiating.localScriptPubKey, mutualCloseProposed = negotiating.proposedClosingTxs.flatMap(_.all), mutualClosePublished = negotiating.publishedClosingTxs)
|
||||
goto(CLOSING) using closing storing()
|
||||
case dd: ChannelDataWithCommitments =>
|
||||
// We publish our commitment even if we have nothing at stake: it's a nice thing to do because it lets our peer
|
||||
// get their funds back without delays.
|
||||
|
@ -133,6 +136,10 @@ trait ErrorHandlers extends CommonHandlers {
|
|||
case negotiating@DATA_NEGOTIATING(_, _, _, _, Some(bestUnpublishedClosingTx)) =>
|
||||
// if we were in the process of closing and already received a closing sig from the counterparty, it's always better to use that
|
||||
handleMutualClose(bestUnpublishedClosingTx, Left(negotiating))
|
||||
case negotiating: DATA_NEGOTIATING_SIMPLE if negotiating.publishedClosingTxs.nonEmpty =>
|
||||
// We have published at least one mutual close transaction, it's better to use it instead of our local commit.
|
||||
val closing = DATA_CLOSING(negotiating.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = negotiating.localScriptPubKey, mutualCloseProposed = negotiating.proposedClosingTxs.flatMap(_.all), mutualClosePublished = negotiating.publishedClosingTxs)
|
||||
goto(CLOSING) using closing storing()
|
||||
// NB: we publish the commitment even if we have nothing at stake (in a dataloss situation our peer will send us an error just for that)
|
||||
case hasCommitments: ChannelDataWithCommitments =>
|
||||
if (e.toAscii == "internal error") {
|
||||
|
@ -173,7 +180,11 @@ trait ErrorHandlers extends CommonHandlers {
|
|||
*/
|
||||
private def watchConfirmedIfNeeded(txs: Iterable[Transaction], irrevocablySpent: Map[OutPoint, Transaction], relativeDelays: Map[TxId, RelativeDelay]): Unit = {
|
||||
val (skip, process) = txs.partition(Closing.inputsAlreadySpent(_, irrevocablySpent))
|
||||
process.foreach(tx => blockchain ! WatchTxConfirmed(self, tx.txid, nodeParams.channelConf.minDepthClosing, relativeDelays.get(tx.txid)))
|
||||
process.foreach(tx => {
|
||||
// Those are channel force-close transactions, which don't include a change output: every output is potentially at stake.
|
||||
val minDepth = nodeParams.channelConf.minDepthScaled(tx.txOut.map(_.amount).sum)
|
||||
blockchain ! WatchTxConfirmed(self, tx.txid, minDepth, relativeDelays.get(tx.txid))
|
||||
})
|
||||
skip.foreach(tx => log.debug(s"no need to watch txid=${tx.txid}, it has already been confirmed"))
|
||||
}
|
||||
|
||||
|
@ -188,7 +199,7 @@ trait ErrorHandlers extends CommonHandlers {
|
|||
require(output.txid == parentTx.txid && output.index < parentTx.txOut.size, s"output doesn't belong to the given parentTx: output=${output.txid}:${output.index} (expected txid=${parentTx.txid} index < ${parentTx.txOut.size})")
|
||||
}
|
||||
val (skip, process) = outputs.partition(irrevocablySpent.contains)
|
||||
process.foreach(output => blockchain ! WatchOutputSpent(self, parentTx.txid, output.index.toInt, Set.empty))
|
||||
process.foreach(output => blockchain ! WatchOutputSpent(self, parentTx.txid, output.index.toInt, parentTx.txOut(output.index.toInt).amount, Set.empty))
|
||||
skip.foreach(output => log.debug(s"no need to watch output=${output.txid}:${output.index}, it has already been spent by txid=${irrevocablySpent.get(output).map(_.txid)}"))
|
||||
}
|
||||
|
||||
|
@ -211,6 +222,7 @@ trait ErrorHandlers extends CommonHandlers {
|
|||
val nextData = d match {
|
||||
case closing: DATA_CLOSING => closing.copy(localCommitPublished = Some(localCommitPublished))
|
||||
case negotiating: DATA_NEGOTIATING => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, negotiating.closingTxProposed.flatten.map(_.unsignedTx), localCommitPublished = Some(localCommitPublished))
|
||||
case negotiating: DATA_NEGOTIATING_SIMPLE => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = negotiating.proposedClosingTxs.flatMap(_.all), mutualClosePublished = negotiating.publishedClosingTxs, localCommitPublished = Some(localCommitPublished))
|
||||
case _ => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = Nil, localCommitPublished = Some(localCommitPublished))
|
||||
}
|
||||
goto(CLOSING) using nextData storing() calling doPublish(localCommitPublished, commitment)
|
||||
|
@ -224,11 +236,11 @@ trait ErrorHandlers extends CommonHandlers {
|
|||
val publishQueue = commitment.params.commitmentFormat match {
|
||||
case Transactions.DefaultCommitmentFormat =>
|
||||
val redeemableHtlcTxs = htlcTxs.values.flatten.map(tx => PublishFinalTx(tx, tx.fee, Some(commitTx.txid)))
|
||||
List(PublishFinalTx(commitTx, commitment.commitInput.outPoint, "commit-tx", Closing.commitTxFee(commitment.commitInput, commitTx, localPaysCommitTxFees), None)) ++ (claimMainDelayedOutputTx.map(tx => PublishFinalTx(tx, tx.fee, None)) ++ redeemableHtlcTxs ++ claimHtlcDelayedTxs.map(tx => PublishFinalTx(tx, tx.fee, None)))
|
||||
List(PublishFinalTx(commitTx, commitment.commitInput.outPoint, commitment.capacity, "commit-tx", Closing.commitTxFee(commitment.commitInput, commitTx, localPaysCommitTxFees), None)) ++ (claimMainDelayedOutputTx.map(tx => PublishFinalTx(tx, tx.fee, None)) ++ redeemableHtlcTxs ++ claimHtlcDelayedTxs.map(tx => PublishFinalTx(tx, tx.fee, None)))
|
||||
case _: Transactions.AnchorOutputsCommitmentFormat =>
|
||||
val redeemableHtlcTxs = htlcTxs.values.flatten.map(tx => PublishReplaceableTx(tx, commitment))
|
||||
val claimLocalAnchor = claimAnchorTxs.collect { case tx: Transactions.ClaimLocalAnchorOutputTx if !localCommitPublished.isConfirmed => PublishReplaceableTx(tx, commitment) }
|
||||
List(PublishFinalTx(commitTx, commitment.commitInput.outPoint, "commit-tx", Closing.commitTxFee(commitment.commitInput, commitTx, localPaysCommitTxFees), None)) ++ claimLocalAnchor ++ claimMainDelayedOutputTx.map(tx => PublishFinalTx(tx, tx.fee, None)) ++ redeemableHtlcTxs ++ claimHtlcDelayedTxs.map(tx => PublishFinalTx(tx, tx.fee, None))
|
||||
List(PublishFinalTx(commitTx, commitment.commitInput.outPoint, commitment.capacity, "commit-tx", Closing.commitTxFee(commitment.commitInput, commitTx, localPaysCommitTxFees), None)) ++ claimLocalAnchor ++ claimMainDelayedOutputTx.map(tx => PublishFinalTx(tx, tx.fee, None)) ++ redeemableHtlcTxs ++ claimHtlcDelayedTxs.map(tx => PublishFinalTx(tx, tx.fee, None))
|
||||
}
|
||||
publishIfNeeded(publishQueue, irrevocablySpent)
|
||||
|
||||
|
@ -257,6 +269,7 @@ trait ErrorHandlers extends CommonHandlers {
|
|||
val nextData = d match {
|
||||
case closing: DATA_CLOSING => closing.copy(remoteCommitPublished = Some(remoteCommitPublished))
|
||||
case negotiating: DATA_NEGOTIATING => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = negotiating.closingTxProposed.flatten.map(_.unsignedTx), remoteCommitPublished = Some(remoteCommitPublished))
|
||||
case negotiating: DATA_NEGOTIATING_SIMPLE => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = negotiating.proposedClosingTxs.flatMap(_.all), mutualClosePublished = negotiating.publishedClosingTxs, remoteCommitPublished = Some(remoteCommitPublished))
|
||||
case _ => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = Nil, remoteCommitPublished = Some(remoteCommitPublished))
|
||||
}
|
||||
goto(CLOSING) using nextData storing() calling doPublish(remoteCommitPublished, commitments)
|
||||
|
@ -275,6 +288,7 @@ trait ErrorHandlers extends CommonHandlers {
|
|||
val nextData = d match {
|
||||
case closing: DATA_CLOSING => closing.copy(nextRemoteCommitPublished = Some(remoteCommitPublished))
|
||||
case negotiating: DATA_NEGOTIATING => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = negotiating.closingTxProposed.flatten.map(_.unsignedTx), nextRemoteCommitPublished = Some(remoteCommitPublished))
|
||||
case negotiating: DATA_NEGOTIATING_SIMPLE => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = negotiating.proposedClosingTxs.flatMap(_.all), mutualClosePublished = negotiating.publishedClosingTxs, remoteCommitPublished = Some(remoteCommitPublished))
|
||||
// NB: if there is a next commitment, we can't be in DATA_WAIT_FOR_FUNDING_CONFIRMED so we don't have the case where fundingTx is defined
|
||||
case _ => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = Nil, nextRemoteCommitPublished = Some(remoteCommitPublished))
|
||||
}
|
||||
|
@ -314,6 +328,7 @@ trait ErrorHandlers extends CommonHandlers {
|
|||
val nextData = d match {
|
||||
case closing: DATA_CLOSING => closing.copy(revokedCommitPublished = closing.revokedCommitPublished :+ revokedCommitPublished)
|
||||
case negotiating: DATA_NEGOTIATING => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = negotiating.closingTxProposed.flatten.map(_.unsignedTx), revokedCommitPublished = revokedCommitPublished :: Nil)
|
||||
case negotiating: DATA_NEGOTIATING_SIMPLE => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = negotiating.proposedClosingTxs.flatMap(_.all), mutualClosePublished = negotiating.publishedClosingTxs, revokedCommitPublished = revokedCommitPublished :: Nil)
|
||||
// NB: if there is a revoked commitment, we can't be in DATA_WAIT_FOR_FUNDING_CONFIRMED so we don't have the case where fundingTx is defined
|
||||
case _ => DATA_CLOSING(d.commitments, waitingSince = nodeParams.currentBlockHeight, finalScriptPubKey = finalScriptPubKey, mutualCloseProposed = Nil, revokedCommitPublished = revokedCommitPublished :: Nil)
|
||||
}
|
||||
|
|
|
@ -78,8 +78,8 @@ trait SingleFundingHandlers extends CommonFundingHandlers {
|
|||
case Some(fundingTx) =>
|
||||
// if we are funder, we never give up
|
||||
// we cannot correctly set the fee, but it was correctly set when we initially published the transaction
|
||||
log.debug(s"republishing the funding tx...")
|
||||
txPublisher ! PublishFinalTx(fundingTx, fundingTx.txIn.head.outPoint, "funding", 0 sat, None)
|
||||
log.debug("republishing the funding tx...")
|
||||
txPublisher ! PublishFinalTx(fundingTx, fundingTx.txIn.head.outPoint, 0 sat, "funding", 0 sat, None)
|
||||
// we also check if the funding tx has been double-spent
|
||||
checkDoubleSpent(fundingTx)
|
||||
context.system.scheduler.scheduleOnce(1 day, blockchain.toClassic, GetTxWithMeta(self, txid))
|
||||
|
@ -118,11 +118,11 @@ trait SingleFundingHandlers extends CommonFundingHandlers {
|
|||
val minDepth_opt = if (d.commitments.params.localParams.isChannelOpener) {
|
||||
d.commitments.params.minDepthFunder
|
||||
} else {
|
||||
// when we're not the channel initiator we scale the min_depth confirmations depending on the funding amount
|
||||
d.commitments.params.minDepthFundee(nodeParams.channelConf.minDepthFunding, d.commitments.latest.commitInput.txOut.amount)
|
||||
// When we're not the channel initiator we scale the min_depth confirmations depending on the funding amount.
|
||||
d.commitments.params.minDepthFundee(nodeParams.channelConf.minDepth, d.commitments.latest.commitInput.txOut.amount)
|
||||
}
|
||||
val minDepth = minDepth_opt.getOrElse {
|
||||
val defaultMinDepth = nodeParams.channelConf.minDepthFunding
|
||||
val defaultMinDepth = nodeParams.channelConf.minDepth
|
||||
// If we are in state WAIT_FOR_FUNDING_CONFIRMED, then the computed minDepth should be > 0, otherwise we would
|
||||
// have skipped this state. Maybe the computation method was changed and eclair was restarted?
|
||||
log.warning("min_depth should be defined since we're waiting for the funding tx to confirm, using default minDepth={}", defaultMinDepth)
|
||||
|
|
|
@ -578,7 +578,7 @@ private class InteractiveTxBuilder(replyTo: ActorRef[InteractiveTxBuilder.Respon
|
|||
Left(OutputBelowDust(fundingParams.channelId, addOutput.serialId, addOutput.amount, fundingParams.dustLimit))
|
||||
} else if (addOutput.pubkeyScript == fundingPubkeyScript && addOutput.amount != fundingParams.fundingAmount) {
|
||||
Left(InvalidSharedOutputAmount(fundingParams.channelId, addOutput.serialId, addOutput.amount, fundingParams.fundingAmount))
|
||||
} else if (!MutualClose.isValidFinalScriptPubkey(addOutput.pubkeyScript, allowAnySegwit = true)) {
|
||||
} else if (!MutualClose.isValidFinalScriptPubkey(addOutput.pubkeyScript, allowAnySegwit = true, allowOpReturn = false)) {
|
||||
Left(InvalidSpliceOutputScript(fundingParams.channelId, addOutput.serialId, addOutput.pubkeyScript))
|
||||
} else if (addOutput.pubkeyScript == fundingPubkeyScript) {
|
||||
Right(Output.Shared(addOutput.serialId, addOutput.pubkeyScript, purpose.previousLocalBalance + fundingParams.localContribution, purpose.previousRemoteBalance + fundingParams.remoteContribution, purpose.htlcBalance))
|
||||
|
|
|
@ -112,8 +112,9 @@ private class FinalTxPublisher(nodeParams: NodeParams,
|
|||
}
|
||||
|
||||
def publish(): Behavior[Command] = {
|
||||
val minDepth = nodeParams.channelConf.minDepthScaled(cmd.amount)
|
||||
val txMonitor = context.spawn(MempoolTxMonitor(nodeParams, bitcoinClient, txPublishContext), "mempool-tx-monitor")
|
||||
txMonitor ! MempoolTxMonitor.Publish(context.messageAdapter[MempoolTxMonitor.TxResult](WrappedTxResult), cmd.tx, cmd.input, cmd.desc, cmd.fee)
|
||||
txMonitor ! MempoolTxMonitor.Publish(context.messageAdapter[MempoolTxMonitor.TxResult](WrappedTxResult), cmd.tx, cmd.input, minDepth, cmd.desc, cmd.fee)
|
||||
Behaviors.receiveMessagePartial {
|
||||
case WrappedTxResult(txResult) =>
|
||||
txResult match {
|
||||
|
|
|
@ -38,7 +38,7 @@ object MempoolTxMonitor {
|
|||
|
||||
// @formatter:off
|
||||
sealed trait Command
|
||||
case class Publish(replyTo: ActorRef[TxResult], tx: Transaction, input: OutPoint, desc: String, fee: Satoshi) extends Command
|
||||
case class Publish(replyTo: ActorRef[TxResult], tx: Transaction, input: OutPoint, minDepth: Int, desc: String, fee: Satoshi) extends Command
|
||||
private case object PublishOk extends Command
|
||||
private case class PublishFailed(reason: Throwable) extends Command
|
||||
private case class InputStatus(spentConfirmed: Boolean, spentUnconfirmed: Boolean) extends Command
|
||||
|
@ -152,7 +152,7 @@ private class MempoolTxMonitor(nodeParams: NodeParams,
|
|||
case Failure(reason) => GetTxConfirmationsFailed(reason)
|
||||
}
|
||||
Behaviors.same
|
||||
} else if (confirmations < nodeParams.channelConf.minDepthClosing) {
|
||||
} else if (confirmations < cmd.minDepth) {
|
||||
log.debug("txid={} has {} confirmations, waiting to reach min depth", cmd.tx.txid, confirmations)
|
||||
cmd.replyTo ! TxRecentlyConfirmed(cmd.tx.txid, confirmations)
|
||||
Behaviors.same
|
||||
|
|
|
@ -50,6 +50,7 @@ object ReplaceableTxPrePublisher {
|
|||
private case object CommitTxAlreadyConfirmed extends RuntimeException with Command
|
||||
private case object RemoteCommitTxNotInMempool extends RuntimeException with Command
|
||||
private case object LocalCommitTxConfirmed extends Command
|
||||
private case object LocalCommitTxPublished extends Command
|
||||
private case object RemoteCommitTxConfirmed extends Command
|
||||
private case object RemoteCommitTxPublished extends Command
|
||||
private case object HtlcOutputAlreadySpent extends Command
|
||||
|
@ -211,7 +212,8 @@ private class ReplaceableTxPrePublisher(nodeParams: NodeParams,
|
|||
*/
|
||||
private def checkHtlcOutput(commitment: FullCommitment, htlcTx: HtlcTx): Future[Command] = {
|
||||
getRemoteCommitConfirmations(commitment).flatMap {
|
||||
case Some(depth) if depth >= nodeParams.channelConf.minDepthClosing => Future.successful(RemoteCommitTxConfirmed)
|
||||
case Some(depth) if depth >= nodeParams.channelConf.minDepthScaled(commitment.capacity) => Future.successful(RemoteCommitTxConfirmed)
|
||||
case Some(_) => Future.successful(RemoteCommitTxPublished)
|
||||
case _ => bitcoinClient.isTransactionOutputSpent(htlcTx.input.outPoint.txid, htlcTx.input.outPoint.index.toInt).map {
|
||||
case true => HtlcOutputAlreadySpent
|
||||
case false => ParentTxOk
|
||||
|
@ -232,6 +234,11 @@ private class ReplaceableTxPrePublisher(nodeParams: NodeParams,
|
|||
case None => replyTo ! PreconditionsFailed(TxPublisher.TxRejectedReason.TxSkipped(retryNextBlock = false))
|
||||
}
|
||||
Behaviors.stopped
|
||||
case RemoteCommitTxPublished =>
|
||||
log.info("cannot publish {}: remote commit has been published", cmd.desc)
|
||||
// We keep retrying until the remote commit reaches min-depth to protect against reorgs.
|
||||
replyTo ! PreconditionsFailed(TxPublisher.TxRejectedReason.TxSkipped(retryNextBlock = true))
|
||||
Behaviors.stopped
|
||||
case RemoteCommitTxConfirmed =>
|
||||
log.warn("cannot publish {}: remote commit has been confirmed", cmd.desc)
|
||||
replyTo ! PreconditionsFailed(TxPublisher.TxRejectedReason.ConflictingTxConfirmed)
|
||||
|
@ -289,7 +296,8 @@ private class ReplaceableTxPrePublisher(nodeParams: NodeParams,
|
|||
*/
|
||||
private def checkClaimHtlcOutput(commitment: FullCommitment, claimHtlcTx: ClaimHtlcTx): Future[Command] = {
|
||||
bitcoinClient.getTxConfirmations(commitment.localCommit.commitTxAndRemoteSig.commitTx.tx.txid).flatMap {
|
||||
case Some(depth) if depth >= nodeParams.channelConf.minDepthClosing => Future.successful(LocalCommitTxConfirmed)
|
||||
case Some(depth) if depth >= nodeParams.channelConf.minDepthScaled(commitment.capacity) => Future.successful(LocalCommitTxConfirmed)
|
||||
case Some(_) => Future.successful(LocalCommitTxPublished)
|
||||
case _ => bitcoinClient.isTransactionOutputSpent(claimHtlcTx.input.outPoint.txid, claimHtlcTx.input.outPoint.index.toInt).map {
|
||||
case true => HtlcOutputAlreadySpent
|
||||
case false => ParentTxOk
|
||||
|
@ -310,6 +318,11 @@ private class ReplaceableTxPrePublisher(nodeParams: NodeParams,
|
|||
case None => replyTo ! PreconditionsFailed(TxPublisher.TxRejectedReason.TxSkipped(retryNextBlock = false))
|
||||
}
|
||||
Behaviors.stopped
|
||||
case LocalCommitTxPublished =>
|
||||
log.info("cannot publish {}: local commit has been published", cmd.desc)
|
||||
// We keep retrying until the local commit reaches min-depth to protect against reorgs.
|
||||
replyTo ! PreconditionsFailed(TxPublisher.TxRejectedReason.TxSkipped(retryNextBlock = true))
|
||||
Behaviors.stopped
|
||||
case LocalCommitTxConfirmed =>
|
||||
log.warn("cannot publish {}: local commit has been confirmed", cmd.desc)
|
||||
replyTo ! PreconditionsFailed(TxPublisher.TxRejectedReason.ConflictingTxConfirmed)
|
||||
|
|
|
@ -178,8 +178,9 @@ private class ReplaceableTxPublisher(nodeParams: NodeParams,
|
|||
case ConfirmationTarget.Absolute(confirmBefore) => log.debug("publishing {} with confirmation target in {} blocks", cmd.desc, confirmBefore - nodeParams.currentBlockHeight)
|
||||
case ConfirmationTarget.Priority(priority) => log.debug("publishing {} with priority {}", cmd.desc, priority)
|
||||
}
|
||||
val minDepth = nodeParams.channelConf.minDepthScaled(cmd.txInfo.amountIn)
|
||||
val txMonitor = context.spawn(MempoolTxMonitor(nodeParams, bitcoinClient, txPublishContext), s"mempool-tx-monitor-${tx.signedTx.txid}")
|
||||
txMonitor ! MempoolTxMonitor.Publish(context.messageAdapter[MempoolTxMonitor.TxResult](WrappedTxResult), tx.signedTx, cmd.input, cmd.desc, tx.fee)
|
||||
txMonitor ! MempoolTxMonitor.Publish(context.messageAdapter[MempoolTxMonitor.TxResult](WrappedTxResult), tx.signedTx, cmd.input, minDepth, cmd.desc, tx.fee)
|
||||
wait(tx)
|
||||
case ReplaceableTxFunder.FundingFailed(reason) => sendResult(TxPublisher.TxRejected(txPublishContext.id, cmd, reason), None)
|
||||
}
|
||||
|
@ -286,8 +287,9 @@ private class ReplaceableTxPublisher(nodeParams: NodeParams,
|
|||
// Only one of them can be in the mempool, so we wait for the other to be rejected. Once that's done, we're back to a
|
||||
// situation where we have one transaction in the mempool and wait for it to confirm.
|
||||
def publishReplacement(previousTx: FundedTx, bumpedTx: FundedTx): Behavior[Command] = {
|
||||
val minDepth = nodeParams.channelConf.minDepthScaled(cmd.txInfo.amountIn)
|
||||
val txMonitor = context.spawn(MempoolTxMonitor(nodeParams, bitcoinClient, txPublishContext), s"mempool-tx-monitor-${bumpedTx.signedTx.txid}")
|
||||
txMonitor ! MempoolTxMonitor.Publish(context.messageAdapter[MempoolTxMonitor.TxResult](WrappedTxResult), bumpedTx.signedTx, cmd.input, cmd.desc, bumpedTx.fee)
|
||||
txMonitor ! MempoolTxMonitor.Publish(context.messageAdapter[MempoolTxMonitor.TxResult](WrappedTxResult), bumpedTx.signedTx, cmd.input, minDepth, cmd.desc, bumpedTx.fee)
|
||||
Behaviors.receiveMessagePartial {
|
||||
case WrappedTxResult(txResult) =>
|
||||
txResult match {
|
||||
|
|
|
@ -80,12 +80,13 @@ object TxPublisher {
|
|||
* NB: the parent tx should only be provided when it's being concurrently published, it's unnecessary when it is
|
||||
* confirmed or when the tx has a relative delay.
|
||||
*
|
||||
* @param amount amount we are claiming with this transaction.
|
||||
* @param fee the fee that we're actually paying: it must be set to the mining fee, unless our peer is paying it (in
|
||||
* which case it must be set to zero here).
|
||||
*/
|
||||
case class PublishFinalTx(tx: Transaction, input: OutPoint, desc: String, fee: Satoshi, parentTx_opt: Option[TxId]) extends PublishTx
|
||||
case class PublishFinalTx(tx: Transaction, input: OutPoint, amount: Satoshi, desc: String, fee: Satoshi, parentTx_opt: Option[TxId]) extends PublishTx
|
||||
object PublishFinalTx {
|
||||
def apply(txInfo: TransactionWithInputInfo, fee: Satoshi, parentTx_opt: Option[TxId]): PublishFinalTx = PublishFinalTx(txInfo.tx, txInfo.input.outPoint, txInfo.desc, fee, parentTx_opt)
|
||||
def apply(txInfo: TransactionWithInputInfo, fee: Satoshi, parentTx_opt: Option[TxId]): PublishFinalTx = PublishFinalTx(txInfo.tx, txInfo.input.outPoint, txInfo.amountIn, txInfo.desc, fee, parentTx_opt)
|
||||
}
|
||||
/** Publish an unsigned transaction that can be RBF-ed. */
|
||||
case class PublishReplaceableTx(txInfo: ReplaceableTransactionWithInputInfo, commitment: FullCommitment) extends PublishTx {
|
||||
|
|
|
@ -282,6 +282,7 @@ private class OpenChannelInterceptor(peer: ActorRef[Any],
|
|||
case _: DATA_NORMAL => false
|
||||
case _: DATA_SHUTDOWN => true
|
||||
case _: DATA_NEGOTIATING => true
|
||||
case _: DATA_NEGOTIATING_SIMPLE => true
|
||||
case _: DATA_CLOSING => true
|
||||
case _: DATA_WAIT_FOR_REMOTE_PUBLISH_FUTURE_COMMITMENT => true
|
||||
}
|
||||
|
|
|
@ -198,6 +198,7 @@ object PeerReadyNotifier {
|
|||
case channel.NORMAL => true
|
||||
case channel.SHUTDOWN => true
|
||||
case channel.NEGOTIATING => true
|
||||
case channel.NEGOTIATING_SIMPLE => true
|
||||
case channel.CLOSING => true
|
||||
case channel.CLOSED => true
|
||||
case channel.WAIT_FOR_REMOTE_PUBLISH_FUTURE_COMMITMENT => true
|
||||
|
|
|
@ -656,6 +656,7 @@ object CustomTypeHints {
|
|||
classOf[DATA_NORMAL],
|
||||
classOf[DATA_SHUTDOWN],
|
||||
classOf[DATA_NEGOTIATING],
|
||||
classOf[DATA_NEGOTIATING_SIMPLE],
|
||||
classOf[DATA_CLOSING],
|
||||
classOf[DATA_WAIT_FOR_REMOTE_PUBLISH_FUTURE_COMMITMENT]
|
||||
), typeHintFieldName = "type")
|
||||
|
|
|
@ -30,7 +30,7 @@ import fr.acinq.eclair.wire.protocol.UpdateAddHtlc
|
|||
import scodec.bits.ByteVector
|
||||
|
||||
import java.nio.ByteOrder
|
||||
import scala.util.Try
|
||||
import scala.util.{Success, Try}
|
||||
|
||||
/**
|
||||
* Created by PM on 15/12/2016.
|
||||
|
@ -271,6 +271,27 @@ object Transactions {
|
|||
*/
|
||||
def fee2rate(fee: Satoshi, weight: Int): FeeratePerKw = FeeratePerKw((fee * 1000L) / weight)
|
||||
|
||||
/** As defined in https://github.com/lightning/bolts/blob/master/03-transactions.md#dust-limits */
|
||||
def dustLimit(scriptPubKey: ByteVector): Satoshi = {
|
||||
Try(Script.parse(scriptPubKey)) match {
|
||||
case Success(OP_DUP :: OP_HASH160 :: OP_PUSHDATA(pubkeyHash, _) :: OP_EQUALVERIFY :: OP_CHECKSIG :: Nil) if pubkeyHash.size == 20 => 546.sat
|
||||
case Success(OP_HASH160 :: OP_PUSHDATA(scriptHash, _) :: OP_EQUAL :: Nil) if scriptHash.size == 20 => 540.sat
|
||||
case Success(OP_0 :: OP_PUSHDATA(pubkeyHash, _) :: Nil) if pubkeyHash.size == 20 => 294.sat
|
||||
case Success(OP_0 :: OP_PUSHDATA(scriptHash, _) :: Nil) if scriptHash.size == 32 => 330.sat
|
||||
case Success((OP_1 | OP_2 | OP_3 | OP_4 | OP_5 | OP_6 | OP_7 | OP_8 | OP_9 | OP_10 | OP_11 | OP_12 | OP_13 | OP_14 | OP_15 | OP_16) :: OP_PUSHDATA(program, _) :: Nil) if 2 <= program.length && program.length <= 40 => 354.sat
|
||||
case Success(OP_RETURN :: _) => 0.sat // OP_RETURN is never dust
|
||||
case _ => 546.sat
|
||||
}
|
||||
}
|
||||
|
||||
/** When an output is using OP_RETURN, we usually want to make sure its amount is 0, otherwise bitcoind won't accept it. */
|
||||
def isOpReturn(scriptPubKey: ByteVector): Boolean = {
|
||||
Try(Script.parse(scriptPubKey)) match {
|
||||
case Success(OP_RETURN :: _) => true
|
||||
case _ => false
|
||||
}
|
||||
}
|
||||
|
||||
/** Offered HTLCs below this amount will be trimmed. */
|
||||
def offeredHtlcTrimThreshold(dustLimit: Satoshi, spec: CommitmentSpec, commitmentFormat: CommitmentFormat): Satoshi =
|
||||
dustLimit + weight2fee(spec.htlcTxFeerate(commitmentFormat), commitmentFormat.htlcTimeoutWeight)
|
||||
|
@ -854,6 +875,77 @@ object Transactions {
|
|||
ClosingTx(commitTxInput, tx, toLocalOutput)
|
||||
}
|
||||
|
||||
// @formatter:off
|
||||
/** We always create multiple versions of each closing transaction, where fees are either paid by us or by our peer. */
|
||||
sealed trait SimpleClosingTxFee
|
||||
object SimpleClosingTxFee {
|
||||
case class PaidByUs(fee: Satoshi) extends SimpleClosingTxFee
|
||||
case class PaidByThem(fee: Satoshi) extends SimpleClosingTxFee
|
||||
}
|
||||
// @formatter:on
|
||||
|
||||
/** Each closing attempt can result in multiple potential closing transactions, depending on which outputs are included. */
|
||||
case class ClosingTxs(localAndRemote_opt: Option[ClosingTx], localOnly_opt: Option[ClosingTx], remoteOnly_opt: Option[ClosingTx]) {
|
||||
/** Preferred closing transaction for this closing attempt. */
|
||||
val preferred_opt: Option[ClosingTx] = localAndRemote_opt.orElse(localOnly_opt).orElse(remoteOnly_opt)
|
||||
val all: Seq[ClosingTx] = Seq(localAndRemote_opt, localOnly_opt, remoteOnly_opt).flatten
|
||||
|
||||
override def toString: String = s"localAndRemote=${localAndRemote_opt.map(_.tx.toString()).getOrElse("n/a")}, localOnly=${localOnly_opt.map(_.tx.toString()).getOrElse("n/a")}, remoteOnly=${remoteOnly_opt.map(_.tx.toString()).getOrElse("n/a")}"
|
||||
}
|
||||
|
||||
def makeSimpleClosingTxs(input: InputInfo, spec: CommitmentSpec, fee: SimpleClosingTxFee, lockTime: Long, localScriptPubKey: ByteVector, remoteScriptPubKey: ByteVector): ClosingTxs = {
|
||||
require(spec.htlcs.isEmpty, "there shouldn't be any pending htlcs")
|
||||
|
||||
val txNoOutput = Transaction(2, Seq(TxIn(input.outPoint, ByteVector.empty, sequence = 0xFFFFFFFDL)), Nil, lockTime)
|
||||
|
||||
// We compute the remaining balance for each side after paying the closing fees.
|
||||
// This lets us decide whether outputs can be included in the closing transaction or not.
|
||||
val (toLocalAmount, toRemoteAmount) = fee match {
|
||||
case SimpleClosingTxFee.PaidByUs(fee) => (spec.toLocal.truncateToSatoshi - fee, spec.toRemote.truncateToSatoshi)
|
||||
case SimpleClosingTxFee.PaidByThem(fee) => (spec.toLocal.truncateToSatoshi, spec.toRemote.truncateToSatoshi - fee)
|
||||
}
|
||||
|
||||
// An OP_RETURN script may be provided, but only when burning all of the peer's balance to fees.
|
||||
val toLocalOutput_opt = if (toLocalAmount >= dustLimit(localScriptPubKey)) {
|
||||
val amount = if (isOpReturn(localScriptPubKey)) 0.sat else toLocalAmount
|
||||
Some(TxOut(amount, localScriptPubKey))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
val toRemoteOutput_opt = if (toRemoteAmount >= dustLimit(remoteScriptPubKey)) {
|
||||
val amount = if (isOpReturn(remoteScriptPubKey)) 0.sat else toRemoteAmount
|
||||
Some(TxOut(amount, remoteScriptPubKey))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
||||
// We may create multiple closing transactions based on which outputs may be included.
|
||||
(toLocalOutput_opt, toRemoteOutput_opt) match {
|
||||
case (Some(toLocalOutput), Some(toRemoteOutput)) =>
|
||||
val txLocalAndRemote = LexicographicalOrdering.sort(txNoOutput.copy(txOut = Seq(toLocalOutput, toRemoteOutput)))
|
||||
val Right(toLocalOutputInfo) = findPubKeyScriptIndex(txLocalAndRemote, localScriptPubKey).map(index => OutputInfo(index, toLocalOutput.amount, localScriptPubKey))
|
||||
ClosingTxs(
|
||||
localAndRemote_opt = Some(ClosingTx(input, txLocalAndRemote, Some(toLocalOutputInfo))),
|
||||
// We also provide a version of the transaction without the remote output, which they may want to omit if not economical to spend.
|
||||
localOnly_opt = Some(ClosingTx(input, txNoOutput.copy(txOut = Seq(toLocalOutput)), Some(OutputInfo(0, toLocalOutput.amount, localScriptPubKey)))),
|
||||
remoteOnly_opt = None
|
||||
)
|
||||
case (Some(toLocalOutput), None) =>
|
||||
ClosingTxs(
|
||||
localAndRemote_opt = None,
|
||||
localOnly_opt = Some(ClosingTx(input, txNoOutput.copy(txOut = Seq(toLocalOutput)), Some(OutputInfo(0, toLocalOutput.amount, localScriptPubKey)))),
|
||||
remoteOnly_opt = None
|
||||
)
|
||||
case (None, Some(toRemoteOutput)) =>
|
||||
ClosingTxs(
|
||||
localAndRemote_opt = None,
|
||||
localOnly_opt = None,
|
||||
remoteOnly_opt = Some(ClosingTx(input, txNoOutput.copy(txOut = Seq(toRemoteOutput)), None))
|
||||
)
|
||||
case (None, None) => ClosingTxs(None, None, None)
|
||||
}
|
||||
}
|
||||
|
||||
def findPubKeyScriptIndex(tx: Transaction, pubkeyScript: ByteVector): Either[TxGenerationSkipped, Int] = {
|
||||
val outputIndex = tx.txOut.indexWhere(_.publicKeyScript == pubkeyScript)
|
||||
if (outputIndex >= 0) {
|
||||
|
|
|
@ -780,6 +780,19 @@ private[channel] object ChannelCodecs4 {
|
|||
("closingTxProposed" | listOfN(uint16, listOfN(uint16, lengthDelimited(closingTxProposedCodec)))) ::
|
||||
("bestUnpublishedClosingTx_opt" | optional(bool8, closingTxCodec))).as[DATA_NEGOTIATING]
|
||||
|
||||
private val closingTxsCodec: Codec[ClosingTxs] = (
|
||||
("localAndRemote_opt" | optional(bool8, closingTxCodec)) ::
|
||||
("localOnly_opt" | optional(bool8, closingTxCodec)) ::
|
||||
("remoteOnly_opt" | optional(bool8, closingTxCodec))).as[ClosingTxs]
|
||||
|
||||
val DATA_NEGOTIATING_SIMPLE_17_Codec: Codec[DATA_NEGOTIATING_SIMPLE] = (
|
||||
("commitments" | commitmentsCodec) ::
|
||||
("lastClosingFeerate" | feeratePerKw) ::
|
||||
("localScriptPubKey" | varsizebinarydata) ::
|
||||
("remoteScriptPubKey" | varsizebinarydata) ::
|
||||
("proposedClosingTxs" | listOfN(uint16, closingTxsCodec)) ::
|
||||
("publishedClosingTxs" | listOfN(uint16, closingTxCodec))).as[DATA_NEGOTIATING_SIMPLE]
|
||||
|
||||
val DATA_CLOSING_07_Codec: Codec[DATA_CLOSING] = (
|
||||
("commitments" | commitmentsCodecWithoutFirstRemoteCommitIndex) ::
|
||||
("waitingSince" | blockHeight) ::
|
||||
|
@ -815,6 +828,7 @@ private[channel] object ChannelCodecs4 {
|
|||
|
||||
// Order matters!
|
||||
val channelDataCodec: Codec[PersistentChannelData] = discriminated[PersistentChannelData].by(uint16)
|
||||
.typecase(0x17, Codecs.DATA_NEGOTIATING_SIMPLE_17_Codec)
|
||||
.typecase(0x16, Codecs.DATA_WAIT_FOR_DUAL_FUNDING_READY_16_Codec)
|
||||
.typecase(0x15, Codecs.DATA_WAIT_FOR_CHANNEL_READY_15_Codec)
|
||||
.typecase(0x14, Codecs.DATA_NORMAL_14_Codec)
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
package fr.acinq.eclair.wire.protocol
|
||||
|
||||
import fr.acinq.bitcoin.scalacompat.{Satoshi, TxId}
|
||||
import fr.acinq.bitcoin.scalacompat.{ByteVector64, Satoshi, TxId}
|
||||
import fr.acinq.eclair.channel.{ChannelType, ChannelTypes}
|
||||
import fr.acinq.eclair.wire.protocol.CommonCodecs._
|
||||
import fr.acinq.eclair.wire.protocol.TlvCodecs.{tlvField, tlvStream, tmillisatoshi}
|
||||
|
@ -270,3 +270,23 @@ object ClosingSignedTlv {
|
|||
)
|
||||
|
||||
}
|
||||
|
||||
sealed trait ClosingTlv extends Tlv
|
||||
|
||||
object ClosingTlv {
|
||||
/** Signature for a closing transaction containing only the closer's output. */
|
||||
case class CloserOutputOnly(sig: ByteVector64) extends ClosingTlv
|
||||
|
||||
/** Signature for a closing transaction containing only the closee's output. */
|
||||
case class CloseeOutputOnly(sig: ByteVector64) extends ClosingTlv
|
||||
|
||||
/** Signature for a closing transaction containing the closer and closee's outputs. */
|
||||
case class CloserAndCloseeOutputs(sig: ByteVector64) extends ClosingTlv
|
||||
|
||||
val closingTlvCodec: Codec[TlvStream[ClosingTlv]] = tlvStream(discriminated[ClosingTlv].by(varint)
|
||||
.typecase(UInt64(1), tlvField(bytes64.as[CloserOutputOnly]))
|
||||
.typecase(UInt64(2), tlvField(bytes64.as[CloseeOutputOnly]))
|
||||
.typecase(UInt64(3), tlvField(bytes64.as[CloserAndCloseeOutputs]))
|
||||
)
|
||||
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ import fr.acinq.eclair.wire.protocol.CommonCodecs._
|
|||
import fr.acinq.eclair.{Features, InitFeature, KamonExt}
|
||||
import scodec.bits.{BinStringSyntax, BitVector, ByteVector}
|
||||
import scodec.codecs._
|
||||
import scodec.{Attempt, Codec, Err}
|
||||
import scodec.{Attempt, Codec}
|
||||
|
||||
/**
|
||||
* Created by PM on 15/11/2016.
|
||||
|
@ -227,6 +227,22 @@ object LightningMessageCodecs {
|
|||
("signature" | bytes64) ::
|
||||
("tlvStream" | ClosingSignedTlv.closingSignedTlvCodec)).as[ClosingSigned]
|
||||
|
||||
val closingCompleteCodec: Codec[ClosingComplete] = (
|
||||
("channelId" | bytes32) ::
|
||||
("closerScriptPubKey" | varsizebinarydata) ::
|
||||
("closeeScriptPubKey" | varsizebinarydata) ::
|
||||
("fees" | satoshi) ::
|
||||
("lockTime" | uint32) ::
|
||||
("tlvStream" | ClosingTlv.closingTlvCodec)).as[ClosingComplete]
|
||||
|
||||
val closingSigCodec: Codec[ClosingSig] = (
|
||||
("channelId" | bytes32) ::
|
||||
("closerScriptPubKey" | varsizebinarydata) ::
|
||||
("closeeScriptPubKey" | varsizebinarydata) ::
|
||||
("fees" | satoshi) ::
|
||||
("lockTime" | uint32) ::
|
||||
("tlvStream" | ClosingTlv.closingTlvCodec)).as[ClosingSig]
|
||||
|
||||
val updateAddHtlcCodec: Codec[UpdateAddHtlc] = (
|
||||
("channelId" | bytes32) ::
|
||||
("id" | uint64overflow) ::
|
||||
|
@ -497,6 +513,8 @@ object LightningMessageCodecs {
|
|||
.typecase(36, channelReadyCodec)
|
||||
.typecase(38, shutdownCodec)
|
||||
.typecase(39, closingSignedCodec)
|
||||
.typecase(40, closingCompleteCodec)
|
||||
.typecase(41, closingSigCodec)
|
||||
.typecase(64, openDualFundedChannelCodec)
|
||||
.typecase(65, acceptDualFundedChannelCodec)
|
||||
.typecase(66, txAddInputCodec)
|
||||
|
|
|
@ -362,6 +362,18 @@ case class ClosingSigned(channelId: ByteVector32,
|
|||
val feeRange_opt = tlvStream.get[ClosingSignedTlv.FeeRange]
|
||||
}
|
||||
|
||||
case class ClosingComplete(channelId: ByteVector32, closerScriptPubKey: ByteVector, closeeScriptPubKey: ByteVector, fees: Satoshi, lockTime: Long, tlvStream: TlvStream[ClosingTlv] = TlvStream.empty) extends ChannelMessage with HasChannelId {
|
||||
val closerOutputOnlySig_opt: Option[ByteVector64] = tlvStream.get[ClosingTlv.CloserOutputOnly].map(_.sig)
|
||||
val closeeOutputOnlySig_opt: Option[ByteVector64] = tlvStream.get[ClosingTlv.CloseeOutputOnly].map(_.sig)
|
||||
val closerAndCloseeOutputsSig_opt: Option[ByteVector64] = tlvStream.get[ClosingTlv.CloserAndCloseeOutputs].map(_.sig)
|
||||
}
|
||||
|
||||
case class ClosingSig(channelId: ByteVector32, closerScriptPubKey: ByteVector, closeeScriptPubKey: ByteVector, fees: Satoshi, lockTime: Long, tlvStream: TlvStream[ClosingTlv] = TlvStream.empty) extends ChannelMessage with HasChannelId {
|
||||
val closerOutputOnlySig_opt: Option[ByteVector64] = tlvStream.get[ClosingTlv.CloserOutputOnly].map(_.sig)
|
||||
val closeeOutputOnlySig_opt: Option[ByteVector64] = tlvStream.get[ClosingTlv.CloseeOutputOnly].map(_.sig)
|
||||
val closerAndCloseeOutputsSig_opt: Option[ByteVector64] = tlvStream.get[ClosingTlv.CloserAndCloseeOutputs].map(_.sig)
|
||||
}
|
||||
|
||||
case class UpdateAddHtlc(channelId: ByteVector32,
|
||||
id: Long,
|
||||
amountMsat: MilliSatoshi,
|
||||
|
|
|
@ -130,8 +130,7 @@ object TestConstants {
|
|||
scanPreviousBlocksDepth = 3,
|
||||
maxChannelSpentRescanBlocks = 144,
|
||||
htlcMinimum = 0 msat,
|
||||
minDepthFunding = 6,
|
||||
minDepthClosing = 3,
|
||||
minDepth = 6,
|
||||
toRemoteDelay = CltvExpiryDelta(144),
|
||||
maxToLocalDelay = CltvExpiryDelta(1000),
|
||||
reserveToFundingRatio = 0.01, // note: not used (overridden below)
|
||||
|
@ -313,8 +312,7 @@ object TestConstants {
|
|||
scanPreviousBlocksDepth = 3,
|
||||
maxChannelSpentRescanBlocks = 144,
|
||||
htlcMinimum = 1000 msat,
|
||||
minDepthFunding = 3,
|
||||
minDepthClosing = 3,
|
||||
minDepth = 3,
|
||||
toRemoteDelay = CltvExpiryDelta(144),
|
||||
maxToLocalDelay = CltvExpiryDelta(1000),
|
||||
reserveToFundingRatio = 0.01, // note: not used (overridden below)
|
||||
|
|
|
@ -79,11 +79,12 @@ object TestDatabases {
|
|||
case d: DATA_WAIT_FOR_REMOTE_PUBLISH_FUTURE_COMMITMENT => d.copy(commitments = freeze2(d.commitments))
|
||||
case d: DATA_NORMAL => d.copy(commitments = freeze2(d.commitments))
|
||||
.modify(_.spliceStatus).using {
|
||||
case s: SpliceStatus.SpliceWaitingForSigs => s
|
||||
case _ => SpliceStatus.NoSplice
|
||||
}
|
||||
case s: SpliceStatus.SpliceWaitingForSigs => s
|
||||
case _ => SpliceStatus.NoSplice
|
||||
}
|
||||
case d: DATA_CLOSING => d.copy(commitments = freeze2(d.commitments))
|
||||
case d: DATA_NEGOTIATING => d.copy(commitments = freeze2(d.commitments))
|
||||
case d: DATA_NEGOTIATING_SIMPLE => d.copy(commitments = freeze2(d.commitments))
|
||||
case d: DATA_SHUTDOWN => d.copy(commitments = freeze2(d.commitments))
|
||||
}
|
||||
|
||||
|
@ -132,6 +133,7 @@ object TestDatabases {
|
|||
}
|
||||
|
||||
object TestPgDatabases {
|
||||
|
||||
import _root_.io.zonky.test.db.postgres.embedded.EmbeddedPostgres
|
||||
|
||||
/** single instance */
|
||||
|
|
|
@ -198,10 +198,10 @@ class CheckBalanceSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
))
|
||||
|
||||
alice2blockchain.expectMsgType[PublishFinalTx] // claim-main
|
||||
val htlcTx1 = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
val htlcTx2 = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
val htlcTx3 = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
val htlcTx4 = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
val htlcTx1 = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
val htlcTx2 = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
val htlcTx3 = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
val htlcTx4 = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
alice2blockchain.expectMsgType[WatchTxConfirmed] // commit tx
|
||||
alice2blockchain.expectMsgType[WatchTxConfirmed] // main-delayed
|
||||
alice2blockchain.expectMsgType[WatchOutputSpent] // htlc 1
|
||||
|
@ -212,11 +212,11 @@ class CheckBalanceSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
|
||||
// 3rd-stage txs are published when htlc-timeout txs confirm
|
||||
val claimHtlcDelayedTxs = Seq(htlcTx1, htlcTx2, htlcTx3, htlcTx4).map { htlcTimeoutTx =>
|
||||
alice ! WatchOutputSpentTriggered(htlcTimeoutTx)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == htlcTimeoutTx.txid)
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(2701), 3, htlcTimeoutTx)
|
||||
val claimHtlcDelayedTx = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == claimHtlcDelayedTx.txid)
|
||||
alice ! WatchOutputSpentTriggered(htlcTimeoutTx.amount, htlcTimeoutTx.tx)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == htlcTimeoutTx.tx.txid)
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(2701), 3, htlcTimeoutTx.tx)
|
||||
val claimHtlcDelayedTx = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == claimHtlcDelayedTx.tx.txid)
|
||||
claimHtlcDelayedTx
|
||||
}
|
||||
awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get.claimHtlcDelayedTxs.length == 4)
|
||||
|
@ -224,7 +224,7 @@ class CheckBalanceSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
assert(CheckBalance.computeLocalCloseBalance(commitments.changes, LocalClose(commitments.active.last.localCommit, alice.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get), commitments.originChannels, knownPreimages) ==
|
||||
PossiblyPublishedMainAndHtlcBalance(
|
||||
toLocal = Map(OutPoint(localCommitPublished.claimMainDelayedOutputTx.get.tx.txid, 0) -> localCommitPublished.claimMainDelayedOutputTx.get.tx.txOut.head.amount),
|
||||
htlcs = claimHtlcDelayedTxs.map(claimTx => OutPoint(claimTx.txid, 0) -> claimTx.txOut.head.amount.toBtc).toMap,
|
||||
htlcs = claimHtlcDelayedTxs.map(claimTx => OutPoint(claimTx.tx.txid, 0) -> claimTx.tx.txOut.head.amount.toBtc).toMap,
|
||||
htlcsUnpublished = 0.sat
|
||||
))
|
||||
}
|
||||
|
|
|
@ -335,12 +335,12 @@ class ZmqWatcherSpec extends TestKitBaseClass with AnyFunSuiteLike with Bitcoind
|
|||
watcher ! StopWatching(probe.ref)
|
||||
|
||||
// We should still find tx2 if the provided hint is wrong
|
||||
watcher ! WatchOutputSpent(probe.ref, tx1.txid, 0, Set(randomTxId()))
|
||||
watcher ! WatchOutputSpent(probe.ref, tx1.txid, 0, tx1.txOut(0).amount, Set(randomTxId()))
|
||||
probe.fishForMessage() { case m: WatchOutputSpentTriggered => m.spendingTx.txid == tx2.txid }
|
||||
watcher ! StopWatching(probe.ref)
|
||||
|
||||
// We should find txs that have already been confirmed
|
||||
watcher ! WatchOutputSpent(probe.ref, tx.txid, outputIndex, Set.empty)
|
||||
watcher ! WatchOutputSpent(probe.ref, tx.txid, outputIndex, tx.txOut(outputIndex).amount, Set.empty)
|
||||
probe.fishForMessage() { case m: WatchOutputSpentTriggered => m.spendingTx.txid == tx1.txid }
|
||||
watcher ! StopWatching(probe.ref)
|
||||
|
||||
|
@ -500,16 +500,16 @@ class ZmqWatcherSpec extends TestKitBaseClass with AnyFunSuiteLike with Bitcoind
|
|||
watcher ! WatchFundingConfirmed(actor1.ref, txid, 2)
|
||||
watcher ! WatchFundingConfirmed(actor1.ref, txid, 3)
|
||||
watcher ! WatchFundingConfirmed(actor1.ref, TxId(txid.value.reverse), 3)
|
||||
watcher ! WatchOutputSpent(actor1.ref, txid, 0, Set.empty)
|
||||
watcher ! WatchOutputSpent(actor1.ref, txid, 1, Set.empty)
|
||||
watcher ! WatchOutputSpent(actor1.ref, txid, 0, 0 sat, Set.empty)
|
||||
watcher ! WatchOutputSpent(actor1.ref, txid, 1, 0 sat, Set.empty)
|
||||
watcher ! ListWatches(actor1.ref)
|
||||
val watches1 = actor1.expectMsgType[Set[Watch[_]]]
|
||||
assert(watches1.size == 5)
|
||||
|
||||
watcher ! WatchFundingConfirmed(actor2.ref, txid, 2)
|
||||
watcher ! WatchFundingConfirmed(actor2.ref, TxId(txid.value.reverse), 3)
|
||||
watcher ! WatchOutputSpent(actor2.ref, txid, 0, Set.empty)
|
||||
watcher ! WatchOutputSpent(actor2.ref, txid, 1, Set.empty)
|
||||
watcher ! WatchOutputSpent(actor2.ref, txid, 0, 0 sat, Set.empty)
|
||||
watcher ! WatchOutputSpent(actor2.ref, txid, 1, 0 sat, Set.empty)
|
||||
watcher ! ListWatches(actor2.ref)
|
||||
val watches2 = actor2.expectMsgType[Set[Watch[_]]]
|
||||
assert(watches2.size == 9)
|
||||
|
|
|
@ -225,6 +225,7 @@ class HelpersSpec extends TestKitBaseClass with AnyFunSuiteLike with ChannelStat
|
|||
TxOut(294 sat, OP_0 :: OP_PUSHDATA(hex"0000000000000000000000000000000000000000") :: Nil),
|
||||
TxOut(330 sat, OP_0 :: OP_PUSHDATA(hex"0000000000000000000000000000000000000000000000000000000000000000") :: Nil),
|
||||
TxOut(354 sat, OP_3 :: OP_PUSHDATA(hex"0000000000") :: Nil),
|
||||
TxOut(0 sat, OP_RETURN :: OP_PUSHDATA(hex"deadbeef") :: Nil),
|
||||
)
|
||||
|
||||
def toClosingTx(txOut: Seq[TxOut]): ClosingTx = {
|
||||
|
|
|
@ -79,14 +79,14 @@ class FinalTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
createBlocks(5, probe)
|
||||
|
||||
val tx = createSpendP2WPKH(parentTx, priv, priv.publicKey, 2_500 sat, sequence = 5, lockTime = 0)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, "tx-time-locks", 0 sat, None)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, 125_000 sat, "tx-time-locks", 0 sat, None)
|
||||
publisher ! Publish(probe.ref, cmd)
|
||||
|
||||
// Time locks are satisfied, the transaction should be published:
|
||||
waitTxInMempool(bitcoinClient, tx.txid, probe)
|
||||
createBlocks(1, probe)
|
||||
probe.expectNoMessage(100 millis) // we don't notify the sender until min depth has been reached
|
||||
createBlocks(3, probe)
|
||||
createBlocks(TestConstants.Alice.nodeParams.channelConf.minDepth, probe)
|
||||
probe.expectMsg(TxConfirmed(cmd, tx))
|
||||
|
||||
// The actor should stop when requested:
|
||||
|
@ -103,7 +103,7 @@ class FinalTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
val ancestorTx = sendToAddress(address, 125_000 sat, probe)
|
||||
val parentTx = createSpendP2WPKH(ancestorTx, priv, priv.publicKey, 2_500 sat, 0, 0)
|
||||
val tx = createSpendP2WPKH(parentTx, priv, priv.publicKey, 2_000 sat, 0, 0)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, "tx-with-parent", 10 sat, Some(parentTx.txid))
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, 125_000 sat, "tx-with-parent", 10 sat, Some(parentTx.txid))
|
||||
publisher ! Publish(probe.ref, cmd)
|
||||
|
||||
// Since the parent is not published yet, we can't publish the child tx either:
|
||||
|
@ -114,7 +114,7 @@ class FinalTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
probe.expectMsg(parentTx.txid)
|
||||
waitTxInMempool(bitcoinClient, tx.txid, probe)
|
||||
|
||||
createBlocks(5, probe)
|
||||
createBlocks(TestConstants.Alice.nodeParams.channelConf.minDepth, probe)
|
||||
probe.expectMsg(TxConfirmed(cmd, tx))
|
||||
}
|
||||
|
||||
|
@ -125,7 +125,7 @@ class FinalTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
val (priv, address) = createExternalAddress()
|
||||
val parentTx = sendToAddress(address, 125_000 sat, probe)
|
||||
val tx1 = createSpendP2WPKH(parentTx, priv, priv.publicKey, 2_500 sat, 0, 0)
|
||||
val cmd = PublishFinalTx(tx1, tx1.txIn.head.outPoint, "tx-time-locks", 10 sat, None)
|
||||
val cmd = PublishFinalTx(tx1, tx1.txIn.head.outPoint, 125_000 sat, "tx-time-locks", 10 sat, None)
|
||||
publisher ! Publish(probe.ref, cmd)
|
||||
waitTxInMempool(bitcoinClient, tx1.txid, probe)
|
||||
|
||||
|
@ -150,7 +150,7 @@ class FinalTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
import f._
|
||||
|
||||
val tx = sendToAddress(getNewAddress(probe), 125_000 sat, probe)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, "final-tx", 10 sat, None)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, 125_000 sat, "final-tx", 10 sat, None)
|
||||
publisher ! Publish(probe.ref, cmd)
|
||||
|
||||
probe.watch(publisher.toClassic)
|
||||
|
|
|
@ -79,7 +79,7 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
import f._
|
||||
|
||||
val tx = createSpendP2WPKH(parentTx, priv, priv.publicKey, 1_000 sat, 0, 0)
|
||||
monitor ! Publish(probe.ref, tx, tx.txIn.head.outPoint, "test-tx", 50 sat)
|
||||
monitor ! Publish(probe.ref, tx, tx.txIn.head.outPoint, 3, "test-tx", 50 sat)
|
||||
assert(eventListener.expectMsgType[TransactionPublished].tx == tx)
|
||||
waitTxInMempool(bitcoinClient, tx.txid, probe)
|
||||
|
||||
|
@ -97,7 +97,7 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
generateBlocks(1)
|
||||
|
||||
val tx = createSpendP2WPKH(parentTx, priv, priv.publicKey, 1_000 sat, 0, 0)
|
||||
monitor ! Publish(probe.ref, tx, tx.txIn.head.outPoint, "test-tx", 50 sat)
|
||||
monitor ! Publish(probe.ref, tx, tx.txIn.head.outPoint, 6, "test-tx", 50 sat)
|
||||
assert(eventListener.expectMsgType[TransactionPublished].tx == tx)
|
||||
waitTxInMempool(bitcoinClient, tx.txid, probe)
|
||||
|
||||
|
@ -106,13 +106,17 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
probe.expectMsg(TxInMempool(tx.txid, currentBlockHeight(), parentConfirmed = true))
|
||||
probe.expectNoMessage(100 millis)
|
||||
|
||||
assert(TestConstants.Alice.nodeParams.channelConf.minDepthClosing > 1)
|
||||
generateBlocks(1)
|
||||
monitor ! WrappedCurrentBlockHeight(currentBlockHeight())
|
||||
probe.expectMsg(TxRecentlyConfirmed(tx.txid, 1))
|
||||
probe.expectNoMessage(100 millis) // we wait for more than one confirmation to protect against reorgs
|
||||
probe.expectNoMessage(100 millis) // we wait for more confirmations to protect against reorgs
|
||||
|
||||
generateBlocks(TestConstants.Alice.nodeParams.channelConf.minDepthClosing - 1)
|
||||
generateBlocks(4)
|
||||
monitor ! WrappedCurrentBlockHeight(currentBlockHeight())
|
||||
probe.expectMsg(TxRecentlyConfirmed(tx.txid, 5))
|
||||
probe.expectNoMessage(100 millis) // we wait for more confirmations to protect against reorgs
|
||||
|
||||
generateBlocks(1)
|
||||
monitor ! WrappedCurrentBlockHeight(currentBlockHeight())
|
||||
probe.expectMsg(TxDeeplyBuried(tx))
|
||||
}
|
||||
|
@ -126,10 +130,10 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
probe.expectMsg(tx1.txid)
|
||||
|
||||
val tx2 = createSpendP2WPKH(parentTx, priv, priv.publicKey, 10_000 sat, 0, 0)
|
||||
monitor ! Publish(probe.ref, tx2, tx2.txIn.head.outPoint, "test-tx", 10 sat)
|
||||
monitor ! Publish(probe.ref, tx2, tx2.txIn.head.outPoint, 3, "test-tx", 10 sat)
|
||||
waitTxInMempool(bitcoinClient, tx2.txid, probe)
|
||||
|
||||
generateBlocks(TestConstants.Alice.nodeParams.channelConf.minDepthClosing)
|
||||
generateBlocks(3)
|
||||
monitor ! WrappedCurrentBlockHeight(currentBlockHeight())
|
||||
probe.expectMsg(TxDeeplyBuried(tx2))
|
||||
}
|
||||
|
@ -143,7 +147,7 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
probe.expectMsg(tx1.txid)
|
||||
|
||||
val tx2 = createSpendP2WPKH(parentTx, priv, priv.publicKey, 7_500 sat, 0, 0)
|
||||
monitor ! Publish(probe.ref, tx2, tx2.txIn.head.outPoint, "test-tx", 25 sat)
|
||||
monitor ! Publish(probe.ref, tx2, tx2.txIn.head.outPoint, 3, "test-tx", 25 sat)
|
||||
probe.expectMsg(TxRejected(tx2.txid, ConflictingTxUnconfirmed))
|
||||
}
|
||||
|
||||
|
@ -157,7 +161,7 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
generateBlocks(1)
|
||||
|
||||
val tx2 = createSpendP2WPKH(parentTx, priv, priv.publicKey, 15_000 sat, 0, 0)
|
||||
monitor ! Publish(probe.ref, tx2, tx2.txIn.head.outPoint, "test-tx", 10 sat)
|
||||
monitor ! Publish(probe.ref, tx2, tx2.txIn.head.outPoint, 3, "test-tx", 10 sat)
|
||||
probe.expectMsg(TxRejected(tx2.txid, ConflictingTxConfirmed))
|
||||
}
|
||||
|
||||
|
@ -167,7 +171,7 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
|
||||
val tx = createSpendP2WPKH(parentTx, priv, priv.publicKey, 5_000 sat, 0, 0)
|
||||
val txUnknownInput = tx.copy(txIn = tx.txIn ++ Seq(TxIn(OutPoint(randomTxId(), 13), Nil, 0)))
|
||||
monitor ! Publish(probe.ref, txUnknownInput, txUnknownInput.txIn.head.outPoint, "test-tx", 10 sat)
|
||||
monitor ! Publish(probe.ref, txUnknownInput, txUnknownInput.txIn.head.outPoint, 3, "test-tx", 10 sat)
|
||||
probe.expectMsg(TxRejected(txUnknownInput.txid, InputGone))
|
||||
}
|
||||
|
||||
|
@ -180,7 +184,7 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
|
||||
val tx = createSpendP2WPKH(parentTx, priv, priv.publicKey, 5_000 sat, 0, 0)
|
||||
val txUnknownInput = tx.copy(txIn = tx.txIn ++ Seq(TxIn(OutPoint(randomTxId(), 13), Nil, 0)))
|
||||
monitor ! Publish(probe.ref, txUnknownInput, txUnknownInput.txIn.head.outPoint, "test-tx", 10 sat)
|
||||
monitor ! Publish(probe.ref, txUnknownInput, txUnknownInput.txIn.head.outPoint, 3, "test-tx", 10 sat)
|
||||
probe.expectMsg(TxRejected(txUnknownInput.txid, InputGone))
|
||||
}
|
||||
|
||||
|
@ -195,7 +199,7 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
generateBlocks(1) // we ensure the wallet input is already spent by a confirmed transaction
|
||||
|
||||
val tx = createSpendManyP2WPKH(Seq(parentTx, walletTx), priv, priv.publicKey, 5_000 sat, 0, 0)
|
||||
monitor ! Publish(probe.ref, tx, tx.txIn.head.outPoint, "test-tx", 10 sat)
|
||||
monitor ! Publish(probe.ref, tx, tx.txIn.head.outPoint, 3, "test-tx", 10 sat)
|
||||
probe.expectMsg(TxRejected(tx.txid, InputGone))
|
||||
}
|
||||
|
||||
|
@ -204,7 +208,7 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
import f._
|
||||
|
||||
val tx1 = createSpendP2WPKH(parentTx, priv, priv.publicKey, 5_000 sat, 0, 0)
|
||||
monitor ! Publish(probe.ref, tx1, tx1.txIn.head.outPoint, "test-tx", 0 sat)
|
||||
monitor ! Publish(probe.ref, tx1, tx1.txIn.head.outPoint, 3, "test-tx", 0 sat)
|
||||
waitTxInMempool(bitcoinClient, tx1.txid, probe)
|
||||
|
||||
val tx2 = createSpendP2WPKH(parentTx, priv, priv.publicKey, 15_000 sat, 0, 0)
|
||||
|
@ -221,7 +225,7 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
import f._
|
||||
|
||||
val tx1 = createSpendP2WPKH(parentTx, priv, priv.publicKey, 5_000 sat, 0, 0)
|
||||
monitor ! Publish(probe.ref, tx1, tx1.txIn.head.outPoint, "test-tx", 10 sat)
|
||||
monitor ! Publish(probe.ref, tx1, tx1.txIn.head.outPoint, 3, "test-tx", 10 sat)
|
||||
waitTxInMempool(bitcoinClient, tx1.txid, probe)
|
||||
|
||||
val tx2 = createSpendP2WPKH(parentTx, priv, priv.publicKey, 15_000 sat, 0, 0)
|
||||
|
@ -244,7 +248,7 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
probe.expectMsg(walletTx.txid)
|
||||
|
||||
val tx = createSpendManyP2WPKH(Seq(parentTx, walletTx), priv, priv.publicKey, 1_000 sat, 0, 0)
|
||||
monitor ! Publish(probe.ref, tx, tx.txIn.head.outPoint, "test-tx", 10 sat)
|
||||
monitor ! Publish(probe.ref, tx, tx.txIn.head.outPoint, 3, "test-tx", 10 sat)
|
||||
waitTxInMempool(bitcoinClient, tx.txid, probe)
|
||||
|
||||
// A transaction replaces our unconfirmed wallet input.
|
||||
|
@ -266,14 +270,14 @@ class MempoolTxMonitorSpec extends TestKitBaseClass with AnyFunSuiteLike with Bi
|
|||
generateBlocks(1)
|
||||
|
||||
val tx = createSpendP2WPKH(parentTx, priv, priv.publicKey, 1_000 sat, 0, 0)
|
||||
monitor ! Publish(probe.ref, tx, tx.txIn.head.outPoint, "test-tx", 15 sat)
|
||||
monitor ! Publish(probe.ref, tx, tx.txIn.head.outPoint, 2, "test-tx", 15 sat)
|
||||
waitTxInMempool(bitcoinClient, tx.txid, probe)
|
||||
val txPublished = eventListener.expectMsgType[TransactionPublished]
|
||||
assert(txPublished.tx == tx)
|
||||
assert(txPublished.miningFee == 15.sat)
|
||||
assert(txPublished.desc == "test-tx")
|
||||
|
||||
generateBlocks(TestConstants.Alice.nodeParams.channelConf.minDepthClosing)
|
||||
generateBlocks(2)
|
||||
monitor ! WrappedCurrentBlockHeight(currentBlockHeight())
|
||||
eventListener.expectMsg(TransactionConfirmed(txPublished.channelId, txPublished.remoteNodeId, tx))
|
||||
}
|
||||
|
|
|
@ -42,6 +42,7 @@ import fr.acinq.eclair.transactions.Transactions._
|
|||
import fr.acinq.eclair.wire.protocol.{CommitSig, RevokeAndAck, UpdateFee}
|
||||
import fr.acinq.eclair.{BlockHeight, MilliSatoshi, MilliSatoshiLong, NodeParams, NotificationsLogger, TestConstants, TestKitBaseClass, TimestampSecond, randomKey}
|
||||
import org.scalatest.BeforeAndAfterAll
|
||||
import org.scalatest.Inside.inside
|
||||
import org.scalatest.funsuite.AnyFunSuiteLike
|
||||
import scodec.bits.ByteVector
|
||||
|
||||
|
@ -422,7 +423,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val actualFee = mempoolTxs.map(_.fees).sum
|
||||
assert(targetFee * 0.9 <= actualFee && actualFee <= targetFee * 1.1, s"actualFee=$actualFee targetFee=$targetFee")
|
||||
|
||||
generateBlocks(5)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val result = probe.expectMsgType[TxConfirmed]
|
||||
assert(result.cmd == anchorTx)
|
||||
|
@ -467,7 +468,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val actualFee = mempoolTxs.map(_.fees).sum
|
||||
assert(targetFee * 0.9 <= actualFee && actualFee <= targetFee * 1.1, s"actualFee=$actualFee targetFee=$targetFee")
|
||||
|
||||
generateBlocks(5)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val result = probe.expectMsgType[TxConfirmed]
|
||||
assert(result.cmd == anchorTx)
|
||||
|
@ -509,7 +510,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val actualFee = mempoolTxs.map(_.fees).sum
|
||||
assert(targetFee * 0.9 <= actualFee && actualFee <= targetFee * 1.1, s"actualFee=$actualFee targetFee=$targetFee")
|
||||
|
||||
generateBlocks(5)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val result = probe.expectMsgType[TxConfirmed]
|
||||
assert(result.cmd == anchorTx)
|
||||
|
@ -562,7 +563,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val actualFee = mempoolTxs.map(_.fees).sum
|
||||
assert(targetFee * 0.9 <= actualFee && actualFee <= targetFee * 1.1, s"actualFee=$actualFee targetFee=$targetFee")
|
||||
|
||||
generateBlocks(5)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val result = probe.expectMsgType[TxConfirmed]
|
||||
assert(result.cmd == anchorTx)
|
||||
|
@ -616,7 +617,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val actualFee = mempoolTxs.map(_.fees).sum
|
||||
assert(targetFee * 0.9 <= actualFee && actualFee <= targetFee * 1.1, s"actualFee=$actualFee targetFee=$targetFee")
|
||||
|
||||
generateBlocks(5)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val result = probe.expectMsgType[TxConfirmed]
|
||||
assert(result.cmd == anchorTx)
|
||||
|
@ -869,7 +870,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
})
|
||||
|
||||
// the first publishing attempt succeeds
|
||||
generateBlocks(5)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
assert(probe.expectMsgType[TxConfirmed].cmd == anchorTx)
|
||||
}
|
||||
|
@ -920,27 +921,47 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val htlcTimeout = alice2blockchain.expectMsgType[PublishReplaceableTx]
|
||||
assert(htlcTimeout.txInfo.isInstanceOf[HtlcTimeoutTx])
|
||||
|
||||
// Ensure remote commit tx confirms.
|
||||
// The remote commit tx has a few confirmations, but isn't deeply confirmed yet.
|
||||
val remoteCommitTx = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.fullySignedLocalCommitTx(bob.underlyingActor.nodeParams.channelKeyManager)
|
||||
wallet.publishTransaction(remoteCommitTx.tx).pipeTo(probe.ref)
|
||||
probe.expectMsg(remoteCommitTx.tx.txid)
|
||||
generateBlocks(5)
|
||||
generateBlocks(2)
|
||||
|
||||
// Verify that HTLC transactions immediately fail to publish.
|
||||
// Verify that HTLC transactions aren't published, but are retried in case a reorg makes the local commit confirm.
|
||||
setFeerate(FeeratePerKw(15_000 sat))
|
||||
val htlcSuccessPublisher = createPublisher()
|
||||
htlcSuccessPublisher ! Publish(probe.ref, htlcSuccess)
|
||||
val result1 = probe.expectMsgType[TxRejected]
|
||||
assert(result1.cmd == htlcSuccess)
|
||||
assert(result1.reason == ConflictingTxConfirmed)
|
||||
htlcSuccessPublisher ! Stop
|
||||
val htlcSuccessPublisher1 = createPublisher()
|
||||
htlcSuccessPublisher1 ! Publish(probe.ref, htlcSuccess)
|
||||
inside(probe.expectMsgType[TxRejected]) { result =>
|
||||
assert(result.cmd == htlcSuccess)
|
||||
assert(result.reason == TxSkipped(retryNextBlock = true))
|
||||
}
|
||||
htlcSuccessPublisher1 ! Stop
|
||||
|
||||
val htlcTimeoutPublisher = createPublisher()
|
||||
htlcTimeoutPublisher ! Publish(probe.ref, htlcTimeout)
|
||||
val result2 = probe.expectMsgType[TxRejected]
|
||||
assert(result2.cmd == htlcTimeout)
|
||||
assert(result2.reason == ConflictingTxConfirmed)
|
||||
htlcTimeoutPublisher ! Stop
|
||||
val htlcTimeoutPublisher1 = createPublisher()
|
||||
htlcTimeoutPublisher1 ! Publish(probe.ref, htlcTimeout)
|
||||
inside(probe.expectMsgType[TxRejected]) { result =>
|
||||
assert(result.cmd == htlcTimeout)
|
||||
assert(result.reason == TxSkipped(retryNextBlock = true))
|
||||
}
|
||||
htlcTimeoutPublisher1 ! Stop
|
||||
|
||||
// Once the remote commit is deeply confirmed, we stop trying to publish HTLC transactions.
|
||||
generateBlocks(4)
|
||||
val htlcSuccessPublisher2 = createPublisher()
|
||||
htlcSuccessPublisher2 ! Publish(probe.ref, htlcSuccess)
|
||||
inside(probe.expectMsgType[TxRejected]) { result =>
|
||||
assert(result.cmd == htlcSuccess)
|
||||
assert(result.reason == ConflictingTxConfirmed)
|
||||
}
|
||||
htlcSuccessPublisher2 ! Stop
|
||||
|
||||
val htlcTimeoutPublisher2 = createPublisher()
|
||||
htlcTimeoutPublisher2 ! Publish(probe.ref, htlcTimeout)
|
||||
inside(probe.expectMsgType[TxRejected]) { result =>
|
||||
assert(result.cmd == htlcTimeout)
|
||||
assert(result.reason == ConflictingTxConfirmed)
|
||||
}
|
||||
htlcTimeoutPublisher2 ! Stop
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -976,7 +997,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
assert(nextRemoteCommitTx.tx.txOut.length == 6) // 2 main outputs + 2 anchor outputs + 2 htlcs
|
||||
wallet.publishTransaction(nextRemoteCommitTx.tx).pipeTo(probe.ref)
|
||||
probe.expectMsg(nextRemoteCommitTx.tx.txid)
|
||||
generateBlocks(5)
|
||||
generateBlocks(6)
|
||||
|
||||
// Verify that HTLC transactions immediately fail to publish.
|
||||
setFeerate(FeeratePerKw(15_000 sat))
|
||||
|
@ -1055,9 +1076,9 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val htlcSuccessTx = getMempoolTxs(1).head
|
||||
val htlcSuccessTargetFee = Transactions.weight2fee(targetFeerate, htlcSuccessTx.weight.toInt)
|
||||
assert(htlcSuccessTargetFee * 0.9 <= htlcSuccessTx.fees && htlcSuccessTx.fees <= htlcSuccessTargetFee * 1.2, s"actualFee=${htlcSuccessTx.fees} targetFee=$htlcSuccessTargetFee")
|
||||
assert(htlcSuccessTx.fees <= htlcSuccess.txInfo.input.txOut.amount)
|
||||
assert(htlcSuccessTx.fees <= htlcSuccess.txInfo.amountIn)
|
||||
|
||||
generateBlocks(4)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val htlcSuccessResult = probe.expectMsgType[TxConfirmed]
|
||||
assert(htlcSuccessResult.cmd == htlcSuccess)
|
||||
|
@ -1083,9 +1104,9 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val htlcTimeoutTx = getMempoolTxs(1).head
|
||||
val htlcTimeoutTargetFee = Transactions.weight2fee(targetFeerate, htlcTimeoutTx.weight.toInt)
|
||||
assert(htlcTimeoutTargetFee * 0.9 <= htlcTimeoutTx.fees && htlcTimeoutTx.fees <= htlcTimeoutTargetFee * 1.2, s"actualFee=${htlcTimeoutTx.fees} targetFee=$htlcTimeoutTargetFee")
|
||||
assert(htlcTimeoutTx.fees <= htlcTimeout.txInfo.input.txOut.amount)
|
||||
assert(htlcTimeoutTx.fees <= htlcTimeout.txInfo.amountIn)
|
||||
|
||||
generateBlocks(4)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val htlcTimeoutResult = probe.expectMsgType[TxConfirmed]
|
||||
assert(htlcTimeoutResult.cmd == htlcTimeout)
|
||||
|
@ -1423,7 +1444,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
})
|
||||
|
||||
// the first publishing attempt succeeds
|
||||
generateBlocks(5)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
assert(probe.expectMsgType[TxConfirmed].cmd == htlcSuccess)
|
||||
publisher1 ! Stop
|
||||
|
@ -1472,26 +1493,46 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val claimHtlcSuccess = alice2blockchain.expectMsgType[PublishReplaceableTx]
|
||||
assert(claimHtlcSuccess.txInfo.isInstanceOf[ClaimHtlcSuccessTx])
|
||||
|
||||
// Ensure local commit tx confirms.
|
||||
// The local commit tx has a few confirmations, but isn't deeply confirmed yet.
|
||||
wallet.publishTransaction(localCommitTx.tx).pipeTo(probe.ref)
|
||||
probe.expectMsg(localCommitTx.tx.txid)
|
||||
generateBlocks(5)
|
||||
generateBlocks(3)
|
||||
|
||||
// Verify that Claim-HTLC transactions immediately fail to publish.
|
||||
// Verify that Claim-HTLC transactions aren't published, but are retried in case a reorg makes the remote commit confirm.
|
||||
setFeerate(FeeratePerKw(5_000 sat))
|
||||
val claimHtlcSuccessPublisher = createPublisher()
|
||||
claimHtlcSuccessPublisher ! Publish(probe.ref, claimHtlcSuccess)
|
||||
val result1 = probe.expectMsgType[TxRejected]
|
||||
assert(result1.cmd == claimHtlcSuccess)
|
||||
assert(result1.reason == ConflictingTxConfirmed)
|
||||
claimHtlcSuccessPublisher ! Stop
|
||||
val claimHtlcSuccessPublisher1 = createPublisher()
|
||||
claimHtlcSuccessPublisher1 ! Publish(probe.ref, claimHtlcSuccess)
|
||||
inside(probe.expectMsgType[TxRejected]) { result =>
|
||||
assert(result.cmd == claimHtlcSuccess)
|
||||
assert(result.reason == TxSkipped(retryNextBlock = true))
|
||||
}
|
||||
claimHtlcSuccessPublisher1 ! Stop
|
||||
|
||||
val claimHtlcTimeoutPublisher = createPublisher()
|
||||
claimHtlcTimeoutPublisher ! Publish(probe.ref, claimHtlcTimeout)
|
||||
val result2 = probe.expectMsgType[TxRejected]
|
||||
assert(result2.cmd == claimHtlcTimeout)
|
||||
assert(result2.reason == ConflictingTxConfirmed)
|
||||
claimHtlcTimeoutPublisher ! Stop
|
||||
val claimHtlcTimeoutPublisher1 = createPublisher()
|
||||
claimHtlcTimeoutPublisher1 ! Publish(probe.ref, claimHtlcTimeout)
|
||||
inside(probe.expectMsgType[TxRejected]) { result =>
|
||||
assert(result.cmd == claimHtlcTimeout)
|
||||
assert(result.reason == TxSkipped(retryNextBlock = true))
|
||||
}
|
||||
claimHtlcTimeoutPublisher1 ! Stop
|
||||
|
||||
// Once the local commit is deeply confirmed, we stop trying to publish Claim-HTLC transactions.
|
||||
generateBlocks(3)
|
||||
val claimHtlcSuccessPublisher2 = createPublisher()
|
||||
claimHtlcSuccessPublisher2 ! Publish(probe.ref, claimHtlcSuccess)
|
||||
inside(probe.expectMsgType[TxRejected]) { result =>
|
||||
assert(result.cmd == claimHtlcSuccess)
|
||||
assert(result.reason == ConflictingTxConfirmed)
|
||||
}
|
||||
claimHtlcSuccessPublisher2 ! Stop
|
||||
|
||||
val claimHtlcTimeoutPublisher2 = createPublisher()
|
||||
claimHtlcTimeoutPublisher2 ! Publish(probe.ref, claimHtlcTimeout)
|
||||
inside(probe.expectMsgType[TxRejected]) { result =>
|
||||
assert(result.cmd == claimHtlcTimeout)
|
||||
assert(result.reason == ConflictingTxConfirmed)
|
||||
}
|
||||
claimHtlcTimeoutPublisher2 ! Stop
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1558,7 +1599,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val claimHtlcSuccessTargetFee = Transactions.weight2fee(targetFeerate, claimHtlcSuccessTx.weight.toInt)
|
||||
assert(claimHtlcSuccessTargetFee * 0.9 <= claimHtlcSuccessTx.fees && claimHtlcSuccessTx.fees <= claimHtlcSuccessTargetFee * 1.1, s"actualFee=${claimHtlcSuccessTx.fees} targetFee=$claimHtlcSuccessTargetFee")
|
||||
|
||||
generateBlocks(4)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val claimHtlcSuccessResult = probe.expectMsgType[TxConfirmed]
|
||||
assert(claimHtlcSuccessResult.cmd == claimHtlcSuccess)
|
||||
|
@ -1585,7 +1626,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val claimHtlcTimeoutTargetFee = Transactions.weight2fee(targetFeerate, claimHtlcTimeoutTx.weight.toInt)
|
||||
assert(claimHtlcTimeoutTargetFee * 0.9 <= claimHtlcTimeoutTx.fees && claimHtlcTimeoutTx.fees <= claimHtlcTimeoutTargetFee * 1.1, s"actualFee=${claimHtlcTimeoutTx.fees} targetFee=$claimHtlcTimeoutTargetFee")
|
||||
|
||||
generateBlocks(4)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val claimHtlcTimeoutResult = probe.expectMsgType[TxConfirmed]
|
||||
assert(claimHtlcTimeoutResult.cmd == claimHtlcTimeout)
|
||||
|
@ -1650,7 +1691,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val claimHtlcSuccessTx = getMempoolTxs(1).head
|
||||
val claimHtlcSuccessTargetFee = Transactions.weight2fee(targetFeerate, claimHtlcSuccessTx.weight.toInt)
|
||||
assert(claimHtlcSuccessTargetFee * 0.9 <= claimHtlcSuccessTx.fees && claimHtlcSuccessTx.fees <= claimHtlcSuccessTargetFee * 1.1, s"actualFee=${claimHtlcSuccessTx.fees} targetFee=$claimHtlcSuccessTargetFee")
|
||||
generateBlocks(4)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val claimHtlcSuccessResult = probe.expectMsgType[TxConfirmed]
|
||||
assert(claimHtlcSuccessResult.cmd == claimHtlcSuccess)
|
||||
|
@ -1667,7 +1708,7 @@ class ReplaceableTxPublisherSpec extends TestKitBaseClass with AnyFunSuiteLike w
|
|||
val claimHtlcTimeoutTargetFee = Transactions.weight2fee(targetFeerate, claimHtlcTimeoutTx.weight.toInt)
|
||||
assert(claimHtlcTimeoutTargetFee * 0.9 <= claimHtlcTimeoutTx.fees && claimHtlcTimeoutTx.fees <= claimHtlcTimeoutTargetFee * 1.1, s"actualFee=${claimHtlcTimeoutTx.fees} targetFee=$claimHtlcTimeoutTargetFee")
|
||||
|
||||
generateBlocks(4)
|
||||
generateBlocks(6)
|
||||
system.eventStream.publish(CurrentBlockHeight(currentBlockHeight(probe)))
|
||||
val claimHtlcTimeoutResult = probe.expectMsgType[TxConfirmed]
|
||||
assert(claimHtlcTimeoutResult.cmd == claimHtlcTimeout)
|
||||
|
|
|
@ -74,7 +74,7 @@ class TxPublisherSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike {
|
|||
import f._
|
||||
|
||||
val tx = Transaction(2, TxIn(OutPoint(randomTxId(), 1), Nil, 0) :: Nil, Nil, 0)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, "final-tx", 5 sat, None)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, 100_000 sat, "final-tx", 5 sat, None)
|
||||
txPublisher ! cmd
|
||||
val child = factory.expectMsgType[FinalTxPublisherSpawned].actor
|
||||
assert(child.expectMsgType[FinalTxPublisher.Publish].cmd == cmd)
|
||||
|
@ -85,7 +85,7 @@ class TxPublisherSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike {
|
|||
|
||||
val input = OutPoint(randomTxId(), 1)
|
||||
val tx1 = Transaction(2, TxIn(input, Nil, 0) :: Nil, Nil, 0)
|
||||
val cmd1 = PublishFinalTx(tx1, input, "final-tx", 10 sat, None)
|
||||
val cmd1 = PublishFinalTx(tx1, input, 100_000 sat, "final-tx", 10 sat, None)
|
||||
txPublisher ! cmd1
|
||||
factory.expectMsgType[FinalTxPublisherSpawned]
|
||||
|
||||
|
@ -95,7 +95,7 @@ class TxPublisherSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike {
|
|||
|
||||
// But a different tx spending the same main input is allowed:
|
||||
val tx2 = tx1.copy(txIn = tx1.txIn ++ Seq(TxIn(OutPoint(randomTxId(), 0), Nil, 0)))
|
||||
val cmd2 = PublishFinalTx(tx2, input, "another-final-tx", 0 sat, None)
|
||||
val cmd2 = PublishFinalTx(tx2, input, 100_000 sat, "another-final-tx", 0 sat, None)
|
||||
txPublisher ! cmd2
|
||||
factory.expectMsgType[FinalTxPublisherSpawned]
|
||||
}
|
||||
|
@ -164,13 +164,13 @@ class TxPublisherSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike {
|
|||
|
||||
val input = OutPoint(randomTxId(), 3)
|
||||
val tx1 = Transaction(2, TxIn(input, Nil, 0) :: Nil, Nil, 0)
|
||||
val cmd1 = PublishFinalTx(tx1, input, "final-tx-1", 5 sat, None)
|
||||
val cmd1 = PublishFinalTx(tx1, input, 100_000 sat, "final-tx-1", 5 sat, None)
|
||||
txPublisher ! cmd1
|
||||
val attempt1 = factory.expectMsgType[FinalTxPublisherSpawned].actor
|
||||
attempt1.expectMsgType[FinalTxPublisher.Publish]
|
||||
|
||||
val tx2 = Transaction(2, TxIn(input, Nil, 0) :: TxIn(OutPoint(randomTxId(), 0), Nil, 3) :: Nil, Nil, 0)
|
||||
val cmd2 = PublishFinalTx(tx2, input, "final-tx-2", 15 sat, None)
|
||||
val cmd2 = PublishFinalTx(tx2, input, 100_000 sat, "final-tx-2", 15 sat, None)
|
||||
txPublisher ! cmd2
|
||||
val attempt2 = factory.expectMsgType[FinalTxPublisherSpawned].actor
|
||||
attempt2.expectMsgType[FinalTxPublisher.Publish]
|
||||
|
@ -192,7 +192,7 @@ class TxPublisherSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike {
|
|||
|
||||
val input = OutPoint(randomTxId(), 3)
|
||||
val tx1 = Transaction(2, TxIn(input, Nil, 0) :: Nil, Nil, 0)
|
||||
val cmd1 = PublishFinalTx(tx1, input, "final-tx-1", 0 sat, None)
|
||||
val cmd1 = PublishFinalTx(tx1, input, 100_000 sat, "final-tx-1", 0 sat, None)
|
||||
txPublisher ! cmd1
|
||||
val attempt1 = factory.expectMsgType[FinalTxPublisherSpawned]
|
||||
attempt1.actor.expectMsgType[FinalTxPublisher.Publish]
|
||||
|
@ -216,7 +216,7 @@ class TxPublisherSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike {
|
|||
|
||||
val input = OutPoint(randomTxId(), 3)
|
||||
val tx = Transaction(2, TxIn(input, Nil, 0) :: Nil, Nil, 0)
|
||||
val cmd = PublishFinalTx(tx, input, "final-tx", 0 sat, None)
|
||||
val cmd = PublishFinalTx(tx, input, 100_000 sat, "final-tx", 0 sat, None)
|
||||
txPublisher ! cmd
|
||||
val attempt1 = factory.expectMsgType[FinalTxPublisherSpawned]
|
||||
attempt1.actor.expectMsgType[FinalTxPublisher.Publish]
|
||||
|
@ -256,13 +256,13 @@ class TxPublisherSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike {
|
|||
import f._
|
||||
|
||||
val tx1 = Transaction(2, TxIn(OutPoint(randomTxId(), 1), Nil, 0) :: Nil, Nil, 0)
|
||||
val cmd1 = PublishFinalTx(tx1, tx1.txIn.head.outPoint, "final-tx-1", 0 sat, None)
|
||||
val cmd1 = PublishFinalTx(tx1, tx1.txIn.head.outPoint, 100_000 sat, "final-tx-1", 0 sat, None)
|
||||
txPublisher ! cmd1
|
||||
val attempt1 = factory.expectMsgType[FinalTxPublisherSpawned]
|
||||
attempt1.actor.expectMsgType[FinalTxPublisher.Publish]
|
||||
|
||||
val tx2 = Transaction(2, TxIn(OutPoint(randomTxId(), 0), Nil, 0) :: Nil, Nil, 0)
|
||||
val cmd2 = PublishFinalTx(tx2, tx2.txIn.head.outPoint, "final-tx-2", 5 sat, None)
|
||||
val cmd2 = PublishFinalTx(tx2, tx2.txIn.head.outPoint, 100_000 sat, "final-tx-2", 5 sat, None)
|
||||
txPublisher ! cmd2
|
||||
val attempt2 = factory.expectMsgType[FinalTxPublisherSpawned]
|
||||
attempt2.actor.expectMsgType[FinalTxPublisher.Publish]
|
||||
|
@ -283,7 +283,7 @@ class TxPublisherSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike {
|
|||
import f._
|
||||
|
||||
val tx = Transaction(2, TxIn(OutPoint(randomTxId(), 1), Nil, 0) :: Nil, Nil, 0)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, "final-tx", 5 sat, None)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, 100_000 sat, "final-tx", 5 sat, None)
|
||||
txPublisher ! cmd
|
||||
val attempt = factory.expectMsgType[FinalTxPublisherSpawned]
|
||||
attempt.actor.expectMsgType[FinalTxPublisher.Publish]
|
||||
|
@ -320,7 +320,7 @@ class TxPublisherSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike {
|
|||
import f._
|
||||
|
||||
val tx = Transaction(2, TxIn(OutPoint(randomTxId(), 1), Nil, 0) :: Nil, Nil, 0)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, "final-tx", 5 sat, None)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, 100_000 sat, "final-tx", 5 sat, None)
|
||||
txPublisher ! cmd
|
||||
val attempt = factory.expectMsgType[FinalTxPublisherSpawned]
|
||||
attempt.actor.expectMsgType[FinalTxPublisher.Publish]
|
||||
|
@ -337,7 +337,7 @@ class TxPublisherSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike {
|
|||
import f._
|
||||
|
||||
val tx = Transaction(2, TxIn(OutPoint(randomTxId(), 1), Nil, 0) :: Nil, Nil, 0)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, "final-tx", 5 sat, None)
|
||||
val cmd = PublishFinalTx(tx, tx.txIn.head.outPoint, 100_000 sat, "final-tx", 5 sat, None)
|
||||
txPublisher ! cmd
|
||||
val attempt = factory.expectMsgType[FinalTxPublisherSpawned]
|
||||
attempt.actor.expectMsgType[FinalTxPublisher.Publish]
|
||||
|
|
|
@ -31,11 +31,12 @@ import fr.acinq.eclair.blockchain.{DummyOnChainWallet, OnChainWallet, OnchainPub
|
|||
import fr.acinq.eclair.channel._
|
||||
import fr.acinq.eclair.channel.fsm.Channel
|
||||
import fr.acinq.eclair.channel.publish.TxPublisher
|
||||
import fr.acinq.eclair.channel.publish.TxPublisher.PublishReplaceableTx
|
||||
import fr.acinq.eclair.channel.publish.TxPublisher.{PublishFinalTx, PublishReplaceableTx}
|
||||
import fr.acinq.eclair.channel.states.ChannelStateTestsBase.FakeTxPublisherFactory
|
||||
import fr.acinq.eclair.payment.send.SpontaneousRecipient
|
||||
import fr.acinq.eclair.payment.{Invoice, OutgoingPaymentPacket}
|
||||
import fr.acinq.eclair.router.Router.{ChannelHop, HopRelayParams, Route}
|
||||
import fr.acinq.eclair.testutils.PimpTestProbe.convert
|
||||
import fr.acinq.eclair.transactions.Transactions
|
||||
import fr.acinq.eclair.transactions.Transactions._
|
||||
import fr.acinq.eclair.wire.protocol._
|
||||
|
@ -93,8 +94,10 @@ object ChannelStateTestsTags {
|
|||
val RejectRbfAttempts = "reject_rbf_attempts"
|
||||
/** If set, the non-initiator will require a 1-block delay between RBF attempts. */
|
||||
val DelayRbfAttempts = "delay_rbf_attempts"
|
||||
/** If set, channels will adapt their max HTLC amount to the available balance */
|
||||
val AdaptMaxHtlcAmount = "adapt-max-htlc-amount"
|
||||
/** If set, channels will adapt their max HTLC amount to the available balance. */
|
||||
val AdaptMaxHtlcAmount = "adapt_max_htlc_amount"
|
||||
/** If set, closing will use option_simple_close. */
|
||||
val SimpleClose = "option_simple_close"
|
||||
}
|
||||
|
||||
trait ChannelStateTestsBase extends Assertions with Eventually {
|
||||
|
@ -190,6 +193,7 @@ trait ChannelStateTestsBase extends Assertions with Eventually {
|
|||
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.ZeroConf))(_.updated(Features.ZeroConf, FeatureSupport.Optional))
|
||||
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.ScidAlias))(_.updated(Features.ScidAlias, FeatureSupport.Optional))
|
||||
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DualFunding))(_.updated(Features.DualFunding, FeatureSupport.Optional))
|
||||
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.SimpleClose))(_.updated(Features.SimpleClose, FeatureSupport.Optional))
|
||||
.initFeatures()
|
||||
val bobInitFeatures = Bob.nodeParams.features
|
||||
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DisableWumbo))(_.removed(Features.Wumbo))
|
||||
|
@ -202,6 +206,7 @@ trait ChannelStateTestsBase extends Assertions with Eventually {
|
|||
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.ZeroConf))(_.updated(Features.ZeroConf, FeatureSupport.Optional))
|
||||
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.ScidAlias))(_.updated(Features.ScidAlias, FeatureSupport.Optional))
|
||||
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DualFunding))(_.updated(Features.DualFunding, FeatureSupport.Optional))
|
||||
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.SimpleClose))(_.updated(Features.SimpleClose, FeatureSupport.Optional))
|
||||
.initFeatures()
|
||||
|
||||
val channelType = ChannelTypes.defaultFromFeatures(aliceInitFeatures, bobInitFeatures, announceChannel = channelFlags.announceChannel)
|
||||
|
@ -511,23 +516,41 @@ trait ChannelStateTestsBase extends Assertions with Eventually {
|
|||
s2r.forward(r)
|
||||
r2s.expectMsgType[Shutdown]
|
||||
r2s.forward(s)
|
||||
// agreeing on a closing fee
|
||||
var sCloseFee, rCloseFee = 0.sat
|
||||
do {
|
||||
sCloseFee = s2r.expectMsgType[ClosingSigned].feeSatoshis
|
||||
if (s.stateData.asInstanceOf[ChannelDataWithCommitments].commitments.params.localParams.initFeatures.hasFeature(Features.SimpleClose)) {
|
||||
s2r.expectMsgType[ClosingComplete]
|
||||
s2r.forward(r)
|
||||
rCloseFee = r2s.expectMsgType[ClosingSigned].feeSatoshis
|
||||
r2s.expectMsgType[ClosingComplete]
|
||||
r2s.forward(s)
|
||||
} while (sCloseFee != rCloseFee)
|
||||
s2blockchain.expectMsgType[TxPublisher.PublishTx]
|
||||
s2blockchain.expectMsgType[WatchTxConfirmed]
|
||||
r2blockchain.expectMsgType[TxPublisher.PublishTx]
|
||||
r2blockchain.expectMsgType[WatchTxConfirmed]
|
||||
eventually {
|
||||
assert(s.stateName == CLOSING)
|
||||
assert(r.stateName == CLOSING)
|
||||
r2s.expectMsgType[ClosingSig]
|
||||
r2s.forward(s)
|
||||
val sTx = r2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
r2blockchain.expectWatchTxConfirmed(sTx.txid)
|
||||
s2r.expectMsgType[ClosingSig]
|
||||
s2r.forward(r)
|
||||
val rTx = s2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
s2blockchain.expectWatchTxConfirmed(rTx.txid)
|
||||
assert(s2blockchain.expectMsgType[PublishFinalTx].tx.txid == sTx.txid)
|
||||
s2blockchain.expectWatchTxConfirmed(sTx.txid)
|
||||
assert(r2blockchain.expectMsgType[PublishFinalTx].tx.txid == rTx.txid)
|
||||
r2blockchain.expectWatchTxConfirmed(rTx.txid)
|
||||
} else {
|
||||
// agreeing on a closing fee
|
||||
var sCloseFee, rCloseFee = 0.sat
|
||||
do {
|
||||
sCloseFee = s2r.expectMsgType[ClosingSigned].feeSatoshis
|
||||
s2r.forward(r)
|
||||
rCloseFee = r2s.expectMsgType[ClosingSigned].feeSatoshis
|
||||
r2s.forward(s)
|
||||
} while (sCloseFee != rCloseFee)
|
||||
s2blockchain.expectMsgType[TxPublisher.PublishTx]
|
||||
s2blockchain.expectMsgType[WatchTxConfirmed]
|
||||
r2blockchain.expectMsgType[TxPublisher.PublishTx]
|
||||
r2blockchain.expectMsgType[WatchTxConfirmed]
|
||||
eventually {
|
||||
assert(s.stateName == CLOSING)
|
||||
assert(r.stateName == CLOSING)
|
||||
}
|
||||
}
|
||||
// both nodes are now in CLOSING state with a mutual close tx pending for confirmation
|
||||
}
|
||||
|
||||
def localClose(s: TestFSMRef[ChannelState, ChannelData, Channel], s2blockchain: TestProbe): LocalCommitPublished = {
|
||||
|
|
|
@ -86,7 +86,7 @@ class WaitForFundingCreatedStateSpec extends TestKitBaseClass with FixtureAnyFun
|
|||
bob2alice.expectMsgType[FundingSigned]
|
||||
bob2blockchain.expectMsgType[TxPublisher.SetChannelId]
|
||||
val watchConfirmed = bob2blockchain.expectMsgType[WatchFundingConfirmed]
|
||||
assert(watchConfirmed.minDepth == Bob.nodeParams.channelConf.minDepthFunding)
|
||||
assert(watchConfirmed.minDepth == Bob.nodeParams.channelConf.minDepth)
|
||||
}
|
||||
|
||||
test("recv FundingCreated (large channel)", Tag(LargeChannel)) { f =>
|
||||
|
@ -98,7 +98,7 @@ class WaitForFundingCreatedStateSpec extends TestKitBaseClass with FixtureAnyFun
|
|||
bob2blockchain.expectMsgType[TxPublisher.SetChannelId]
|
||||
val watchConfirmed = bob2blockchain.expectMsgType[WatchFundingConfirmed]
|
||||
// when we are fundee, we use a higher min depth for wumbo channels
|
||||
assert(watchConfirmed.minDepth > Bob.nodeParams.channelConf.minDepthFunding)
|
||||
assert(watchConfirmed.minDepth > Bob.nodeParams.channelConf.minDepth)
|
||||
}
|
||||
|
||||
test("recv FundingCreated (funder can't pay fees)", Tag(FunderBelowCommitFees)) { f =>
|
||||
|
|
|
@ -2466,7 +2466,7 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik
|
|||
// The commit confirms, along with Alice's 2nd-stage transactions.
|
||||
watchConfirmedCommit2.replyTo ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, commitTx2)
|
||||
watchConfirmedClaimMainDelayed2.replyTo ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, claimMainDelayed2)
|
||||
watchHtlcsOut.zip(htlcsTxsOut).foreach { case (watch, tx) => watch.replyTo ! WatchOutputSpentTriggered(tx) }
|
||||
watchHtlcsOut.zip(htlcsTxsOut).foreach { case (watch, tx) => watch.replyTo ! WatchOutputSpentTriggered(watch.amount, tx) }
|
||||
htlcsTxsOut.foreach { tx =>
|
||||
alice2blockchain.expectWatchTxConfirmed(tx.txid)
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, tx)
|
||||
|
@ -2551,7 +2551,7 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik
|
|||
watchConfirmedClaimMain.replyTo ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, claimMain)
|
||||
val watchHtlcsOut1 = htlcs.aliceToBob.map(_ => alice2blockchain.expectMsgType[WatchOutputSpent])
|
||||
htlcs.bobToAlice.map(_ => alice2blockchain.expectMsgType[WatchOutputSpent])
|
||||
watchHtlcsOut1.zip(htlcsTxsOut1).foreach { case (watch, tx) => watch.replyTo ! WatchOutputSpentTriggered(tx) }
|
||||
watchHtlcsOut1.zip(htlcsTxsOut1).foreach { case (watch, tx) => watch.replyTo ! WatchOutputSpentTriggered(watch.amount, tx) }
|
||||
htlcsTxsOut1.foreach { tx =>
|
||||
alice2blockchain.expectWatchTxConfirmed(tx.txid)
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, tx)
|
||||
|
@ -2843,7 +2843,7 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik
|
|||
// all penalty txs confirm
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, bobRevokedCommitTx)
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, aliceClaimMain)
|
||||
alice ! WatchOutputSpentTriggered(aliceMainPenalty)
|
||||
alice ! WatchOutputSpentTriggered(aliceMainPenalty.txOut(0).amount, aliceMainPenalty)
|
||||
alice2blockchain.expectWatchTxConfirmed(aliceMainPenalty.txid)
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, aliceMainPenalty)
|
||||
aliceHtlcsPenalty.foreach { tx => alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, tx) }
|
||||
|
|
|
@ -3437,15 +3437,15 @@ class NormalStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
// - 1 tx to claim the main delayed output
|
||||
// - 3 txs for each htlc
|
||||
// NB: 3rd-stage txs will only be published once the htlc txs confirm
|
||||
val claimMain = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
val htlcTx1 = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
val htlcTx2 = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
val htlcTx3 = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
val claimMain = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
val htlcTx1 = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
val htlcTx2 = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
val htlcTx3 = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
// the main delayed output and htlc txs spend the commitment transaction
|
||||
Seq(claimMain, htlcTx1, htlcTx2, htlcTx3).foreach(tx => Transaction.correctlySpends(tx, aliceCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS))
|
||||
Seq(claimMain, htlcTx1, htlcTx2, htlcTx3).foreach(tx => Transaction.correctlySpends(tx.tx, aliceCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS))
|
||||
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == aliceCommitTx.txid)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == claimMain.txid) // main-delayed
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == claimMain.tx.txid) // main-delayed
|
||||
alice2blockchain.expectMsgType[WatchOutputSpent] // htlc 1
|
||||
alice2blockchain.expectMsgType[WatchOutputSpent] // htlc 2
|
||||
alice2blockchain.expectMsgType[WatchOutputSpent] // htlc 3
|
||||
|
@ -3454,11 +3454,11 @@ class NormalStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
|
||||
// 3rd-stage txs are published when htlc txs confirm
|
||||
Seq(htlcTx1, htlcTx2, htlcTx3).foreach { htlcTimeoutTx =>
|
||||
alice ! WatchOutputSpentTriggered(htlcTimeoutTx)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == htlcTimeoutTx.txid)
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(2701), 3, htlcTimeoutTx)
|
||||
alice ! WatchOutputSpentTriggered(htlcTimeoutTx.amount, htlcTimeoutTx.tx)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == htlcTimeoutTx.tx.txid)
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(2701), 3, htlcTimeoutTx.tx)
|
||||
val claimHtlcDelayedTx = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
Transaction.correctlySpends(claimHtlcDelayedTx, htlcTimeoutTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
|
||||
Transaction.correctlySpends(claimHtlcDelayedTx, htlcTimeoutTx.tx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == claimHtlcDelayedTx.txid)
|
||||
}
|
||||
awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get.claimHtlcDelayedTxs.length == 3)
|
||||
|
|
|
@ -20,7 +20,7 @@ import akka.testkit.TestProbe
|
|||
import com.softwaremill.quicklens.ModifyPimp
|
||||
import fr.acinq.bitcoin.ScriptFlags
|
||||
import fr.acinq.bitcoin.scalacompat.Crypto.PrivateKey
|
||||
import fr.acinq.bitcoin.scalacompat.{ByteVector32, ByteVector64, Crypto, SatoshiLong, Transaction}
|
||||
import fr.acinq.bitcoin.scalacompat.{ByteVector32, ByteVector64, Crypto, SatoshiLong, Script, Transaction}
|
||||
import fr.acinq.eclair.blockchain.bitcoind.ZmqWatcher._
|
||||
import fr.acinq.eclair.blockchain.fee.{FeeratePerKw, FeeratesPerKw}
|
||||
import fr.acinq.eclair.blockchain.{CurrentBlockHeight, CurrentFeerates}
|
||||
|
@ -33,7 +33,7 @@ import fr.acinq.eclair.payment.relay.Relayer._
|
|||
import fr.acinq.eclair.payment.send.SpontaneousRecipient
|
||||
import fr.acinq.eclair.transactions.Transactions.ClaimLocalAnchorOutputTx
|
||||
import fr.acinq.eclair.wire.protocol.{AnnouncementSignatures, ChannelUpdate, ClosingSigned, CommitSig, Error, FailureMessageCodecs, FailureReason, PermanentChannelFailure, RevokeAndAck, Shutdown, UpdateAddHtlc, UpdateFailHtlc, UpdateFailMalformedHtlc, UpdateFee, UpdateFulfillHtlc}
|
||||
import fr.acinq.eclair.{BlockHeight, CltvExpiry, CltvExpiryDelta, MilliSatoshiLong, TestConstants, TestKitBaseClass, randomBytes32}
|
||||
import fr.acinq.eclair.{BlockHeight, CltvExpiry, CltvExpiryDelta, MilliSatoshiLong, TestConstants, TestKitBaseClass, randomBytes32, randomKey}
|
||||
import org.scalatest.funsuite.FixtureAnyFunSuiteLike
|
||||
import org.scalatest.{Outcome, Tag}
|
||||
import scodec.bits.ByteVector
|
||||
|
@ -911,6 +911,25 @@ class ShutdownStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike wit
|
|||
assert(alice.stateData.asInstanceOf[DATA_SHUTDOWN].closingFeerates.contains(closingFeerates2))
|
||||
}
|
||||
|
||||
test("recv CMD_CLOSE with updated script") { f =>
|
||||
import f._
|
||||
val sender = TestProbe()
|
||||
val script = Script.write(Script.pay2wpkh(randomKey().publicKey))
|
||||
alice ! CMD_CLOSE(sender.ref, Some(script), None)
|
||||
sender.expectMsgType[RES_FAILURE[CMD_CLOSE, ClosingAlreadyInProgress]]
|
||||
}
|
||||
|
||||
test("recv CMD_CLOSE with updated script (option_simple_close)", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
|
||||
import f._
|
||||
val sender = TestProbe()
|
||||
val script = Script.write(Script.pay2wpkh(randomKey().publicKey))
|
||||
alice ! CMD_CLOSE(sender.ref, Some(script), None)
|
||||
sender.expectMsgType[RES_SUCCESS[CMD_CLOSE]]
|
||||
assert(alice2bob.expectMsgType[Shutdown].scriptPubKey == script)
|
||||
alice2bob.forward(bob)
|
||||
awaitCond(bob.stateData.asInstanceOf[DATA_SHUTDOWN].remoteShutdown.scriptPubKey == script)
|
||||
}
|
||||
|
||||
test("recv CMD_FORCECLOSE") { f =>
|
||||
import f._
|
||||
|
||||
|
@ -927,23 +946,23 @@ class ShutdownStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike wit
|
|||
assert(lcp.htlcTxs.size == 2)
|
||||
assert(lcp.claimHtlcDelayedTxs.isEmpty) // 3rd-stage txs will be published once htlc txs confirm
|
||||
|
||||
val claimMain = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
val htlc1 = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
val htlc2 = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
Seq(claimMain, htlc1, htlc2).foreach(tx => Transaction.correctlySpends(tx, aliceCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS))
|
||||
val claimMain = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
val htlc1 = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
val htlc2 = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
Seq(claimMain, htlc1, htlc2).foreach(tx => Transaction.correctlySpends(tx.tx, aliceCommitTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS))
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == aliceCommitTx.txid)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == claimMain.txid)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == claimMain.tx.txid)
|
||||
alice2blockchain.expectMsgType[WatchOutputSpent]
|
||||
alice2blockchain.expectMsgType[WatchOutputSpent]
|
||||
alice2blockchain.expectNoMessage(1 second)
|
||||
|
||||
// 3rd-stage txs are published when htlc txs confirm
|
||||
Seq(htlc1, htlc2).foreach(htlcTimeoutTx => {
|
||||
alice ! WatchOutputSpentTriggered(htlcTimeoutTx)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == htlcTimeoutTx.txid)
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(2701), 3, htlcTimeoutTx)
|
||||
alice ! WatchOutputSpentTriggered(htlcTimeoutTx.amount, htlcTimeoutTx.tx)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == htlcTimeoutTx.tx.txid)
|
||||
alice ! WatchTxConfirmedTriggered(BlockHeight(2701), 3, htlcTimeoutTx.tx)
|
||||
val claimHtlcDelayedTx = alice2blockchain.expectMsgType[PublishFinalTx].tx
|
||||
Transaction.correctlySpends(claimHtlcDelayedTx, htlcTimeoutTx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
|
||||
Transaction.correctlySpends(claimHtlcDelayedTx, htlcTimeoutTx.tx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
|
||||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == claimHtlcDelayedTx.txid)
|
||||
})
|
||||
awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get.claimHtlcDelayedTxs.length == 2)
|
||||
|
|
|
@ -17,20 +17,22 @@
|
|||
package fr.acinq.eclair.channel.states.g
|
||||
|
||||
import akka.testkit.TestProbe
|
||||
import fr.acinq.bitcoin.scalacompat.{ByteVector32, ByteVector64, Satoshi, SatoshiLong, Transaction}
|
||||
import fr.acinq.bitcoin.scalacompat.{ByteVector32, ByteVector64, Satoshi, SatoshiLong, Script, Transaction}
|
||||
import fr.acinq.eclair.blockchain.bitcoind.ZmqWatcher._
|
||||
import fr.acinq.eclair.blockchain.fee.{FeeratePerKw, FeeratesPerKw}
|
||||
import fr.acinq.eclair.channel.Helpers.Closing
|
||||
import fr.acinq.eclair.channel._
|
||||
import fr.acinq.eclair.channel.fsm.Channel
|
||||
import fr.acinq.eclair.channel.publish.TxPublisher.{PublishFinalTx, PublishTx}
|
||||
import fr.acinq.eclair.channel.publish.TxPublisher.{PublishFinalTx, PublishTx, SetChannelId}
|
||||
import fr.acinq.eclair.channel.states.ChannelStateTestsBase.PimpTestFSM
|
||||
import fr.acinq.eclair.channel.states.{ChannelStateTestsBase, ChannelStateTestsTags}
|
||||
import fr.acinq.eclair.testutils.PimpTestProbe._
|
||||
import fr.acinq.eclair.transactions.Transactions
|
||||
import fr.acinq.eclair.transactions.Transactions.ZeroFeeHtlcTxAnchorOutputsCommitmentFormat
|
||||
import fr.acinq.eclair.wire.protocol.ClosingSignedTlv.FeeRange
|
||||
import fr.acinq.eclair.wire.protocol.{AnnouncementSignatures, ChannelUpdate, ClosingSigned, Error, Shutdown, TlvStream, Warning}
|
||||
import fr.acinq.eclair.{CltvExpiry, Features, MilliSatoshiLong, TestConstants, TestKitBaseClass, randomBytes32}
|
||||
import fr.acinq.eclair.wire.protocol.{AnnouncementSignatures, ChannelUpdate, ClosingComplete, ClosingSig, ClosingSigned, ClosingTlv, Error, Shutdown, TlvStream, Warning}
|
||||
import fr.acinq.eclair.{BlockHeight, CltvExpiry, Features, MilliSatoshiLong, TestConstants, TestKitBaseClass, randomBytes32, randomKey}
|
||||
import org.scalatest.Inside.inside
|
||||
import org.scalatest.funsuite.FixtureAnyFunSuiteLike
|
||||
import org.scalatest.{Outcome, Tag}
|
||||
|
||||
|
@ -63,11 +65,15 @@ class NegotiatingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike
|
|||
alice2bob.forward(bob, aliceShutdown)
|
||||
val bobShutdown = bob2alice.expectMsgType[Shutdown]
|
||||
bob2alice.forward(alice, bobShutdown)
|
||||
awaitCond(alice.stateName == NEGOTIATING)
|
||||
assert(alice.stateData.asInstanceOf[DATA_NEGOTIATING].commitments.params.localParams.upfrontShutdownScript_opt.forall(_ == aliceShutdown.scriptPubKey))
|
||||
|
||||
awaitCond(bob.stateName == NEGOTIATING)
|
||||
assert(bob.stateData.asInstanceOf[DATA_NEGOTIATING].commitments.params.localParams.upfrontShutdownScript_opt.forall(_ == bobShutdown.scriptPubKey))
|
||||
if (alice.stateData.asInstanceOf[ChannelDataWithCommitments].commitments.params.localParams.initFeatures.hasFeature(Features.SimpleClose)) {
|
||||
awaitCond(alice.stateName == NEGOTIATING_SIMPLE)
|
||||
awaitCond(bob.stateName == NEGOTIATING_SIMPLE)
|
||||
} else {
|
||||
awaitCond(alice.stateName == NEGOTIATING)
|
||||
assert(alice.stateData.asInstanceOf[DATA_NEGOTIATING].commitments.params.localParams.upfrontShutdownScript_opt.forall(_ == aliceShutdown.scriptPubKey))
|
||||
awaitCond(bob.stateName == NEGOTIATING)
|
||||
assert(bob.stateData.asInstanceOf[DATA_NEGOTIATING].commitments.params.localParams.upfrontShutdownScript_opt.forall(_ == bobShutdown.scriptPubKey))
|
||||
}
|
||||
}
|
||||
|
||||
def bobClose(f: FixtureParam, feerates: Option[ClosingFeerates] = None): Unit = {
|
||||
|
@ -79,11 +85,15 @@ class NegotiatingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike
|
|||
bob2alice.forward(alice, bobShutdown)
|
||||
val aliceShutdown = alice2bob.expectMsgType[Shutdown]
|
||||
alice2bob.forward(bob, aliceShutdown)
|
||||
awaitCond(alice.stateName == NEGOTIATING)
|
||||
assert(alice.stateData.asInstanceOf[DATA_NEGOTIATING].commitments.params.localParams.upfrontShutdownScript_opt.forall(_ == aliceShutdown.scriptPubKey))
|
||||
|
||||
awaitCond(bob.stateName == NEGOTIATING)
|
||||
assert(bob.stateData.asInstanceOf[DATA_NEGOTIATING].commitments.params.localParams.upfrontShutdownScript_opt.forall(_ == bobShutdown.scriptPubKey))
|
||||
if (bob.stateData.asInstanceOf[ChannelDataWithCommitments].commitments.params.localParams.initFeatures.hasFeature(Features.SimpleClose)) {
|
||||
awaitCond(alice.stateName == NEGOTIATING_SIMPLE)
|
||||
awaitCond(bob.stateName == NEGOTIATING_SIMPLE)
|
||||
} else {
|
||||
awaitCond(alice.stateName == NEGOTIATING)
|
||||
assert(alice.stateData.asInstanceOf[DATA_NEGOTIATING].commitments.params.localParams.upfrontShutdownScript_opt.forall(_ == aliceShutdown.scriptPubKey))
|
||||
awaitCond(bob.stateName == NEGOTIATING)
|
||||
assert(bob.stateData.asInstanceOf[DATA_NEGOTIATING].commitments.params.localParams.upfrontShutdownScript_opt.forall(_ == bobShutdown.scriptPubKey))
|
||||
}
|
||||
}
|
||||
|
||||
def buildFeerates(feerate: FeeratePerKw, minFeerate: FeeratePerKw = FeeratePerKw(250 sat)): FeeratesPerKw =
|
||||
|
@ -473,6 +483,211 @@ class NegotiatingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike
|
|||
bob2blockchain.expectMsgType[WatchTxConfirmed]
|
||||
}
|
||||
|
||||
test("recv ClosingComplete (both outputs)", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
|
||||
import f._
|
||||
aliceClose(f)
|
||||
val aliceClosingComplete = alice2bob.expectMsgType[ClosingComplete]
|
||||
assert(aliceClosingComplete.fees > 0.sat)
|
||||
assert(aliceClosingComplete.closerAndCloseeOutputsSig_opt.nonEmpty)
|
||||
assert(aliceClosingComplete.closerOutputOnlySig_opt.nonEmpty)
|
||||
assert(aliceClosingComplete.closeeOutputOnlySig_opt.isEmpty)
|
||||
val bobClosingComplete = bob2alice.expectMsgType[ClosingComplete]
|
||||
assert(bobClosingComplete.fees > 0.sat)
|
||||
assert(bobClosingComplete.closerAndCloseeOutputsSig_opt.nonEmpty)
|
||||
assert(bobClosingComplete.closerOutputOnlySig_opt.nonEmpty)
|
||||
assert(bobClosingComplete.closeeOutputOnlySig_opt.isEmpty)
|
||||
|
||||
alice2bob.forward(bob, aliceClosingComplete)
|
||||
val bobClosingSig = bob2alice.expectMsgType[ClosingSig]
|
||||
assert(bobClosingSig.fees == aliceClosingComplete.fees)
|
||||
assert(bobClosingSig.lockTime == aliceClosingComplete.lockTime)
|
||||
bob2alice.forward(alice, bobClosingSig)
|
||||
val aliceTx = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
assert(aliceTx.desc == "closing")
|
||||
assert(aliceTx.fee > 0.sat)
|
||||
alice2blockchain.expectWatchTxConfirmed(aliceTx.tx.txid)
|
||||
inside(bob2blockchain.expectMsgType[PublishFinalTx]) { p =>
|
||||
assert(p.tx.txid == aliceTx.tx.txid)
|
||||
assert(p.fee == 0.sat)
|
||||
}
|
||||
bob2blockchain.expectWatchTxConfirmed(aliceTx.tx.txid)
|
||||
assert(alice.stateName == NEGOTIATING_SIMPLE)
|
||||
|
||||
bob2alice.forward(alice, bobClosingComplete)
|
||||
val aliceClosingSig = alice2bob.expectMsgType[ClosingSig]
|
||||
assert(aliceClosingSig.fees == bobClosingComplete.fees)
|
||||
assert(aliceClosingSig.lockTime == bobClosingComplete.lockTime)
|
||||
alice2bob.forward(bob, aliceClosingSig)
|
||||
val bobTx = bob2blockchain.expectMsgType[PublishFinalTx]
|
||||
assert(bobTx.desc == "closing")
|
||||
assert(bobTx.fee > 0.sat)
|
||||
bob2blockchain.expectWatchTxConfirmed(bobTx.tx.txid)
|
||||
inside(alice2blockchain.expectMsgType[PublishFinalTx]) { p =>
|
||||
assert(p.tx.txid == bobTx.tx.txid)
|
||||
assert(p.fee == 0.sat)
|
||||
}
|
||||
assert(aliceTx.tx.txid != bobTx.tx.txid)
|
||||
alice2blockchain.expectWatchTxConfirmed(bobTx.tx.txid)
|
||||
assert(bob.stateName == NEGOTIATING_SIMPLE)
|
||||
}
|
||||
|
||||
test("recv ClosingComplete (single output)", Tag(ChannelStateTestsTags.SimpleClose), Tag(ChannelStateTestsTags.NoPushAmount)) { f =>
|
||||
import f._
|
||||
aliceClose(f)
|
||||
val closingComplete = alice2bob.expectMsgType[ClosingComplete]
|
||||
assert(closingComplete.closerAndCloseeOutputsSig_opt.isEmpty)
|
||||
assert(closingComplete.closerOutputOnlySig_opt.nonEmpty)
|
||||
assert(closingComplete.closeeOutputOnlySig_opt.isEmpty)
|
||||
// Bob has nothing at stake.
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
|
||||
alice2bob.forward(bob, closingComplete)
|
||||
bob2alice.expectMsgType[ClosingSig]
|
||||
bob2alice.forward(alice)
|
||||
val closingTx = alice2blockchain.expectMsgType[PublishFinalTx]
|
||||
assert(bob2blockchain.expectMsgType[PublishFinalTx].tx.txid == closingTx.tx.txid)
|
||||
alice2blockchain.expectWatchTxConfirmed(closingTx.tx.txid)
|
||||
bob2blockchain.expectWatchTxConfirmed(closingTx.tx.txid)
|
||||
assert(alice.stateName == NEGOTIATING_SIMPLE)
|
||||
assert(bob.stateName == NEGOTIATING_SIMPLE)
|
||||
}
|
||||
|
||||
test("recv ClosingComplete (single output, trimmed)", Tag(ChannelStateTestsTags.SimpleClose), Tag(ChannelStateTestsTags.NoPushAmount)) { f =>
|
||||
import f._
|
||||
val (r, htlc) = addHtlc(250_000 msat, alice, bob, alice2bob, bob2alice)
|
||||
crossSign(alice, bob, alice2bob, bob2alice)
|
||||
fulfillHtlc(htlc.id, r, bob, alice, bob2alice, alice2bob)
|
||||
crossSign(bob, alice, bob2alice, alice2bob)
|
||||
|
||||
aliceClose(f)
|
||||
val aliceClosingComplete = alice2bob.expectMsgType[ClosingComplete]
|
||||
assert(aliceClosingComplete.closerAndCloseeOutputsSig_opt.isEmpty)
|
||||
assert(aliceClosingComplete.closerOutputOnlySig_opt.nonEmpty)
|
||||
assert(aliceClosingComplete.closeeOutputOnlySig_opt.isEmpty)
|
||||
val bobClosingComplete = bob2alice.expectMsgType[ClosingComplete]
|
||||
assert(bobClosingComplete.closerAndCloseeOutputsSig_opt.isEmpty)
|
||||
assert(bobClosingComplete.closerOutputOnlySig_opt.isEmpty)
|
||||
assert(bobClosingComplete.closeeOutputOnlySig_opt.nonEmpty)
|
||||
|
||||
bob2alice.forward(alice, bobClosingComplete)
|
||||
val aliceClosingSig = alice2bob.expectMsgType[ClosingSig]
|
||||
alice2bob.forward(bob, aliceClosingSig)
|
||||
val bobTx = bob2blockchain.expectMsgType[PublishFinalTx]
|
||||
assert(alice2blockchain.expectMsgType[PublishFinalTx].tx.txid == bobTx.tx.txid)
|
||||
bob2blockchain.expectWatchTxConfirmed(bobTx.tx.txid)
|
||||
alice2blockchain.expectWatchTxConfirmed(bobTx.tx.txid)
|
||||
assert(alice.stateName == NEGOTIATING_SIMPLE)
|
||||
assert(bob.stateName == NEGOTIATING_SIMPLE)
|
||||
}
|
||||
|
||||
test("recv ClosingComplete (missing closee output)", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
|
||||
import f._
|
||||
aliceClose(f)
|
||||
val aliceClosingComplete = alice2bob.expectMsgType[ClosingComplete]
|
||||
val bobClosingComplete = bob2alice.expectMsgType[ClosingComplete]
|
||||
alice2bob.forward(bob, aliceClosingComplete.copy(tlvStream = TlvStream(ClosingTlv.CloserOutputOnly(aliceClosingComplete.closerOutputOnlySig_opt.get))))
|
||||
// Bob expects to receive a signature for a closing transaction containing his output, so he ignores Alice's
|
||||
// closing_complete instead of sending back his closing_sig.
|
||||
bob2alice.expectMsgType[Warning]
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
bob2alice.forward(alice, bobClosingComplete)
|
||||
val aliceClosingSig = alice2bob.expectMsgType[ClosingSig]
|
||||
alice2bob.forward(bob, aliceClosingSig.copy(tlvStream = TlvStream(ClosingTlv.CloseeOutputOnly(aliceClosingSig.closerAndCloseeOutputsSig_opt.get))))
|
||||
bob2alice.expectMsgType[Warning]
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
bob2blockchain.expectNoMessage(100 millis)
|
||||
}
|
||||
|
||||
// Both peers negotiate a simple close (option_simple_close), then concurrently send new
// closing_complete messages that each update their own script: a closing_complete (or
// closing_sig) built against a stale script must be ignored with a warning, and only the
// message matching the latest scripts produces a new signed closing transaction.
test("recv ClosingComplete (with concurrent script update)", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
  import f._
  aliceClose(f)
  // Initial round: both sides exchange closing_complete / closing_sig and each side
  // publishes and watches both closing transactions.
  alice2bob.expectMsgType[ClosingComplete]
  alice2bob.forward(bob)
  bob2alice.expectMsgType[ClosingComplete]
  bob2alice.forward(alice)
  val aliceTx1 = bob2blockchain.expectMsgType[PublishFinalTx]
  assert(bob2blockchain.expectMsgType[WatchTxConfirmed].txId == aliceTx1.tx.txid)
  val bobTx1 = alice2blockchain.expectMsgType[PublishFinalTx]
  assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == bobTx1.tx.txid)
  alice2bob.expectMsgType[ClosingSig]
  alice2bob.forward(bob)
  assert(bob2blockchain.expectMsgType[PublishFinalTx].tx.txid == bobTx1.tx.txid)
  assert(bob2blockchain.expectMsgType[WatchTxConfirmed].txId == bobTx1.tx.txid)
  bob2alice.expectMsgType[ClosingSig]
  bob2alice.forward(alice)
  assert(alice2blockchain.expectMsgType[PublishFinalTx].tx.txid == aliceTx1.tx.txid)
  assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == aliceTx1.tx.txid)
  // Snapshot the scripts currently committed in each side's NEGOTIATING_SIMPLE state.
  val aliceScript1 = alice.stateData.asInstanceOf[DATA_NEGOTIATING_SIMPLE].localScriptPubKey
  val bobScript1 = bob.stateData.asInstanceOf[DATA_NEGOTIATING_SIMPLE].localScriptPubKey

  // Alice sends another closing_complete, updating her script and the fees.
  val probe = TestProbe()
  val aliceScript2 = Script.write(Script.pay2wpkh(randomKey().publicKey))
  val aliceFeerate2 = alice.stateData.asInstanceOf[DATA_NEGOTIATING_SIMPLE].lastClosingFeerate * 1.25
  alice ! CMD_CLOSE(probe.ref, Some(aliceScript2), Some(ClosingFeerates(aliceFeerate2, aliceFeerate2, aliceFeerate2)))
  probe.expectMsgType[RES_SUCCESS[CMD_CLOSE]]
  inside(alice2bob.expectMsgType[ClosingComplete]) { msg =>
    // The RBF attempt must pay strictly more fees than Alice's previous closing tx.
    assert(msg.fees > aliceTx1.fee)
    assert(msg.closerScriptPubKey == aliceScript2)
    assert(msg.closeeScriptPubKey == bobScript1)
  }
  // Bob also sends closing_complete concurrently, updating his script and the fees.
  val bobScript2 = Script.write(Script.pay2wpkh(randomKey().publicKey))
  val bobFeerate2 = bob.stateData.asInstanceOf[DATA_NEGOTIATING_SIMPLE].lastClosingFeerate * 1.25
  bob ! CMD_CLOSE(probe.ref, Some(bobScript2), Some(ClosingFeerates(bobFeerate2, bobFeerate2, bobFeerate2)))
  probe.expectMsgType[RES_SUCCESS[CMD_CLOSE]]
  inside(bob2alice.expectMsgType[ClosingComplete]) { msg =>
    assert(msg.fees > bobTx1.fee)
    assert(msg.closerScriptPubKey == bobScript2)
    assert(msg.closeeScriptPubKey == aliceScript1)
  }
  // Those messages are ignored because they don't match the latest version of each participant's scripts.
  alice2bob.forward(bob)
  bob2alice.forward(alice)
  alice2bob.expectMsgType[Warning]
  alice2bob.expectNoMessage(100 millis)
  bob2alice.expectMsgType[Warning]
  bob2alice.expectNoMessage(100 millis)

  // Alice retries with a higher fee, now that she received Bob's latest script.
  val aliceFeerate3 = aliceFeerate2 * 1.25
  alice ! CMD_CLOSE(probe.ref, Some(aliceScript2), Some(ClosingFeerates(aliceFeerate3, aliceFeerate3, aliceFeerate3)))
  probe.expectMsgType[RES_SUCCESS[CMD_CLOSE]]
  inside(alice2bob.expectMsgType[ClosingComplete]) { msg =>
    assert(msg.closerScriptPubKey == aliceScript2)
    assert(msg.closeeScriptPubKey == bobScript2)
  }
  alice2bob.forward(bob)
  // Bob answers this attempt with a closing_sig for Alice's (still current) script.
  val bobClosingSig3 = bob2alice.expectMsgType[ClosingSig]
  assert(bobClosingSig3.closerScriptPubKey == aliceScript2)
  assert(bobClosingSig3.closeeScriptPubKey == bobScript2)
  // Before receiving Bob's closing_sig, Alice updates her script again.
  val aliceFeerate4 = aliceFeerate3 * 1.25
  val aliceScript4 = Script.write(Script.pay2wpkh(randomKey().publicKey))
  alice ! CMD_CLOSE(probe.ref, Some(aliceScript4), Some(ClosingFeerates(aliceFeerate4, aliceFeerate4, aliceFeerate4)))
  probe.expectMsgType[RES_SUCCESS[CMD_CLOSE]]
  inside(alice2bob.expectMsgType[ClosingComplete]) { msg =>
    assert(msg.closerScriptPubKey == aliceScript4)
    assert(msg.closeeScriptPubKey == bobScript2)
  }
  alice2bob.forward(bob)
  val bobClosingSig4 = bob2alice.expectMsgType[ClosingSig]
  assert(bobClosingSig4.closerScriptPubKey == aliceScript4)
  assert(bobClosingSig4.closeeScriptPubKey == bobScript2)

  // The first closing_sig is ignored because it's not using Alice's latest script.
  bob2alice.forward(alice, bobClosingSig3)
  alice2bob.expectMsgType[Warning]
  alice2blockchain.expectNoMessage(100 millis)
  // The second closing_sig lets Alice broadcast a new version of her closing transaction.
  bob2alice.forward(alice, bobClosingSig4)
  val aliceTx4 = alice2blockchain.expectMsgType[PublishFinalTx]
  assert(aliceTx4.fee > aliceTx1.fee)
  assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == aliceTx4.tx.txid)
  alice2blockchain.expectNoMessage(100 millis)
  alice2bob.expectNoMessage(100 millis)
}
|
||||
|
||||
test("recv WatchFundingSpentTriggered (counterparty's mutual close)") { f =>
|
||||
import f._
|
||||
aliceClose(f)
|
||||
|
@ -533,6 +748,98 @@ class NegotiatingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike
|
|||
assert(bob.stateName == CLOSING)
|
||||
}
|
||||
|
||||
// After several fully-signed closing transactions have been exchanged (including an RBF with
// an updated script), a funding-spend watch event for any known closing tx must simply be
// re-watched for confirmation, and both peers must stay in NEGOTIATING_SIMPLE until one of
// those transactions actually confirms.
test("recv WatchFundingSpentTriggered (signed closing tx)", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
  import f._
  bobClose(f)
  // Alice and Bob publish a first closing tx.
  val aliceClosingComplete1 = alice2bob.expectMsgType[ClosingComplete]
  alice2bob.forward(bob, aliceClosingComplete1)
  val bobClosingComplete1 = bob2alice.expectMsgType[ClosingComplete]
  bob2alice.forward(alice, bobClosingComplete1)
  val aliceClosingSig1 = alice2bob.expectMsgType[ClosingSig]
  val bobTx1 = alice2blockchain.expectMsgType[PublishFinalTx].tx
  alice2blockchain.expectWatchTxConfirmed(bobTx1.txid)
  val bobClosingSig1 = bob2alice.expectMsgType[ClosingSig]
  val aliceTx1 = bob2blockchain.expectMsgType[PublishFinalTx].tx
  bob2blockchain.expectWatchTxConfirmed(aliceTx1.txid)
  // Once each side receives the counterparty's closing_sig, it can publish its own tx too.
  alice2bob.forward(bob, aliceClosingSig1)
  assert(bob2blockchain.expectMsgType[PublishFinalTx].tx.txid == bobTx1.txid)
  bob2blockchain.expectWatchTxConfirmed(bobTx1.txid)
  bob2alice.forward(alice, bobClosingSig1)
  assert(alice2blockchain.expectMsgType[PublishFinalTx].tx.txid == aliceTx1.txid)
  alice2blockchain.expectWatchTxConfirmed(aliceTx1.txid)

  // Alice updates her closing script.
  alice ! CMD_CLOSE(TestProbe().ref, Some(Script.write(Script.pay2wpkh(randomKey().publicKey))), None)
  alice2bob.expectMsgType[ClosingComplete]
  alice2bob.forward(bob)
  val bobClosingSig = bob2alice.expectMsgType[ClosingSig]
  bob2alice.forward(alice, bobClosingSig)
  val aliceTx2 = alice2blockchain.expectMsgType[PublishFinalTx].tx
  alice2blockchain.expectWatchTxConfirmed(aliceTx2.txid)
  assert(bob2blockchain.expectMsgType[PublishFinalTx].tx.txid == aliceTx2.txid)
  bob2blockchain.expectWatchTxConfirmed(aliceTx2.txid)

  // They first receive a watch event for the older transaction, then the new one.
  alice ! WatchFundingSpentTriggered(aliceTx1)
  alice2blockchain.expectWatchTxConfirmed(aliceTx1.txid)
  alice ! WatchFundingSpentTriggered(bobTx1)
  alice2blockchain.expectWatchTxConfirmed(bobTx1.txid)
  alice ! WatchFundingSpentTriggered(aliceTx2)
  alice2blockchain.expectWatchTxConfirmed(aliceTx2.txid)
  alice2blockchain.expectNoMessage(100 millis)
  // Alice stays in NEGOTIATING_SIMPLE: no confirmation has been received yet.
  assert(alice.stateName == NEGOTIATING_SIMPLE)
  bob ! WatchFundingSpentTriggered(aliceTx1)
  bob2blockchain.expectWatchTxConfirmed(aliceTx1.txid)
  bob ! WatchFundingSpentTriggered(bobTx1)
  bob2blockchain.expectWatchTxConfirmed(bobTx1.txid)
  bob ! WatchFundingSpentTriggered(aliceTx2)
  bob2blockchain.expectWatchTxConfirmed(aliceTx2.txid)
  bob2blockchain.expectNoMessage(100 millis)
  assert(bob.stateName == NEGOTIATING_SIMPLE)
}
|
||||
|
||||
// Each side has produced its own closing tx but never received the counterparty's
// closing_sig for it (closing_sig messages are deliberately not forwarded here), so from its
// own point of view that tx is still unsigned. When the funding spend is detected with that
// tx, the node must publish it and watch it for confirmation.
test("recv WatchFundingSpentTriggered (unsigned closing tx)", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
  import f._
  bobClose(f)
  val aliceClosingComplete = alice2bob.expectMsgType[ClosingComplete]
  alice2bob.forward(bob, aliceClosingComplete)
  val bobClosingComplete = bob2alice.expectMsgType[ClosingComplete]
  bob2alice.forward(alice, bobClosingComplete)
  // Note: the closing_sig messages below are consumed but never forwarded.
  alice2bob.expectMsgType[ClosingSig]
  val bobTx = alice2blockchain.expectMsgType[PublishFinalTx].tx
  alice2blockchain.expectWatchTxConfirmed(bobTx.txid)
  bob2alice.expectMsgType[ClosingSig]
  val aliceTx = bob2blockchain.expectMsgType[PublishFinalTx].tx
  bob2blockchain.expectWatchTxConfirmed(aliceTx.txid)

  // Alice detects her own closing tx spending the funding output: she publishes and watches it.
  alice ! WatchFundingSpentTriggered(aliceTx)
  assert(alice2blockchain.expectMsgType[PublishFinalTx].tx.txid == aliceTx.txid)
  alice2blockchain.expectWatchTxConfirmed(aliceTx.txid)
  alice2blockchain.expectNoMessage(100 millis)

  // Same on Bob's side with his own closing tx.
  bob ! WatchFundingSpentTriggered(bobTx)
  assert(bob2blockchain.expectMsgType[PublishFinalTx].tx.txid == bobTx.txid)
  bob2blockchain.expectWatchTxConfirmed(bobTx.txid)
  bob2blockchain.expectNoMessage(100 millis)
}
|
||||
|
||||
// A funding-spend event for a transaction we don't recognize must be ignored (no publish,
// no watch) and leave the channel in the legacy NEGOTIATING state.
test("recv WatchFundingSpentTriggered (unrecognized commit)") { f =>
  import f._
  bobClose(f)
  alice ! WatchFundingSpentTriggered(Transaction(0, Nil, Nil, 0))
  alice2blockchain.expectNoMessage(100 millis)
  assert(alice.stateName == NEGOTIATING)
}
|
||||
|
||||
// Same as above, but with option_simple_close: the unrecognized spend is ignored and the
// channel stays in NEGOTIATING_SIMPLE.
test("recv WatchFundingSpentTriggered (unrecognized commit, option_simple_close)", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
  import f._
  bobClose(f)
  alice ! WatchFundingSpentTriggered(Transaction(0, Nil, Nil, 0))
  alice2blockchain.expectNoMessage(100 millis)
  assert(alice.stateName == NEGOTIATING_SIMPLE)
}
|
||||
|
||||
test("recv CMD_CLOSE") { f =>
|
||||
import f._
|
||||
bobClose(f)
|
||||
|
@ -573,12 +880,71 @@ class NegotiatingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike
|
|||
awaitCond(bob.stateName == CLOSING)
|
||||
}
|
||||
|
||||
test("recv WatchFundingSpentTriggered (unrecognized commit)") { f =>
|
||||
test("recv CMD_CLOSE with RBF feerate too low", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
|
||||
import f._
|
||||
bobClose(f)
|
||||
alice ! WatchFundingSpentTriggered(Transaction(0, Nil, Nil, 0))
|
||||
alice2blockchain.expectNoMessage(100 millis)
|
||||
assert(alice.stateName == NEGOTIATING)
|
||||
|
||||
alice.setBitcoinCoreFeerates(buildFeerates(FeeratePerKw(500 sat)))
|
||||
aliceClose(f)
|
||||
alice2bob.expectMsgType[ClosingComplete]
|
||||
alice2bob.forward(bob)
|
||||
bob2alice.expectMsgType[ClosingComplete] // ignored
|
||||
val bobClosingSig = bob2alice.expectMsgType[ClosingSig]
|
||||
bob2alice.forward(alice, bobClosingSig)
|
||||
|
||||
val probe = TestProbe()
|
||||
alice ! CMD_CLOSE(probe.ref, None, Some(ClosingFeerates(FeeratePerKw(450 sat), FeeratePerKw(450 sat), FeeratePerKw(450 sat))))
|
||||
probe.expectMsgType[RES_FAILURE[CMD_CLOSE, InvalidRbfFeerate]]
|
||||
alice ! CMD_CLOSE(probe.ref, None, Some(ClosingFeerates(FeeratePerKw(500 sat), FeeratePerKw(500 sat), FeeratePerKw(500 sat))))
|
||||
probe.expectMsgType[RES_SUCCESS[CMD_CLOSE]]
|
||||
}
|
||||
|
||||
// Restart behavior in NEGOTIATING_SIMPLE: a node that restarts before receiving the
// counterparty's closing_sig cannot re-publish its own closing tx, but must still detect it
// when the funding spend / confirmation watches fire, and eventually transition to CLOSED.
test("receive INPUT_RESTORED", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
  import f._
  aliceClose(f)
  alice2bob.expectMsgType[ClosingComplete]
  alice2bob.forward(bob)
  val aliceTx = bob2blockchain.expectMsgType[PublishFinalTx].tx
  bob2blockchain.expectWatchTxConfirmed(aliceTx.txid)
  bob2alice.expectMsgType[ClosingComplete]
  bob2alice.forward(alice)
  val bobTx = alice2blockchain.expectMsgType[PublishFinalTx].tx
  alice2blockchain.expectWatchTxConfirmed(bobTx.txid)
  alice2bob.expectMsgType[ClosingSig]
  alice2bob.forward(bob)
  bob2alice.expectMsgType[ClosingSig] // Alice doesn't receive Bob's closing_sig
  assert(bob2blockchain.expectMsgType[PublishFinalTx].tx.txid == bobTx.txid)
  bob2blockchain.expectWatchTxConfirmed(bobTx.txid)
  // Capture the persisted channel data used to restore each node after the restart.
  val aliceData = alice.underlyingActor.nodeParams.db.channels.getChannel(channelId(alice)).get
  val bobData = bob.underlyingActor.nodeParams.db.channels.getChannel(channelId(bob)).get

  // Alice restarts before receiving Bob's closing_sig: she cannot publish her own closing transaction, but will
  // detect it when receiving it in her mempool (or in the blockchain).
  alice.setState(WAIT_FOR_INIT_INTERNAL, Nothing)
  alice ! INPUT_RESTORED(aliceData)
  alice2blockchain.expectMsgType[SetChannelId]
  alice2blockchain.expectMsgType[WatchFundingSpent]
  awaitCond(alice.stateName == OFFLINE)

  // Alice's transaction (published by Bob) confirms.
  alice ! WatchFundingSpentTriggered(aliceTx)
  inside(alice2blockchain.expectMsgType[PublishFinalTx]) { p =>
    assert(p.tx.txid == aliceTx.txid)
    assert(p.fee > 0.sat)
  }
  assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == aliceTx.txid)
  alice ! WatchTxConfirmedTriggered(BlockHeight(100), 3, aliceTx)
  awaitCond(alice.stateName == CLOSED)

  // Bob restarts and detects that Alice's closing transaction is confirmed.
  bob.setState(WAIT_FOR_INIT_INTERNAL, Nothing)
  bob ! INPUT_RESTORED(bobData)
  bob2blockchain.expectMsgType[SetChannelId]
  bob2blockchain.expectMsgType[WatchFundingSpent]
  awaitCond(bob.stateName == OFFLINE)
  bob ! WatchFundingSpentTriggered(aliceTx)
  assert(bob2blockchain.expectMsgType[WatchTxConfirmed].txId == aliceTx.txid)
  bob ! WatchTxConfirmedTriggered(BlockHeight(100), 3, aliceTx)
  awaitCond(bob.stateName == CLOSED)
}
|
||||
|
||||
test("recv Error") { f =>
|
||||
|
@ -593,4 +959,28 @@ class NegotiatingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike
|
|||
assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == tx.txid)
|
||||
}
|
||||
|
||||
// Receiving an error after a mutual close transaction has already been signed and published:
// both peers go to CLOSING with the mutual close tx recorded, and neither publishes its
// commitment transaction (the mutual close is preferred).
test("recv Error (option_simple_close)", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
  import f._
  aliceClose(f)
  val closingComplete = alice2bob.expectMsgType[ClosingComplete]
  alice2bob.forward(bob, closingComplete)
  bob2alice.expectMsgType[ClosingComplete]
  val closingSig = bob2alice.expectMsgType[ClosingSig]
  bob2alice.forward(alice, closingSig)
  // Alice's closing tx is fully signed and published by both sides.
  val closingTx = alice2blockchain.expectMsgType[PublishFinalTx].tx
  alice2blockchain.expectWatchTxConfirmed(closingTx.txid)
  assert(bob2blockchain.expectMsgType[PublishFinalTx].tx.txid == closingTx.txid)
  bob2blockchain.expectWatchTxConfirmed(closingTx.txid)

  alice ! Error(ByteVector32.Zeroes, "oops")
  awaitCond(alice.stateName == CLOSING)
  assert(alice.stateData.asInstanceOf[DATA_CLOSING].mutualClosePublished.nonEmpty)
  alice2blockchain.expectNoMessage(100 millis) // we have a mutual close transaction, so we don't publish the commit tx

  bob ! Error(ByteVector32.Zeroes, "oops")
  awaitCond(bob.stateName == CLOSING)
  assert(bob.stateData.asInstanceOf[DATA_CLOSING].mutualClosePublished.nonEmpty)
  bob2blockchain.expectNoMessage(100 millis) // we have a mutual close transaction, so we don't publish the commit tx
}
|
||||
|
||||
}
|
||||
|
|
|
@ -344,6 +344,18 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
awaitCond(alice.stateName == CLOSED)
|
||||
}
|
||||
|
||||
// Confirmation of the last published mutual close transaction moves both peers to CLOSED.
test("recv WatchTxConfirmedTriggered (mutual close, option_simple_close)", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
  import f._
  mutualClose(alice, bob, alice2bob, bob2alice, alice2blockchain, bob2blockchain)
  // Take the most recent closing tx published during negotiation.
  val mutualCloseTx = alice.stateData.asInstanceOf[DATA_NEGOTIATING_SIMPLE].publishedClosingTxs.last

  alice ! WatchTxConfirmedTriggered(BlockHeight(0), 0, mutualCloseTx.tx)
  awaitCond(alice.stateName == CLOSED)

  bob ! WatchTxConfirmedTriggered(BlockHeight(0), 0, mutualCloseTx.tx)
  awaitCond(bob.stateName == CLOSED)
}
|
||||
|
||||
test("recv WatchFundingSpentTriggered (local commit)") { f =>
|
||||
import f._
|
||||
// an error occurs and alice publishes her commit tx
|
||||
|
@ -384,15 +396,15 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
channelUpdateListener.expectMsgType[LocalChannelDown]
|
||||
|
||||
// scenario 1: bob claims the htlc output from the commit tx using its preimage
|
||||
val claimHtlcSuccessFromCommitTx = Transaction(version = 0, txIn = TxIn(outPoint = OutPoint(randomTxId(), 0), signatureScript = ByteVector.empty, sequence = 0, witness = Scripts.witnessClaimHtlcSuccessFromCommitTx(Transactions.PlaceHolderSig, ra1, ByteVector.fill(130)(33))) :: Nil, txOut = Nil, lockTime = 0)
|
||||
alice ! WatchOutputSpentTriggered(claimHtlcSuccessFromCommitTx)
|
||||
val claimHtlcSuccessFromCommitTx = Transaction(version = 2, txIn = TxIn(outPoint = OutPoint(randomTxId(), 0), signatureScript = ByteVector.empty, sequence = 0, witness = Scripts.witnessClaimHtlcSuccessFromCommitTx(Transactions.PlaceHolderSig, ra1, ByteVector.fill(130)(33))) :: Nil, txOut = Nil, lockTime = 0)
|
||||
alice ! WatchOutputSpentTriggered(100_000 sat, claimHtlcSuccessFromCommitTx)
|
||||
val fulfill1 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFulfill]]
|
||||
assert(fulfill1.htlc == htlca1)
|
||||
assert(fulfill1.result.paymentPreimage == ra1)
|
||||
|
||||
// scenario 2: bob claims the htlc output from his own commit tx using its preimage (let's assume both parties had published their commitment tx)
|
||||
val claimHtlcSuccessTx = Transaction(version = 0, txIn = TxIn(outPoint = OutPoint(randomTxId(), 0), signatureScript = ByteVector.empty, sequence = 0, witness = Scripts.witnessHtlcSuccess(Transactions.PlaceHolderSig, Transactions.PlaceHolderSig, ra1, ByteVector.fill(130)(33), Transactions.DefaultCommitmentFormat)) :: Nil, txOut = Nil, lockTime = 0)
|
||||
alice ! WatchOutputSpentTriggered(claimHtlcSuccessTx)
|
||||
val claimHtlcSuccessTx = Transaction(version = 2, txIn = TxIn(outPoint = OutPoint(randomTxId(), 0), signatureScript = ByteVector.empty, sequence = 0, witness = Scripts.witnessHtlcSuccess(Transactions.PlaceHolderSig, Transactions.PlaceHolderSig, ra1, ByteVector.fill(130)(33), Transactions.DefaultCommitmentFormat)) :: Nil, txOut = Nil, lockTime = 0)
|
||||
alice ! WatchOutputSpentTriggered(100_000 sat, claimHtlcSuccessTx)
|
||||
val fulfill2 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFulfill]]
|
||||
assert(fulfill2.htlc == htlca1)
|
||||
assert(fulfill2.result.paymentPreimage == ra1)
|
||||
|
@ -852,6 +864,18 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
inside(listener.expectMsgType[LocalChannelUpdate]) { u => assert(!u.channelUpdate.channelFlags.isEnabled) }
|
||||
}
|
||||
|
||||
// If the counterparty force-closes with its commitment tx while we are negotiating a simple
// close, we react with the usual remote-close handling and publish a TransactionPublished
// event carrying a positive mining fee (the funder pays the commit tx fee).
test("recv WatchFundingSpentTriggered (remote commit, option_simple_close)", Tag(ChannelStateTestsTags.SimpleClose)) { f =>
  import f._
  mutualClose(alice, bob, alice2bob, bob2alice, alice2blockchain, bob2blockchain)
  // Bob publishes his last current commit tx, the one it had when entering NEGOTIATING state.
  val bobCommitTx = bobCommitTxs.last.commitTx.tx
  val closingState = remoteClose(bobCommitTx, alice, alice2blockchain)
  // No pending HTLCs at this point, so there is nothing to claim from the commit tx.
  assert(closingState.claimHtlcTxs.isEmpty)
  val txPublished = txListener.expectMsgType[TransactionPublished]
  assert(txPublished.tx == bobCommitTx)
  assert(txPublished.miningFee > 0.sat) // alice is funder, she pays the fee for the remote commit
}
|
||||
|
||||
test("recv CMD_BUMP_FORCE_CLOSE_FEE (remote commit)", Tag(ChannelStateTestsTags.AnchorOutputsZeroFeeHtlcTxs)) { f =>
|
||||
import f._
|
||||
|
||||
|
@ -897,10 +921,10 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
awaitCond(alice.stateName == CLOSED)
|
||||
}
|
||||
|
||||
test("recv WatchTxConfirmedTriggered (remote commit, option_static_remotekey)", Tag(ChannelStateTestsTags.StaticRemoteKey)) { f =>
|
||||
test("recv WatchTxConfirmedTriggered (remote commit, option_static_remotekey)", Tag(ChannelStateTestsTags.StaticRemoteKey), Tag(ChannelStateTestsTags.SimpleClose)) { f =>
|
||||
import f._
|
||||
mutualClose(alice, bob, alice2bob, bob2alice, alice2blockchain, bob2blockchain)
|
||||
assert(alice.stateData.asInstanceOf[DATA_CLOSING].commitments.params.channelFeatures == ChannelFeatures(Features.StaticRemoteKey))
|
||||
assert(alice.stateData.asInstanceOf[DATA_NEGOTIATING_SIMPLE].commitments.params.channelFeatures == ChannelFeatures(Features.StaticRemoteKey))
|
||||
// bob publishes his last current commit tx, the one it had when entering NEGOTIATING state
|
||||
val bobCommitTx = bobCommitTxs.last.commitTx.tx
|
||||
assert(bobCommitTx.txOut.size == 2) // two main outputs
|
||||
|
@ -1627,7 +1651,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
assert(bobOutpoints.size == 2)
|
||||
|
||||
// alice reacts by publishing penalty txs that spend bob's htlc transactions
|
||||
alice ! WatchOutputSpentTriggered(bobHtlcSuccessTx1.tx)
|
||||
alice ! WatchOutputSpentTriggered(bobHtlcSuccessTx1.amountIn, bobHtlcSuccessTx1.tx)
|
||||
awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].revokedCommitPublished.head.claimHtlcDelayedPenaltyTxs.size == 1)
|
||||
val claimHtlcSuccessPenalty1 = alice.stateData.asInstanceOf[DATA_CLOSING].revokedCommitPublished.head.claimHtlcDelayedPenaltyTxs.last
|
||||
Transaction.correctlySpends(claimHtlcSuccessPenalty1.tx, bobHtlcSuccessTx1.tx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
|
||||
|
@ -1638,7 +1662,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
assert(watchSpent1.outputIndex == claimHtlcSuccessPenalty1.input.outPoint.index)
|
||||
alice2blockchain.expectNoMessage(1 second)
|
||||
|
||||
alice ! WatchOutputSpentTriggered(bobHtlcTimeoutTx.tx)
|
||||
alice ! WatchOutputSpentTriggered(bobHtlcTimeoutTx.amountIn, bobHtlcTimeoutTx.tx)
|
||||
awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].revokedCommitPublished.head.claimHtlcDelayedPenaltyTxs.size == 2)
|
||||
val claimHtlcTimeoutPenalty = alice.stateData.asInstanceOf[DATA_CLOSING].revokedCommitPublished.head.claimHtlcDelayedPenaltyTxs.last
|
||||
Transaction.correctlySpends(claimHtlcTimeoutPenalty.tx, bobHtlcTimeoutTx.tx :: Nil, ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS)
|
||||
|
@ -1652,7 +1676,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
// bob RBFs his htlc-success with a different transaction
|
||||
val bobHtlcSuccessTx2 = bobHtlcSuccessTx1.tx.copy(txIn = TxIn(OutPoint(randomTxId(), 0), Nil, 0) +: bobHtlcSuccessTx1.tx.txIn)
|
||||
assert(bobHtlcSuccessTx2.txid !== bobHtlcSuccessTx1.tx.txid)
|
||||
alice ! WatchOutputSpentTriggered(bobHtlcSuccessTx2)
|
||||
alice ! WatchOutputSpentTriggered(bobHtlcSuccessTx1.amountIn, bobHtlcSuccessTx2)
|
||||
awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].revokedCommitPublished.head.claimHtlcDelayedPenaltyTxs.size == 3)
|
||||
val claimHtlcSuccessPenalty2 = alice.stateData.asInstanceOf[DATA_CLOSING].revokedCommitPublished.head.claimHtlcDelayedPenaltyTxs.last
|
||||
assert(claimHtlcSuccessPenalty1.tx.txid != claimHtlcSuccessPenalty2.tx.txid)
|
||||
|
@ -1749,7 +1773,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
|
|||
)
|
||||
|
||||
// alice reacts by publishing penalty txs that spend bob's htlc transaction
|
||||
alice ! WatchOutputSpentTriggered(bobHtlcTx)
|
||||
alice ! WatchOutputSpentTriggered(bobHtlcTxs(0).amountIn, bobHtlcTx)
|
||||
awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].revokedCommitPublished.head.claimHtlcDelayedPenaltyTxs.size == 4)
|
||||
val claimHtlcDelayedPenaltyTxs = alice.stateData.asInstanceOf[DATA_CLOSING].revokedCommitPublished.head.claimHtlcDelayedPenaltyTxs
|
||||
val spentOutpoints = Set(OutPoint(bobHtlcTx, 1), OutPoint(bobHtlcTx, 2), OutPoint(bobHtlcTx, 3), OutPoint(bobHtlcTx, 4))
|
||||
|
|
|
@ -262,12 +262,12 @@ abstract class ChannelIntegrationSpec extends IntegrationSpec {
|
|||
val bitcoinClient = new BitcoinCoreClient(bitcoinrpcclient)
|
||||
waitForTxBroadcastOrConfirmed(localCommit.commitTx.txid, bitcoinClient, sender)
|
||||
// we generate a few blocks to get the commit tx confirmed
|
||||
generateBlocks(3, Some(minerAddress))
|
||||
generateBlocks(6, Some(minerAddress))
|
||||
// we wait until the htlc-timeout has been broadcast
|
||||
assert(localCommit.htlcTxs.size == 1)
|
||||
waitForOutputSpent(localCommit.htlcTxs.keys.head, bitcoinClient, sender)
|
||||
// we generate more blocks for the htlc-timeout to reach enough confirmations
|
||||
generateBlocks(3, Some(minerAddress))
|
||||
generateBlocks(6, Some(minerAddress))
|
||||
// this will fail the htlc
|
||||
val failed = paymentSender.expectMsgType[PaymentFailed](max = 60 seconds)
|
||||
assert(failed.id == paymentId)
|
||||
|
@ -323,7 +323,7 @@ abstract class ChannelIntegrationSpec extends IntegrationSpec {
|
|||
assert(remoteCommit.claimHtlcTxs.size == 1)
|
||||
waitForOutputSpent(remoteCommit.claimHtlcTxs.keys.head, bitcoinClient, sender)
|
||||
// and we generate blocks for the claim-htlc-timeout to reach enough confirmations
|
||||
generateBlocks(3, Some(minerAddress))
|
||||
generateBlocks(6, Some(minerAddress))
|
||||
// this will fail the htlc
|
||||
val failed = paymentSender.expectMsgType[PaymentFailed](max = 60 seconds)
|
||||
assert(failed.id == paymentId)
|
||||
|
@ -491,7 +491,7 @@ class StandardChannelIntegrationSpec extends ChannelIntegrationSpec {
|
|||
}
|
||||
|
||||
test("open a wumbo channel C <-> F, wait for longer than the default min_depth, then close") {
|
||||
// we open a 5BTC channel and check that we scale `min_depth` up to 13 confirmations
|
||||
// we open a 5BTC channel and check that we scale `min_depth` up to 17 confirmations
|
||||
val funder = nodes("C")
|
||||
val fundee = nodes("F")
|
||||
val tempChannelId = connect(funder, fundee, 5 btc, 100000000000L msat).channelId
|
||||
|
@ -510,9 +510,8 @@ class StandardChannelIntegrationSpec extends ChannelIntegrationSpec {
|
|||
sender.expectMsgType[RES_GET_CHANNEL_STATE].state == WAIT_FOR_CHANNEL_READY
|
||||
})
|
||||
|
||||
generateBlocks(2)
|
||||
|
||||
// after 8 blocks the fundee is still waiting for more confirmations
|
||||
generateBlocks(2)
|
||||
fundee.register ! Register.Forward(sender.ref.toTyped[Any], channelId, CMD_GET_CHANNEL_STATE(ActorRef.noSender))
|
||||
assert(sender.expectMsgType[RES_GET_CHANNEL_STATE].state == WAIT_FOR_FUNDING_CONFIRMED)
|
||||
|
||||
|
@ -576,18 +575,22 @@ class StandardChannelIntegrationSpec extends ChannelIntegrationSpec {
|
|||
fundee.register ! Register.Forward(sender.ref.toTyped[Any], channelId, CMD_CLOSE(sender.ref, None, None))
|
||||
sender.expectMsgType[RES_SUCCESS[CMD_CLOSE]]
|
||||
// we then wait for C and F to negotiate the closing fee
|
||||
awaitCond(stateListener.expectMsgType[ChannelStateChanged](max = 60 seconds).currentState == CLOSING, max = 60 seconds)
|
||||
awaitCond(stateListener.expectMsgType[ChannelStateChanged](max = 60 seconds).currentState == NEGOTIATING_SIMPLE, max = 60 seconds)
|
||||
// and close the channel
|
||||
val bitcoinClient = new BitcoinCoreClient(bitcoinrpcclient)
|
||||
awaitCond({
|
||||
bitcoinClient.getMempool().pipeTo(sender.ref)
|
||||
sender.expectMsgType[Seq[Transaction]].exists(_.txIn.head.outPoint.txid == fundingOutpoint.txid)
|
||||
}, max = 20 seconds, interval = 1 second)
|
||||
// we generate more blocks than the default min depth, but are still waiting for more confirmations
|
||||
generateBlocks(10)
|
||||
stateListener.expectNoMessage(100 millis)
|
||||
|
||||
// we generate enough blocks for the channel to be deeply confirmed
|
||||
generateBlocks(12)
|
||||
generateBlocks(10)
|
||||
awaitCond(stateListener.expectMsgType[ChannelStateChanged](max = 60 seconds).currentState == CLOSED, max = 60 seconds)
|
||||
|
||||
bitcoinClient.lookForSpendingTx(None, fundingOutpoint.txid, fundingOutpoint.index.toInt, limit = 12).pipeTo(sender.ref)
|
||||
bitcoinClient.lookForSpendingTx(None, fundingOutpoint.txid, fundingOutpoint.index.toInt, limit = 25).pipeTo(sender.ref)
|
||||
val closingTx = sender.expectMsgType[Transaction]
|
||||
assert(closingTx.txOut.map(_.publicKeyScript).toSet == Set(finalPubKeyScriptC, finalPubKeyScriptF))
|
||||
awaitAnnouncements(1)
|
||||
|
|
|
@ -19,7 +19,7 @@ package fr.acinq.eclair.transactions
|
|||
import fr.acinq.bitcoin.SigHash._
|
||||
import fr.acinq.bitcoin.scalacompat.Crypto.{PrivateKey, ripemd160, sha256}
|
||||
import fr.acinq.bitcoin.scalacompat.Script.{pay2wpkh, pay2wsh, write}
|
||||
import fr.acinq.bitcoin.scalacompat.{Btc, ByteVector32, Crypto, MilliBtc, MilliBtcDouble, OutPoint, Protocol, Satoshi, SatoshiLong, Script, ScriptWitness, Transaction, TxId, TxIn, TxOut, millibtc2satoshi}
|
||||
import fr.acinq.bitcoin.scalacompat.{Btc, ByteVector32, Crypto, MilliBtc, MilliBtcDouble, OP_PUSHDATA, OP_RETURN, OutPoint, Protocol, Satoshi, SatoshiLong, Script, ScriptWitness, Transaction, TxId, TxIn, TxOut, millibtc2satoshi}
|
||||
import fr.acinq.eclair.TestUtils.randomTxId
|
||||
import fr.acinq.eclair._
|
||||
import fr.acinq.eclair.blockchain.fee.{ConfirmationTarget, FeeratePerKw}
|
||||
|
@ -828,6 +828,56 @@ class TransactionsSpec extends AnyFunSuite with Logging {
|
|||
val toRemoteIndex = (toLocal.index + 1) % 2
|
||||
assert(closingTx.tx.txOut(toRemoteIndex.toInt).amount == 250_000.sat)
|
||||
}
|
||||
{
|
||||
// Different amounts, both outputs untrimmed, local is closer (option_simple_close):
|
||||
val spec = CommitmentSpec(Set.empty, feeratePerKw, 150_000_000 msat, 250_000_000 msat)
|
||||
val closingTxs = makeSimpleClosingTxs(commitInput, spec, SimpleClosingTxFee.PaidByUs(5_000 sat), 0, localPubKeyScript, remotePubKeyScript)
|
||||
assert(closingTxs.localAndRemote_opt.nonEmpty)
|
||||
assert(closingTxs.localOnly_opt.nonEmpty)
|
||||
assert(closingTxs.remoteOnly_opt.isEmpty)
|
||||
val localAndRemote = closingTxs.localAndRemote_opt.flatMap(_.toLocalOutput).get
|
||||
assert(localAndRemote.publicKeyScript == localPubKeyScript)
|
||||
assert(localAndRemote.amount == 145_000.sat)
|
||||
val localOnly = closingTxs.localOnly_opt.flatMap(_.toLocalOutput).get
|
||||
assert(localOnly.publicKeyScript == localPubKeyScript)
|
||||
assert(localOnly.amount == 145_000.sat)
|
||||
}
|
||||
{
|
||||
// Remote is using OP_RETURN (option_simple_close): we set their output amount to 0 sat.
|
||||
val spec = CommitmentSpec(Set.empty, feeratePerKw, 150_000_000 msat, 1_500_000 msat)
|
||||
val remotePubKeyScript = Script.write(OP_RETURN :: OP_PUSHDATA(hex"deadbeef") :: Nil)
|
||||
val closingTxs = makeSimpleClosingTxs(commitInput, spec, SimpleClosingTxFee.PaidByUs(5_000 sat), 0, localPubKeyScript, remotePubKeyScript)
|
||||
assert(closingTxs.localAndRemote_opt.nonEmpty)
|
||||
assert(closingTxs.localOnly_opt.nonEmpty)
|
||||
assert(closingTxs.remoteOnly_opt.isEmpty)
|
||||
val localAndRemote = closingTxs.localAndRemote_opt.flatMap(_.toLocalOutput).get
|
||||
assert(localAndRemote.publicKeyScript == localPubKeyScript)
|
||||
assert(localAndRemote.amount == 145_000.sat)
|
||||
val remoteOutput = closingTxs.localAndRemote_opt.get.tx.txOut((localAndRemote.index.toInt + 1) % 2)
|
||||
assert(remoteOutput.amount == 0.sat)
|
||||
assert(remoteOutput.publicKeyScript == remotePubKeyScript)
|
||||
val localOnly = closingTxs.localOnly_opt.flatMap(_.toLocalOutput).get
|
||||
assert(localOnly.publicKeyScript == localPubKeyScript)
|
||||
assert(localOnly.amount == 145_000.sat)
|
||||
}
|
||||
{
|
||||
// Remote is using OP_RETURN (option_simple_close) and paying the fees: we set their output amount to 0 sat.
|
||||
val spec = CommitmentSpec(Set.empty, feeratePerKw, 150_000_000 msat, 10_000_000 msat)
|
||||
val remotePubKeyScript = Script.write(OP_RETURN :: OP_PUSHDATA(hex"deadbeef") :: Nil)
|
||||
val closingTxs = makeSimpleClosingTxs(commitInput, spec, SimpleClosingTxFee.PaidByThem(5_000 sat), 0, localPubKeyScript, remotePubKeyScript)
|
||||
assert(closingTxs.localAndRemote_opt.nonEmpty)
|
||||
assert(closingTxs.localOnly_opt.nonEmpty)
|
||||
assert(closingTxs.remoteOnly_opt.isEmpty)
|
||||
val localAndRemote = closingTxs.localAndRemote_opt.flatMap(_.toLocalOutput).get
|
||||
assert(localAndRemote.publicKeyScript == localPubKeyScript)
|
||||
assert(localAndRemote.amount == 150_000.sat)
|
||||
val remoteOutput = closingTxs.localAndRemote_opt.get.tx.txOut((localAndRemote.index.toInt + 1) % 2)
|
||||
assert(remoteOutput.amount == 0.sat)
|
||||
assert(remoteOutput.publicKeyScript == remotePubKeyScript)
|
||||
val localOnly = closingTxs.localOnly_opt.flatMap(_.toLocalOutput).get
|
||||
assert(localOnly.publicKeyScript == localPubKeyScript)
|
||||
assert(localOnly.amount == 150_000.sat)
|
||||
}
|
||||
{
|
||||
// Same amounts, both outputs untrimmed, local is fundee:
|
||||
val spec = CommitmentSpec(Set.empty, feeratePerKw, 150_000_000 msat, 150_000_000 msat)
|
||||
|
@ -851,6 +901,29 @@ class TransactionsSpec extends AnyFunSuite with Logging {
|
|||
assert(toLocal.amount == 150_000.sat)
|
||||
assert(toLocal.index == 0)
|
||||
}
|
||||
{
  // Their output is trimmed (option_simple_close):
  val commitSpec = CommitmentSpec(Set.empty, feeratePerKw, 150_000_000 msat, 1_000_000 msat)
  val closingTxs = makeSimpleClosingTxs(commitInput, commitSpec, SimpleClosingTxFee.PaidByThem(800 sat), 0, localPubKeyScript, remotePubKeyScript)
  // Remote's 1 000 sat minus an 800 sat fee is below the dust threshold: only a local-only transaction is produced.
  assert(closingTxs.all.size == 1)
  assert(closingTxs.localOnly_opt.nonEmpty)
  val toLocalOnly = closingTxs.localOnly_opt.flatMap(_.toLocalOutput).get
  assert(toLocalOnly.publicKeyScript == localPubKeyScript)
  assert(toLocalOnly.amount == 150_000.sat)
  assert(toLocalOnly.index == 0)
}
|
||||
{
  // Their OP_RETURN output is trimmed (option_simple_close):
  val commitSpec = CommitmentSpec(Set.empty, feeratePerKw, 150_000_000 msat, 1_000_000 msat)
  val opReturnScript = Script.write(OP_RETURN :: OP_PUSHDATA(hex"deadbeef") :: Nil)
  val closingTxs = makeSimpleClosingTxs(commitInput, commitSpec, SimpleClosingTxFee.PaidByThem(1_001 sat), 0, localPubKeyScript, opReturnScript)
  // The fee exceeds remote's balance, so even their 0-sat OP_RETURN output disappears: only a local-only transaction remains.
  assert(closingTxs.all.size == 1)
  assert(closingTxs.localOnly_opt.nonEmpty)
  val toLocalOnly = closingTxs.localOnly_opt.flatMap(_.toLocalOutput).get
  assert(toLocalOnly.publicKeyScript == localPubKeyScript)
  assert(toLocalOnly.amount == 150_000.sat)
  assert(toLocalOnly.index == 0)
}
|
||||
{
|
||||
// Our output is trimmed:
|
||||
val spec = CommitmentSpec(Set.empty, feeratePerKw, 50_000 msat, 150_000_000 msat)
|
||||
|
@ -858,6 +931,14 @@ class TransactionsSpec extends AnyFunSuite with Logging {
|
|||
assert(closingTx.tx.txOut.length == 1)
|
||||
assert(closingTx.toLocalOutput.isEmpty)
|
||||
}
|
||||
{
  // Our output is trimmed (option_simple_close):
  val commitSpec = CommitmentSpec(Set.empty, feeratePerKw, 1_000_000 msat, 150_000_000 msat)
  val closingTxs = makeSimpleClosingTxs(commitInput, commitSpec, SimpleClosingTxFee.PaidByUs(800 sat), 0, localPubKeyScript, remotePubKeyScript)
  // Our 1 000 sat minus an 800 sat fee is below the dust threshold: only a remote-only transaction is produced,
  // and it contains no output paying us.
  assert(closingTxs.all.size == 1)
  assert(closingTxs.remoteOnly_opt.nonEmpty)
  assert(closingTxs.remoteOnly_opt.flatMap(_.toLocalOutput).isEmpty)
}
|
||||
{
|
||||
// Both outputs are trimmed:
|
||||
val spec = CommitmentSpec(Set.empty, feeratePerKw, 50_000 msat, 10_000 msat)
|
||||
|
|
|
@ -519,6 +519,33 @@ class LightningMessageCodecsSpec extends AnyFunSuite {
|
|||
}
|
||||
}
|
||||
|
||||
test("encode/decode closing messages") {
  // Round-trip test vectors for the option_simple_close messages (closing_complete = type 0x0028,
  // closing_sig = type 0x0029). Each vector pairs the raw wire encoding with the expected decoded message.
  val channelId = ByteVector32(hex"58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86")
  // Three distinct dummy signatures so the vectors can distinguish which TLV carries which signature.
  val sig1 = ByteVector64(hex"01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101")
  val sig2 = ByteVector64(hex"02020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202")
  val sig3 = ByteVector64(hex"03030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303")
  val closerScript = hex"deadbeef"
  val closeeScript = hex"d43db3ef1234"
  // Wire layout of each vector: message type, channel_id, length-prefixed closer script,
  // length-prefixed closee script, fee (0x451 = 1105 sat), lock time, then optional signature TLVs
  // (0x01 = closer output only, 0x02 = closee output only, 0x03 = both outputs, each 0x40 = 64 bytes long).
  val testCases = Seq(
    hex"0028 58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86 0004deadbeef 0006d43db3ef1234 0000000000000451 00000000" -> ClosingComplete(channelId, closerScript, closeeScript, 1105 sat, 0),
    // Non-zero lock time (0x000c96a8 = 825 000) with a closee-output-only signature.
    hex"0028 58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86 0004deadbeef 0006d43db3ef1234 0000000000000451 000c96a8 024001010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101" -> ClosingComplete(channelId, closerScript, closeeScript, 1105 sat, 825_000, TlvStream(ClosingTlv.CloseeOutputOnly(sig1))),
    hex"0028 58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86 0004deadbeef 0006d43db3ef1234 0000000000000451 00000000 034001010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101" -> ClosingComplete(channelId, closerScript, closeeScript, 1105 sat, 0, TlvStream(ClosingTlv.CloserAndCloseeOutputs(sig1))),
    hex"0028 58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86 0004deadbeef 0006d43db3ef1234 0000000000000451 00000000 014001010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101 034002020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202" -> ClosingComplete(channelId, closerScript, closeeScript, 1105 sat, 0, TlvStream(ClosingTlv.CloserOutputOnly(sig1), ClosingTlv.CloserAndCloseeOutputs(sig2))),
    // All three signature TLVs present at once.
    hex"0028 58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86 0004deadbeef 0006d43db3ef1234 0000000000000451 00000000 014001010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101 024002020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202 034003030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303" -> ClosingComplete(channelId, closerScript, closeeScript, 1105 sat, 0, TlvStream(ClosingTlv.CloserOutputOnly(sig1), ClosingTlv.CloseeOutputOnly(sig2), ClosingTlv.CloserAndCloseeOutputs(sig3))),
    // closing_sig uses the same field layout and TLV types as closing_complete.
    hex"0029 58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86 0004deadbeef 0006d43db3ef1234 0000000000000451 00000000" -> ClosingSig(channelId, closerScript, closeeScript, 1105 sat, 0),
    hex"0029 58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86 0004deadbeef 0006d43db3ef1234 0000000000000451 00000000 024001010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101" -> ClosingSig(channelId, closerScript, closeeScript, 1105 sat, 0, TlvStream(ClosingTlv.CloseeOutputOnly(sig1))),
    hex"0029 58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86 0004deadbeef 0006d43db3ef1234 0000000000000451 00000000 034001010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101" -> ClosingSig(channelId, closerScript, closeeScript, 1105 sat, 0, TlvStream(ClosingTlv.CloserAndCloseeOutputs(sig1))),
    hex"0029 58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86 0004deadbeef 0006d43db3ef1234 0000000000000451 00000000 014001010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101 034002020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202" -> ClosingSig(channelId, closerScript, closeeScript, 1105 sat, 0, TlvStream(ClosingTlv.CloserOutputOnly(sig1), ClosingTlv.CloserAndCloseeOutputs(sig2))),
    hex"0029 58a00a6f14e69a2e97b18cf76f755c8551fea9947cf7b6ece9d641013eba5f86 0004deadbeef 0006d43db3ef1234 0000000000000451 00000000 014001010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101 024002020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202 034003030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303" -> ClosingSig(channelId, closerScript, closeeScript, 1105 sat, 0, TlvStream(ClosingTlv.CloserOutputOnly(sig1), ClosingTlv.CloseeOutputOnly(sig2), ClosingTlv.CloserAndCloseeOutputs(sig3))),
  )
  // Verify both directions of the codec: decoding produces the expected message,
  // and re-encoding that message reproduces the exact input bytes.
  for ((encoded, expected) <- testCases) {
    val decoded = lightningMessageCodec.decode(encoded.bits).require.value
    assert(decoded == expected)
    val reEncoded = lightningMessageCodec.encode(expected).require.bytes
    assert(reEncoded == encoded)
  }
}
|
||||
|
||||
test("encode/decode all channel messages") {
|
||||
val unknownTlv = GenericTlv(UInt64(5), ByteVector.fromValidHex("deadbeef"))
|
||||
val msgs = List(
|
||||
|
|
Loading…
Add table
Reference in a new issue