mirror of
https://github.com/ACINQ/eclair.git
synced 2025-03-12 10:30:45 +01:00
Add support for your_last_funding_locked and my_current_funding_locked tlvs in channel_reestablish (#3007)
When the `my_current_funding_locked_txid` TLV confirms our latest funding tx, we prune previous funding transactions, just as if we had received `splice_locked` from our peer for that txid. When we receive a `your_last_funding_locked_txid` that does not match our latest confirmed funding tx, we know our peer did not receive our last `splice_locked` and we retransmit it. Doing the same for `channel_ready` will be handled in a follow-up PR.

For public channels, nodes also retransmit `splice_locked` after `channel_reestablish` if they have not received `announcement_signatures` for the latest confirmed funding tx. This prompts our peer to also retransmit their own `splice_locked` and `announcement_signatures`. For public channels, nodes respond to `splice_locked` with their own `splice_locked` only if they have not already sent it since the last `channel_reestablish`: this prevents an endless loop of `splice_locked` messages.

These changes ensure nodes have exchanged `splice_locked` (and `announcement_signatures` for public channels) after a disconnect, and they will be relevant for simple taproot channels to exchange nonces. If the `your_last_funding_locked` tlv is not set, nodes always send `splice_locked` on reconnect, preserving the previous retransmission behavior.

Note: the previous behavior was susceptible to a race condition if one node sent a channel update after `channel_reestablish`, but before receiving `splice_locked` from a peer that had confirmed the latest funding tx while offline. cf. https://github.com/lightning/bolts/issues/1223

When reconnecting in the Normal state, if a legacy channel does not have its latest remote funding status set to `Locked`, we set and store it to migrate older channels. After reconnecting in other states, the remote funding status will be set to `Locked` and stored when receiving `channel_ready`, or deferred if the node is still waiting for the funding tx to be confirmed locally.
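To make the retransmission rule above concrete, here is a minimal, self-contained Scala sketch of the decision a node makes on reconnect. The types and names (`TxId`, `Reestablish`, `decideSpliceLocked`) are simplified stand-ins for illustration only, not eclair's actual API; the real logic lives in `Channel.scala` in this commit.

// Hedged sketch of the splice_locked retransmission decision described above.
// All names here are hypothetical stand-ins, not eclair types.
object SpliceLockedRetransmitSketch {
  final case class TxId(value: String)
  final case class Reestablish(yourLastFundingLocked: Option[TxId], myCurrentFundingLocked: Option[TxId])

  /** Returns the funding txid for which we should (re)send splice_locked, if any. */
  def decideSpliceLocked(lastLocallyLocked: Option[(TxId, Long)], // (txid, fundingTxIndex) of our last locked funding tx
                         announcedTxId: Option[TxId],             // last funding tx we announced (public channels)
                         isPublic: Boolean,
                         remote: Reestablish): Option[TxId] = {
    lastLocallyLocked match {
      case Some((txId, fundingTxIndex)) if fundingTxIndex > 0 =>
        // Our peer tells us the last splice_locked txid they received from us: if it doesn't match, they missed it.
        val notReceivedByRemote = !remote.yourLastFundingLocked.contains(txId)
        // For public channels, keep retransmitting until the splice is announced, to trigger announcement_signatures.
        val notAnnouncedYet = isPublic && !announcedTxId.contains(txId)
        if (notReceivedByRemote || notAnnouncedYet) Some(txId) else None
      case _ => None // initial funding tx or nothing locked: channel_ready handles that case
    }
  }

  def main(args: Array[String]): Unit = {
    val splice = TxId("splice-tx")
    // Peer never saw our splice_locked: we retransmit it.
    println(decideSpliceLocked(Some((splice, 1L)), None, isPublic = false, Reestablish(None, None)))
    // Peer acknowledged it and the channel is private: nothing to send.
    println(decideSpliceLocked(Some((splice, 1L)), None, isPublic = false, Reestablish(Some(splice), None)))
  }
}

Running the sketch prints `Some(TxId(splice-tx))` then `None`, matching the retransmit-on-mismatch behavior described above.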
This commit is contained in:
parent f6b051cf73
commit 21917f55dd
11 changed files with 401 additions and 60 deletions
@@ -873,6 +873,9 @@ case class Commitments(params: ChannelParams,
// We always use the last commitment that was created, to make sure we never go back in time.
val latest = FullCommitment(params, changes, active.head.fundingTxIndex, active.head.firstRemoteCommitIndex, active.head.remoteFundingPubKey, active.head.localFundingStatus, active.head.remoteFundingStatus, active.head.localCommit, active.head.remoteCommit, active.head.nextRemoteCommit_opt)

val lastLocalLocked_opt: Option[Commitment] = active.filter(_.localFundingStatus.isInstanceOf[LocalFundingStatus.Locked]).sortBy(_.fundingTxIndex).lastOption
val lastRemoteLocked_opt: Option[Commitment] = active.filter(c => c.remoteFundingStatus == RemoteFundingStatus.Locked).sortBy(_.fundingTxIndex).lastOption

def add(commitment: Commitment): Commitments = copy(active = commitment +: active)

// @formatter:off

@@ -1270,8 +1273,6 @@ case class Commitments(params: ChannelParams,
// This ensures that we only have to send splice_locked for the latest commitment instead of sending it for every commitment.
// A side-effect is that previous commitments that are implicitly locked don't necessarily have their status correctly set.
// That's why we look at locked commitments separately and then select the one with the oldest fundingTxIndex.
val lastLocalLocked_opt = active.find(_.localFundingStatus.isInstanceOf[LocalFundingStatus.Locked])
val lastRemoteLocked_opt = active.find(_.remoteFundingStatus == RemoteFundingStatus.Locked)
val lastLocked_opt = (lastLocalLocked_opt, lastRemoteLocked_opt) match {
// We select the locked commitment with the smaller value for fundingTxIndex, but both have to be defined.
// If both have the same fundingTxIndex, they must actually be the same commitment, because:

@@ -1280,13 +1281,13 @@ case class Commitments(params: ChannelParams,
// - we don't allow creating a splice on top of an unconfirmed transaction that has RBF attempts (because it
// would become invalid if another of the RBF attempts end up being confirmed)
case (Some(lastLocalLocked), Some(lastRemoteLocked)) => Some(Seq(lastLocalLocked, lastRemoteLocked).minBy(_.fundingTxIndex))
// Special case for the initial funding tx, we only require a local lock because channel_ready doesn't explicitly reference a funding tx.
// Special case for the initial funding tx, we only require a local lock because our peer may have never sent channel_ready.
case (Some(lastLocalLocked), None) if lastLocalLocked.fundingTxIndex == 0 => Some(lastLocalLocked)
case _ => None
}
lastLocked_opt match {
case Some(lastLocked) =>
// all commitments older than this one are inactive
// All commitments older than this one, and RBF alternatives, become inactive.
val inactive1 = active.filter(c => c.fundingTxId != lastLocked.fundingTxId && c.fundingTxIndex <= lastLocked.fundingTxIndex)
inactive1.foreach(c => log.info("deactivating commitment fundingTxIndex={} fundingTxId={}", c.fundingTxIndex, c.fundingTxId))
copy(
@@ -222,6 +222,8 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
var announcementSigsStash = Map.empty[RealShortChannelId, AnnouncementSignatures]
// we record the announcement_signatures messages we already sent to avoid unnecessary retransmission
var announcementSigsSent = Set.empty[RealShortChannelId]
// we keep track of the splice_locked we sent after channel_reestablish and its funding tx index to avoid sending it again
private var spliceLockedSent = Map.empty[TxId, Long]

private def trimAnnouncementSigsStashIfNeeded(): Unit = {
if (announcementSigsStash.size >= 10) {
@@ -233,6 +235,17 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
}
}

private def trimSpliceLockedSentIfNeeded(): Unit = {
if (spliceLockedSent.size >= 10) {
// We shouldn't store an unbounded number of splice_locked: on long-lived connections where we do a lot of splice
// transactions, we only need to keep track of the most recent ones.
val oldestFundingTxId = spliceLockedSent.toSeq
.sortBy { case (_, fundingTxIndex) => fundingTxIndex }
.map { case (fundingTxId, _) => fundingTxId }.head
spliceLockedSent -= oldestFundingTxId
}
}

val txPublisher = txPublisherFactory.spawnTxPublisher(context, remoteNodeId)

// this will be used to detect htlc timeouts
@@ -775,10 +788,15 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with

case Event(c: CurrentFeerates.BitcoinCore, d: DATA_NORMAL) => handleCurrentFeerate(c, d)

case Event(_: ChannelReady, _: DATA_NORMAL) =>
// This happens on reconnection, because channel_ready is sent again if the channel hasn't been used yet,
// otherwise we cannot be sure that it was correctly received before disconnecting.
stay()
case Event(_: ChannelReady, d: DATA_NORMAL) =>
// After a reconnection, if the channel hasn't been used yet, our peer cannot be sure we received their channel_ready
// so they will resend it. Their remote funding status must also be set to Locked if it wasn't already.
// NB: Their remote funding status will be stored when the commitment is next updated, or channel_ready will
// be sent again if a reconnection occurs first.
stay() using d.copy(commitments = d.commitments.copy(active = d.commitments.active.map {
case c if c.fundingTxIndex == 0 => c.copy(remoteFundingStatus = RemoteFundingStatus.Locked)
case c => c
}))

// Channels are publicly announced if both parties want it: we ignore this message if we don't want to announce the channel.
case Event(remoteAnnSigs: AnnouncementSignatures, d: DATA_NORMAL) if d.commitments.announceChannel =>
@@ -1341,11 +1359,13 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
case Event(w: WatchPublishedTriggered, d: DATA_NORMAL) =>
val fundingStatus = LocalFundingStatus.ZeroconfPublishedFundingTx(w.tx, d.commitments.localFundingSigs(w.tx.txid), d.commitments.liquidityPurchase(w.tx.txid))
d.commitments.updateLocalFundingStatus(w.tx.txid, fundingStatus, d.lastAnnouncedFundingTxId_opt) match {
case Right((commitments1, _)) =>
case Right((commitments1, commitment)) =>
// This is a zero-conf channel, the min-depth isn't critical: we use the default.
watchFundingConfirmed(w.tx.txid, Some(nodeParams.channelConf.minDepth), delay_opt = None)
maybeEmitEventsPostSplice(d.aliases, d.commitments, commitments1, d.lastAnnouncement_opt)
maybeUpdateMaxHtlcAmount(d.channelUpdate.htlcMaximumMsat, commitments1)
spliceLockedSent += (commitment.fundingTxId -> commitment.fundingTxIndex)
trimSpliceLockedSentIfNeeded()
stay() using d.copy(commitments = commitments1) storing() sending SpliceLocked(d.channelId, w.tx.txid)
case Left(_) => stay()
}
@@ -1356,7 +1376,11 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
// We check if this commitment was already locked before receiving the event (which happens when using 0-conf
// or for the initial funding transaction). If it was previously not locked, we must send splice_locked now.
val previouslyNotLocked = d.commitments.all.exists(c => c.fundingTxId == commitment.fundingTxId && c.localFundingStatus.isInstanceOf[LocalFundingStatus.NotLocked])
val spliceLocked_opt = if (previouslyNotLocked) Some(SpliceLocked(d.channelId, w.tx.txid)) else None
val spliceLocked_opt = if (previouslyNotLocked) {
spliceLockedSent += (commitment.fundingTxId -> commitment.fundingTxIndex)
trimSpliceLockedSentIfNeeded()
Some(SpliceLocked(d.channelId, w.tx.txid))
} else None
// If the channel is public and we've received the remote splice_locked, we send our announcement_signatures
// in order to generate the channel_announcement.
val remoteLocked = commitment.fundingTxIndex == 0 || d.commitments.all.exists(c => c.fundingTxId == commitment.fundingTxId && c.remoteFundingStatus == RemoteFundingStatus.Locked)
@@ -1379,19 +1403,34 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
case Event(msg: SpliceLocked, d: DATA_NORMAL) =>
d.commitments.updateRemoteFundingStatus(msg.fundingTxId, d.lastAnnouncedFundingTxId_opt) match {
case Right((commitments1, commitment)) =>
// If we have both already sent splice_locked for this commitment, then we are receiving splice_locked
// again after a reconnection and must retransmit our splice_locked and new announcement_signatures. Nodes
// retransmit splice_locked after a reconnection when they have received splice_locked but NOT matching signatures
// before the last disconnect. If a matching splice_locked has already been sent since reconnecting, then do not
// retransmit splice_locked to avoid a loop.
// NB: It is important both nodes retransmit splice_locked after reconnecting to ensure new Taproot nonces
// are exchanged for channel announcements.
val isLatestLocked = d.commitments.lastLocalLocked_opt.exists(_.fundingTxId == msg.fundingTxId) && d.commitments.lastRemoteLocked_opt.exists(_.fundingTxId == msg.fundingTxId)
val spliceLocked_opt = if (d.commitments.announceChannel && isLatestLocked && !spliceLockedSent.contains(commitment.fundingTxId)) {
spliceLockedSent += (commitment.fundingTxId -> commitment.fundingTxIndex)
trimSpliceLockedSentIfNeeded()
Some(SpliceLocked(d.channelId, commitment.fundingTxId))
} else {
None
}
// If the commitment is confirmed, we were waiting to receive the remote splice_locked before sending our announcement_signatures.
val localAnnSigs_opt = if (d.commitments.announceChannel) commitment.signAnnouncement(nodeParams, commitments1.params) else None
localAnnSigs_opt match {
case Some(localAnnSigs) =>
// The commitment was locked on our side and we were waiting to receive the remote splice_locked before sending our announcement_signatures.
val localAnnSigs_opt = commitment.signAnnouncement(nodeParams, commitments1.params) match {
case Some(localAnnSigs) if !announcementSigsSent.contains(localAnnSigs.shortChannelId) =>
announcementSigsSent += localAnnSigs.shortChannelId
// If we've already received the remote announcement_signatures, we're now ready to process them.
announcementSigsStash.get(localAnnSigs.shortChannelId).foreach(self ! _)
case None => // The channel is private or the commitment isn't locked on our side.
Some(localAnnSigs)
case Some(_) => None // We've already sent these announcement_signatures since the last reconnect.
case None => None // The channel is private or the commitment isn't locked on our side.
}
maybeEmitEventsPostSplice(d.aliases, d.commitments, commitments1, d.lastAnnouncement_opt)
maybeUpdateMaxHtlcAmount(d.channelUpdate.htlcMaximumMsat, commitments1)
stay() using d.copy(commitments = commitments1) storing() sending localAnnSigs_opt.toSeq
stay() using d.copy(commitments = commitments1) storing() sending spliceLocked_opt.toSeq ++ localAnnSigs_opt.toSeq
case Left(_) => stay()
}
@@ -2235,13 +2274,17 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
}
case _ => Set.empty
}
val lastFundingLockedTlvs: Set[ChannelReestablishTlv] =
d.commitments.lastLocalLocked_opt.map(c => ChannelReestablishTlv.MyCurrentFundingLockedTlv(c.fundingTxId)).toSet ++
d.commitments.lastRemoteLocked_opt.map(c => ChannelReestablishTlv.YourLastFundingLockedTlv(c.fundingTxId)).toSet

val channelReestablish = ChannelReestablish(
channelId = d.channelId,
nextLocalCommitmentNumber = d.commitments.localCommitIndex + 1,
nextRemoteRevocationNumber = d.commitments.remoteCommitIndex,
yourLastPerCommitmentSecret = PrivateKey(yourLastPerCommitmentSecret),
myCurrentPerCommitmentPoint = myCurrentPerCommitmentPoint,
tlvStream = TlvStream(rbfTlv)
tlvStream = TlvStream(rbfTlv ++ lastFundingLockedTlvs)
)
// we update local/remote connection-local global/local features, we don't persist it right now
val d1 = Helpers.updateFeatures(d, localInit, remoteInit)
@@ -2333,6 +2376,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
// re-send channel_ready if necessary
if (d.commitments.latest.fundingTxIndex == 0 && channelReestablish.nextLocalCommitmentNumber == 1 && d.commitments.localCommitIndex == 0) {
// If next_local_commitment_number is 1 in both the channel_reestablish it sent and received, then the node MUST retransmit channel_ready, otherwise it MUST NOT
// TODO: when the remote node enables option_splice we can use your_last_funding_locked to detect they did not receive our channel_ready.
log.debug("re-sending channelReady")
val channelKeyPath = keyManager.keyPath(d.commitments.params.localParams, d.commitments.params.channelConfig)
val nextPerCommitmentPoint = keyManager.commitmentPoint(channelKeyPath, 1)
@@ -2379,25 +2423,39 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
case None => d.spliceStatus
}

// re-send splice_locked (must come *after* potentially retransmitting tx_signatures)
// NB: there is a key difference between channel_ready and splice_confirmed:
// - channel_ready: a non-zero commitment index implies that both sides have seen the channel_ready
// - splice_confirmed: the commitment index can be updated as long as it is compatible with all splices, so
// we must keep sending our most recent splice_locked at each reconnection
val spliceLocked = d.commitments.active
.filter(c => c.fundingTxIndex > 0) // only consider splice txs
.collectFirst { case c if c.localFundingStatus.isInstanceOf[LocalFundingStatus.Locked] =>
log.debug("re-sending splice_locked for fundingTxId={}", c.fundingTxId)
SpliceLocked(d.channelId, c.fundingTxId)
}
sendQueue = sendQueue ++ spliceLocked
// Prune previous funding transactions and RBF attempts if we already sent splice_locked for the last funding
// transaction that is also locked by our counterparty; we either missed their splice_locked or it confirmed
// while disconnected.
val commitments1: Commitments = channelReestablish.myCurrentFundingLocked_opt
.flatMap(remoteFundingTxLocked => d.commitments.updateRemoteFundingStatus(remoteFundingTxLocked, d.lastAnnouncedFundingTxId_opt).toOption.map(_._1))
.getOrElse(d.commitments)
// We then clean up unsigned updates that haven't been received before the disconnection.
.discardUnsignedUpdates()

val spliceLocked_opt = commitments1.lastLocalLocked_opt match {
case None => None
// We only send splice_locked for splice transactions.
case Some(c) if c.fundingTxIndex == 0 => None
case Some(c) =>
// If our peer has not received our splice_locked, we retransmit it.
val notReceivedByRemote = !channelReestablish.yourLastFundingLocked_opt.contains(c.fundingTxId)
// If this is a public channel and we haven't announced the splice, we retransmit our splice_locked and
// will exchange announcement_signatures afterwards.
val notAnnouncedYet = commitments1.announceChannel && d.lastAnnouncement_opt.forall(ann => !c.shortChannelId_opt.contains(ann.shortChannelId))
if (notReceivedByRemote || notAnnouncedYet) {
log.debug("re-sending splice_locked for fundingTxId={}", c.fundingTxId)
spliceLockedSent += (c.fundingTxId -> c.fundingTxIndex)
trimSpliceLockedSentIfNeeded()
Some(SpliceLocked(d.channelId, c.fundingTxId))
} else {
None
}
}
sendQueue = sendQueue ++ spliceLocked_opt.toSeq

// we may need to retransmit updates and/or commit_sig and/or revocation
sendQueue = sendQueue ++ syncSuccess.retransmit

// then we clean up unsigned updates
val commitments1 = d.commitments.discardUnsignedUpdates()

commitments1.remoteNextCommitInfo match {
case Left(_) =>
// we expect them to (re-)send the revocation immediately
@@ -2877,6 +2935,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
sigStash = Nil
announcementSigsStash = Map.empty
announcementSigsSent = Set.empty
spliceLockedSent = Map.empty[TxId, Long]
}

/*
@@ -95,7 +95,7 @@ trait ChannelOpenDualFunded extends DualFundingHandlers with ErrorHandlers {
| . |
| . |
WAIT_FOR_DUAL_FUNDING_LOCKED | | WAIT_FOR_DUAL_FUNDING_LOCKED
| funding_locked funding_locked |
| channel_ready channel_ready |
|---------------- ---------------|
| \/ |
| /\ |
@@ -143,7 +143,14 @@ trait CommonFundingHandlers extends CommonHandlers {
val initialChannelUpdate = Announcements.makeChannelUpdate(nodeParams, remoteNodeId, scidForChannelUpdate, commitments.params, relayFees, Helpers.maxHtlcAmount(nodeParams, commitments), enable = true)
// We need to periodically re-send channel updates, otherwise channel will be considered stale and get pruned by network.
context.system.scheduler.scheduleWithFixedDelay(initialDelay = REFRESH_CHANNEL_UPDATE_INTERVAL, delay = REFRESH_CHANNEL_UPDATE_INTERVAL, receiver = self, message = BroadcastChannelUpdate(PeriodicRefresh))
val commitments1 = commitments.modify(_.remoteNextCommitInfo).setTo(Right(channelReady.nextPerCommitmentPoint))
val commitments1 = commitments.copy(
// Set the remote status for all initial funding commitments to Locked. If there are RBF attempts, only one can be confirmed locally.
active = commitments.active.map {
case c if c.fundingTxIndex == 0 => c.copy(remoteFundingStatus = RemoteFundingStatus.Locked)
case c => c
},
remoteNextCommitInfo = Right(channelReady.nextPerCommitmentPoint)
)
peer ! ChannelReadyForPayments(self, remoteNodeId, commitments.channelId, fundingTxIndex = 0)
DATA_NORMAL(commitments1, aliases1, None, initialChannelUpdate, None, None, None, SpliceStatus.NoSplice)
}
@@ -235,13 +235,24 @@ sealed trait ChannelReestablishTlv extends Tlv
object ChannelReestablishTlv {

case class NextFundingTlv(txId: TxId) extends ChannelReestablishTlv
case class YourLastFundingLockedTlv(txId: TxId) extends ChannelReestablishTlv
case class MyCurrentFundingLockedTlv(txId: TxId) extends ChannelReestablishTlv

object NextFundingTlv {
val codec: Codec[NextFundingTlv] = tlvField(txIdAsHash)
}

object YourLastFundingLockedTlv {
val codec: Codec[YourLastFundingLockedTlv] = tlvField("your_last_funding_locked_txid" | txIdAsHash)
}
object MyCurrentFundingLockedTlv {
val codec: Codec[MyCurrentFundingLockedTlv] = tlvField("my_current_funding_locked_txid" | txIdAsHash)
}

val channelReestablishTlvCodec: Codec[TlvStream[ChannelReestablishTlv]] = tlvStream(discriminated[ChannelReestablishTlv].by(varint)
.typecase(UInt64(0), NextFundingTlv.codec)
.typecase(UInt64(1), YourLastFundingLockedTlv.codec)
.typecase(UInt64(3), MyCurrentFundingLockedTlv.codec)
)
}
@@ -185,6 +185,8 @@ case class ChannelReestablish(channelId: ByteVector32,
myCurrentPerCommitmentPoint: PublicKey,
tlvStream: TlvStream[ChannelReestablishTlv] = TlvStream.empty) extends ChannelMessage with HasChannelId {
val nextFundingTxId_opt: Option[TxId] = tlvStream.get[ChannelReestablishTlv.NextFundingTlv].map(_.txId)
val myCurrentFundingLocked_opt: Option[TxId] = tlvStream.get[ChannelReestablishTlv.MyCurrentFundingLockedTlv].map(_.txId)
val yourLastFundingLocked_opt: Option[TxId] = tlvStream.get[ChannelReestablishTlv.YourLastFundingLockedTlv].map(_.txId)
}

case class OpenChannel(chainHash: BlockHash,
@@ -100,6 +100,8 @@ class WaitForChannelReadyStateSpec extends TestKitBaseClass with FixtureAnyFunSu
alice2bob.expectMsgType[ChannelReady]
awaitCond(alice.stateName == WAIT_FOR_CHANNEL_READY)
awaitCond(bob.stateName == WAIT_FOR_CHANNEL_READY)
assert(alice.stateData.asInstanceOf[DATA_WAIT_FOR_CHANNEL_READY].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.NotLocked)
assert(bob.stateData.asInstanceOf[DATA_WAIT_FOR_CHANNEL_READY].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.NotLocked)
withFixture(test.toNoArgTest(FixtureParam(alice, bob, alice2bob, bob2alice, alice2blockchain, bob2blockchain, router, aliceListener, bobListener)))
}
}
@@ -116,6 +118,7 @@ class WaitForChannelReadyStateSpec extends TestKitBaseClass with FixtureAnyFunSu
val listener = TestProbe()
alice.underlying.system.eventStream.subscribe(listener.ref, classOf[ChannelOpened])
bob2alice.forward(alice)
awaitCond(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)
listener.expectMsg(ChannelOpened(alice, bob.underlyingActor.nodeParams.nodeId, channelId(alice)))
val initialChannelUpdate = alice.stateData.asInstanceOf[DATA_NORMAL].channelUpdate
assert(initialChannelUpdate.shortChannelId == aliceIds.localAlias)
@@ -139,6 +142,7 @@ class WaitForChannelReadyStateSpec extends TestKitBaseClass with FixtureAnyFunSu
val channelReady = bob2alice.expectMsgType[ChannelReady]
val channelReadyNoAlias = channelReady.modify(_.tlvStream.records).using(_.filterNot(_.isInstanceOf[ChannelReadyTlv.ShortChannelIdTlv]))
bob2alice.forward(alice, channelReadyNoAlias)
awaitCond(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)
val initialChannelUpdate = alice.stateData.asInstanceOf[DATA_NORMAL].channelUpdate
assert(initialChannelUpdate.shortChannelId == aliceIds.localAlias)
assert(initialChannelUpdate.feeBaseMsat == relayFees.feeBase)
@@ -163,6 +167,7 @@ class WaitForChannelReadyStateSpec extends TestKitBaseClass with FixtureAnyFunSu
alice.underlying.system.eventStream.subscribe(listener.ref, classOf[ChannelOpened])
bob2alice.forward(alice)
listener.expectMsg(ChannelOpened(alice, bob.underlyingActor.nodeParams.nodeId, channelId(alice)))
awaitCond(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)
val initialChannelUpdate = alice.stateData.asInstanceOf[DATA_NORMAL].channelUpdate
assert(initialChannelUpdate.shortChannelId == aliceIds.localAlias)
assert(initialChannelUpdate.feeBaseMsat == relayFees.feeBase)
@@ -183,6 +188,7 @@ class WaitForChannelReadyStateSpec extends TestKitBaseClass with FixtureAnyFunSu
val channelReady = bob2alice.expectMsgType[ChannelReady]
val channelReadyNoAlias = channelReady.modify(_.tlvStream.records).using(_.filterNot(_.isInstanceOf[ChannelReadyTlv.ShortChannelIdTlv]))
bob2alice.forward(alice, channelReadyNoAlias)
awaitCond(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)
val initialChannelUpdate = alice.stateData.asInstanceOf[DATA_NORMAL].channelUpdate
assert(initialChannelUpdate.shortChannelId == aliceIds.localAlias)
assert(initialChannelUpdate.feeBaseMsat == relayFees.feeBase)
@@ -207,6 +213,7 @@ class WaitForChannelReadyStateSpec extends TestKitBaseClass with FixtureAnyFunSu
assert(channelReady.alias_opt.contains(bobIds.localAlias))
bob2alice.forward(alice)
alice2bob.expectMsgType[AnnouncementSignatures]
awaitCond(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)
val initialChannelUpdate = alice.stateData.asInstanceOf[DATA_NORMAL].channelUpdate
assert(initialChannelUpdate.shortChannelId == aliceIds.localAlias)
assert(initialChannelUpdate.feeBaseMsat == relayFees.feeBase)
@@ -228,6 +235,7 @@ class WaitForChannelReadyStateSpec extends TestKitBaseClass with FixtureAnyFunSu
val channelReady = bob2alice.expectMsgType[ChannelReady]
assert(channelReady.alias_opt.contains(bobIds.localAlias))
bob2alice.forward(alice)
awaitCond(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)
val initialChannelUpdate = alice.stateData.asInstanceOf[DATA_NORMAL].channelUpdate
assert(initialChannelUpdate.shortChannelId == aliceIds.localAlias)
assert(initialChannelUpdate.feeBaseMsat == relayFees.feeBase)
@@ -124,10 +124,12 @@ class WaitForDualFundingReadyStateSpec extends TestKitBaseClass with FixtureAnyF
alice2bob.forward(bob, aliceChannelReady)
listenerB.expectMsg(ChannelOpened(bob, alice.underlyingActor.nodeParams.nodeId, channelId(bob)))
awaitCond(bob.stateName == NORMAL)
assert(bob.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)
val bobChannelReady = bob2alice.expectMsgType[ChannelReady]
bob2alice.forward(alice, bobChannelReady)
listenerA.expectMsg(ChannelOpened(alice, bob.underlyingActor.nodeParams.nodeId, channelId(alice)))
awaitCond(alice.stateName == NORMAL)
assert(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)

// The channel is now ready to process payments.
alicePeer.fishForMessage() {
@@ -180,10 +182,12 @@ class WaitForDualFundingReadyStateSpec extends TestKitBaseClass with FixtureAnyF
alice2bob.forward(bob, aliceChannelReady)
listenerB.expectMsg(ChannelOpened(bob, alice.underlyingActor.nodeParams.nodeId, channelId(bob)))
awaitCond(bob.stateName == NORMAL)
assert(bob.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)
val bobChannelReady = bob2alice.expectMsgType[ChannelReady]
bob2alice.forward(alice, bobChannelReady)
listenerA.expectMsg(ChannelOpened(alice, bob.underlyingActor.nodeParams.nodeId, channelId(alice)))
awaitCond(alice.stateName == NORMAL)
assert(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)

val aliceCommitments = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.latest
assert(aliceCommitments.commitment.shortChannelId_opt.isEmpty)
@@ -208,9 +212,11 @@ class WaitForDualFundingReadyStateSpec extends TestKitBaseClass with FixtureAnyF
val aliceChannelReady = alice2bob.expectMsgType[ChannelReady]
alice2bob.forward(bob, aliceChannelReady)
awaitCond(bob.stateName == NORMAL)
assert(bob.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)
val bobChannelReady = bob2alice.expectMsgType[ChannelReady]
bob2alice.forward(alice, bobChannelReady)
awaitCond(alice.stateName == NORMAL)
assert(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.active.head.remoteFundingStatus == RemoteFundingStatus.Locked)

// Alice sends announcement_signatures to Bob.
val aliceAnnSigs = alice2bob.expectMsgType[AnnouncementSignatures]
@@ -1493,10 +1493,11 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik

// Alice and Bob reconnect.
reconnect(f)
assert(alice2bob.expectMsgType[SpliceLocked].fundingTxId == spliceTx.txid)
bob2alice.expectNoMessage(100 millis)
assert(alice2bob.expectMsgType[SpliceLocked].fundingTxId == spliceTx.txid) // Alice resends `splice_locked` because she hasn't received Bob's announcement_signatures.
alice2bob.forward(bob)
alice2bob.expectNoMessage(100 millis)
assert(bob2alice.expectMsgType[SpliceLocked].fundingTxId == spliceTx.txid)
assert(bob2alice.expectMsgType[SpliceLocked].fundingTxId == spliceTx.txid) // Bob resends `splice_locked` in response to Alice's `splice_locked` after channel_reestablish.
bob2alice.forward(alice)
assert(bob2alice.expectMsgType[AnnouncementSignatures].shortChannelId == spliceAnn.shortChannelId)
bob2alice.forward(alice)
@@ -2340,8 +2341,8 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik
reconnect(f)

// NB: channel_ready are not re-sent because the channel has already been used (for building splices).
bob2alice.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx1.txid)
bob2alice.forward(alice)
// Alice has already received `splice_locked` from Bob for the first splice, so Bob doesn't need to resend it.
bob2alice.expectNoMessage(100 millis)
alice2bob.expectNoMessage(100 millis)

// The first splice confirms on Alice's side.
@@ -2353,10 +2354,9 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik
disconnect(f)
reconnect(f)

alice2bob.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx1.txid)
alice2bob.forward(bob)
bob2alice.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx1.txid)
bob2alice.forward(alice)
// Alice and Bob have already exchanged `splice_locked` for the first splice, so there is no need to resend it.
bob2alice.expectNoMessage(100 millis)
alice2bob.expectNoMessage(100 millis)

// The second splice confirms on Alice's side.
alice ! WatchFundingConfirmedTriggered(BlockHeight(400000), 42, fundingTx2)
@@ -2367,10 +2367,6 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik
disconnect(f)
reconnect(f)

alice2bob.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx2.txid)
alice2bob.forward(bob)
bob2alice.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx1.txid)
bob2alice.forward(alice)
alice2bob.expectNoMessage(100 millis)
bob2alice.expectNoMessage(100 millis)
@@ -2383,8 +2379,7 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik
disconnect(f)
reconnect(f)

alice2bob.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx2.txid)
alice2bob.forward(bob)
alice2bob.expectNoMessage(100 millis)
bob2alice.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx2.txid)
// This time Alice received the splice_locked for the second splice.
bob2alice.forward(alice)
@@ -2394,10 +2389,6 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik
disconnect(f)
reconnect(f)

alice2bob.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx2.txid)
alice2bob.forward(bob)
bob2alice.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx2.txid)
bob2alice.forward(alice)
alice2bob.expectNoMessage(100 millis)
bob2alice.expectNoMessage(100 millis)
@@ -2412,6 +2403,255 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik
}
}

test("disconnect and update channel before receiving final splice_locked") { f =>
import f._

val fundingTx = initiateSplice(f, spliceIn_opt = Some(SpliceIn(500_000 sat, pushAmount = 0 msat)))
checkWatchConfirmed(f, fundingTx)

// The splice confirms on Alice's side.
alice ! WatchFundingConfirmedTriggered(BlockHeight(400000), 42, fundingTx)
alice2blockchain.expectMsgTypeHaving[WatchFundingSpent](_.txId == fundingTx.txid)
alice2bob.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx.txid)
alice2bob.forward(bob)

alice2bob.ignoreMsg { case _: ChannelUpdate => true }
bob2alice.ignoreMsg { case _: ChannelUpdate => true }

disconnect(f)

// The splice confirms on Bob's side while disconnected.
bob ! WatchFundingConfirmedTriggered(BlockHeight(400000), 42, fundingTx)
bob2blockchain.expectMsgTypeHaving[WatchFundingSpent](_.txId == fundingTx.txid)
bob2alice.expectNoMessage(100 millis)

// From Alice's point of view, we still have two active commitments, FundingTx1 and FundingTx2.
// From Bob's point of view, we have one active commitment, FundingTx2.
assert(alice.stateData.asInstanceOf[ChannelDataWithCommitments].commitments.active.size == 2)
assert(bob.stateData.asInstanceOf[ChannelDataWithCommitments].commitments.active.size == 1)

reconnect(f)

// Because `your_last_funding_locked_txid` from Bob matches the last `splice_locked` txid sent by Alice, there is no need
// for Alice to resend `splice_locked`. Alice processes the `my_current_funding_locked` from Bob as if she received
// `splice_locked` from Bob and prunes the initial funding commitment.
awaitCond(alice.stateData.asInstanceOf[ChannelDataWithCommitments].commitments.active.size == 1)
assert(alice.stateData.asInstanceOf[ChannelDataWithCommitments].commitments.active.head.fundingTxId == fundingTx.txid)
alice2bob.expectNoMessage(100 millis)

// The `your_last_funding_locked_txid` from Alice does not match the last `splice_locked` sent by Bob, so Bob must resend `splice_locked`.
val bobSpliceLocked = bob2alice.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx.txid)
assert(bob.stateData.asInstanceOf[ChannelDataWithCommitments].commitments.active.size == 1)

// Alice sends an HTLC before receiving Bob's splice_locked: see https://github.com/lightning/bolts/issues/1223.
addHtlc(15_000_000 msat, alice, bob, alice2bob, bob2alice)
val sender = TestProbe()
alice ! CMD_SIGN(Some(sender.ref))
sender.expectMsgType[RES_SUCCESS[CMD_SIGN]]
alice2bob.expectMsgType[CommitSig]
alice2bob.forward(bob)
bob2alice.forward(alice, bobSpliceLocked)
bob2alice.expectMsgType[RevokeAndAck]
bob2alice.forward(alice)
bob2alice.expectMsgType[CommitSig]
bob2alice.forward(alice)
alice2bob.expectMsgType[RevokeAndAck]
alice2bob.forward(bob)

bob2relayer.expectMsgType[Relayer.RelayForward]

alice2bob.expectNoMessage(100 millis)
bob2alice.expectNoMessage(100 millis)

// the splice is locked on both sides
alicePeer.fishForMessage() {
case e: ChannelReadyForPayments => e.fundingTxIndex == 1
case _ => false
}
bobPeer.fishForMessage() {
case e: ChannelReadyForPayments => e.fundingTxIndex == 1
case _ => false
}
}

test("disconnect before receiving announcement_signatures from one peer", Tag(ChannelStateTestsTags.ChannelsPublic)) { f =>
|
||||
import f._
|
||||
|
||||
val fundingTx = initiateSplice(f, spliceIn_opt = Some(SpliceIn(500_000 sat, pushAmount = 0 msat)))
|
||||
checkWatchConfirmed(f, fundingTx)
|
||||
|
||||
// The splice confirms on Alice's side.
|
||||
alice ! WatchFundingConfirmedTriggered(BlockHeight(420000), 42, fundingTx)
|
||||
alice2blockchain.expectMsgTypeHaving[WatchFundingSpent](_.txId == fundingTx.txid)
|
||||
alice2bob.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx.txid)
|
||||
alice2bob.forward(bob)
|
||||
alice2bob.expectNoMessage(100 millis)
|
||||
|
||||
// The splice confirms on Bob's side.
|
||||
bob ! WatchFundingConfirmedTriggered(BlockHeight(420000), 42, fundingTx)
|
||||
bob2blockchain.expectMsgTypeHaving[WatchFundingSpent](_.txId == fundingTx.txid)
|
||||
bob2alice.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx.txid)
|
||||
bob2alice.forward(alice)
|
||||
|
||||
// Alice sends announcement_signatures to Bob.
|
||||
alice2bob.expectMsgType[AnnouncementSignatures]
|
||||
alice2bob.forward(bob)
|
||||
|
||||
// Alice disconnects before Bob can send announcement_signatures.
|
||||
bob2alice.expectMsgType[AnnouncementSignatures]
|
||||
|
||||
disconnect(f)
|
||||
reconnect(f)
|
||||
|
||||
// Bob will not resend `splice_locked` because he has already received `announcement_signatures` from Alice.
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
|
||||
// Alice resends `splice_locked` because she did not receive `announcement_signatures` from Bob before the disconnect.
|
||||
val aliceSpliceLocked = alice2bob.expectMsgType[SpliceLocked]
|
||||
alice2bob.forward(bob)
|
||||
alice2bob.expectNoMessage(100 millis)
|
||||
|
||||
// Bob receives Alice's `splice_locked` after `channel_reestablish` and must retransmit both `splice_locked` and `announcement_signatures`.
|
||||
val bobSpliceLocked = bob2alice.expectMsgType[SpliceLocked]
|
||||
bob2alice.forward(alice)
|
||||
bob2alice.expectMsgType[AnnouncementSignatures]
|
||||
bob2alice.forward(alice)
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
|
||||
// Alice retransmits `announcement_signatures` to Bob after receiving `splice_locked` from Bob.
|
||||
alice2bob.expectMsgType[AnnouncementSignatures]
|
||||
alice2bob.forward(bob)
|
||||
alice2bob.expectNoMessage(100 millis)
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
|
||||
// If either node receives `splice_locked` again, it should be ignored; `announcement_signatures have already been sent.
|
||||
alice2bob.forward(bob, aliceSpliceLocked)
|
||||
bob2alice.forward(alice, bobSpliceLocked)
|
||||
alice2bob.expectNoMessage(100 millis)
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
|
||||
// the splice is locked on both sides
|
||||
alicePeer.fishForMessage() {
|
||||
case e: ChannelReadyForPayments => e.fundingTxIndex == 1
|
||||
case _ => false
|
||||
}
|
||||
bobPeer.fishForMessage() {
|
||||
case e: ChannelReadyForPayments => e.fundingTxIndex == 1
|
||||
case _ => false
|
||||
}
|
||||
}
|
||||
|
||||
test("disconnect before receiving splice_locked from a legacy peer") { f =>
|
||||
import f._
|
||||
|
||||
val fundingTx = initiateSplice(f, spliceIn_opt = Some(SpliceIn(500_000 sat, pushAmount = 0 msat)))
|
||||
checkWatchConfirmed(f, fundingTx)
|
||||
|
||||
// The splice confirms for both.
|
||||
alice ! WatchFundingConfirmedTriggered(BlockHeight(400000), 42, fundingTx)
|
||||
alice2blockchain.expectMsgTypeHaving[WatchFundingSpent](_.txId == fundingTx.txid)
|
||||
alice2bob.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx.txid)
|
||||
alice2bob.forward(bob)
|
||||
bob ! WatchFundingConfirmedTriggered(BlockHeight(400000), 42, fundingTx)
|
||||
bob2blockchain.expectMsgTypeHaving[WatchFundingSpent](_.txId == fundingTx.txid)
|
||||
bob2alice.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx.txid)
|
||||
bob2alice.forward(alice)
|
||||
|
||||
alice2bob.ignoreMsg { case _: ChannelUpdate => true }
|
||||
bob2alice.ignoreMsg { case _: ChannelUpdate => true }
|
||||
|
||||
disconnect(f)
|
||||
val (aliceReestablish, bobReestablish) = reconnect(f, sendReestablish = false)
|
||||
|
||||
// remove the last_funding_locked tlv from the reestablish messages
|
||||
alice2bob.forward(bob, aliceReestablish.copy(tlvStream = TlvStream.empty))
|
||||
bob2alice.forward(alice, bobReestablish.copy(tlvStream = TlvStream.empty))
|
||||
|
||||
// always send last splice_locked after reconnection if the last_funding_locked tlv is not set
|
||||
alice2bob.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx.txid)
|
||||
bob2alice.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx.txid)
|
||||
alice2bob.forward(bob)
|
||||
bob2alice.forward(alice)
|
||||
alice2bob.expectNoMessage(100 millis)
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
|
||||
// the splice is locked on both sides
|
||||
alicePeer.fishForMessage() {
|
||||
case e: ChannelReadyForPayments => e.fundingTxIndex == 1
|
||||
case _ => false
|
||||
}
|
||||
bobPeer.fishForMessage() {
|
||||
case e: ChannelReadyForPayments => e.fundingTxIndex == 1
|
||||
case _ => false
|
||||
}
|
||||
}
|
||||
|
||||
test("disconnect before receiving announcement_signatures from both peers", Tag(ChannelStateTestsTags.ChannelsPublic)) { f =>
|
||||
import f._
|
||||
|
||||
val fundingTx = initiateSplice(f, spliceIn_opt = Some(SpliceIn(500_000 sat, pushAmount = 0 msat)))
|
||||
checkWatchConfirmed(f, fundingTx)
|
||||
|
||||
// The splice confirms on Alice's side.
|
||||
alice ! WatchFundingConfirmedTriggered(BlockHeight(420000), 42, fundingTx)
|
||||
alice2blockchain.expectMsgTypeHaving[WatchFundingSpent](_.txId == fundingTx.txid)
|
||||
alice2bob.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx.txid)
|
||||
alice2bob.forward(bob)
|
||||
alice2bob.expectNoMessage(100 millis)
|
||||
|
||||
// The splice confirms on Bob's side.
|
||||
bob ! WatchFundingConfirmedTriggered(BlockHeight(420000), 42, fundingTx)
|
||||
bob2blockchain.expectMsgTypeHaving[WatchFundingSpent](_.txId == fundingTx.txid)
|
||||
bob2alice.expectMsgTypeHaving[SpliceLocked](_.fundingTxId == fundingTx.txid)
|
||||
bob2alice.forward(alice)
|
||||
|
||||
// Alice sends announcement_signatures to Bob.
|
||||
alice2bob.expectMsgType[AnnouncementSignatures]
|
||||
|
||||
// Bob sends announcement_signatures to Alice.
|
||||
bob2alice.expectMsgType[AnnouncementSignatures]
|
||||
|
||||
disconnect(f)
|
||||
reconnect(f)
|
||||
|
||||
// Bob resends `splice_locked` because he did not receive `announcement_signatures` from Alice before the disconnect.
|
||||
val bobSpliceLocked = bob2alice.expectMsgType[SpliceLocked]
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
|
||||
// Alice resends `splice_locked` because she did not receive `announcement_signatures` from Bob before the disconnect.
|
||||
val aliceSpliceLocked = alice2bob.expectMsgType[SpliceLocked]
|
||||
alice2bob.forward(bob)
|
||||
alice2bob.expectNoMessage(100 millis)
|
||||
|
||||
// Alice receives Bob's `splice_locked` after already resending their `splice_locked` and retransmits `announcement_signatures`.
|
||||
bob2alice.forward(alice)
|
||||
alice2bob.expectMsgType[AnnouncementSignatures]
|
||||
alice2bob.forward(bob)
|
||||
alice2bob.expectNoMessage(100 millis)
|
||||
|
||||
// Bob retransmits `announcement_signatures` to Alice after receiving `announcement_signatures` from Alice.
|
||||
bob2alice.expectMsgType[AnnouncementSignatures]
|
||||
bob2alice.forward(alice)
|
||||
alice2bob.expectNoMessage(100 millis)
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
|
||||
// If either node receives `splice_locked` again, it should be ignored; `announcement_signatures have already been sent.
|
||||
alice2bob.forward(bob, aliceSpliceLocked)
|
||||
bob2alice.forward(alice, bobSpliceLocked)
|
||||
alice2bob.expectNoMessage(100 millis)
|
||||
bob2alice.expectNoMessage(100 millis)
|
||||
|
||||
// the splice is locked on both sides
|
||||
alicePeer.fishForMessage() {
|
||||
case e: ChannelReadyForPayments => e.fundingTxIndex == 1
|
||||
case _ => false
|
||||
}
|
||||
bobPeer.fishForMessage() {
|
||||
case e: ChannelReadyForPayments => e.fundingTxIndex == 1
|
||||
case _ => false
|
||||
}
|
||||
}
|
||||
|
||||
/** Check type of published transactions */
|
||||
def assertPublished(probe: TestProbe, desc: String): Transaction = {
|
||||
val p = probe.expectMsgType[PublishTx]
|
||||
|
|
|
@@ -72,6 +72,10 @@ class OfflineStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
val aliceInit = Init(TestConstants.Alice.nodeParams.features.initFeatures())
val bobInit = Init(TestConstants.Bob.nodeParams.features.initFeatures())

private def lastFundingLockedTlvs(commitments: Commitments): Set[ChannelReestablishTlv] =
commitments.lastLocalLocked_opt.map(c => ChannelReestablishTlv.MyCurrentFundingLockedTlv(c.fundingTxId)).toSet ++
commitments.lastRemoteLocked_opt.map(c => ChannelReestablishTlv.YourLastFundingLockedTlv(c.fundingTxId)).toSet

test("reconnect after creating channel", Tag(IgnoreChannelUpdates)) { f =>
import f._
@@ -121,8 +125,8 @@ class OfflineStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with
disconnect(alice, bob)

val (aliceCurrentPerCommitmentPoint, bobCurrentPerCommitmentPoint) = reconnect(alice, bob, alice2bob, bob2alice)
val reestablishA = alice2bob.expectMsg(ChannelReestablish(htlc.channelId, 1, 0, PrivateKey(ByteVector32.Zeroes), aliceCurrentPerCommitmentPoint))
val reestablishB = bob2alice.expectMsg(ChannelReestablish(htlc.channelId, 1, 0, PrivateKey(ByteVector32.Zeroes), bobCurrentPerCommitmentPoint))
val reestablishA = alice2bob.expectMsg(ChannelReestablish(htlc.channelId, 1, 0, PrivateKey(ByteVector32.Zeroes), aliceCurrentPerCommitmentPoint, TlvStream(lastFundingLockedTlvs(alice.stateData.asInstanceOf[DATA_NORMAL].commitments))))
val reestablishB = bob2alice.expectMsg(ChannelReestablish(htlc.channelId, 1, 0, PrivateKey(ByteVector32.Zeroes), bobCurrentPerCommitmentPoint, TlvStream(lastFundingLockedTlvs(bob.stateData.asInstanceOf[DATA_NORMAL].commitments))))
alice2bob.forward(bob, reestablishA)
bob2alice.forward(alice, reestablishB)
@@ -180,8 +184,8 @@ class OfflineStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with

disconnect(alice, bob)
val (aliceCurrentPerCommitmentPoint, bobCurrentPerCommitmentPoint) = reconnect(alice, bob, alice2bob, bob2alice)
val reestablishA = alice2bob.expectMsg(ChannelReestablish(htlc.channelId, 1, 0, PrivateKey(ByteVector32.Zeroes), aliceCurrentPerCommitmentPoint))
val reestablishB = bob2alice.expectMsg(ChannelReestablish(htlc.channelId, 2, 0, PrivateKey(ByteVector32.Zeroes), bobCurrentPerCommitmentPoint))
val reestablishA = alice2bob.expectMsg(ChannelReestablish(htlc.channelId, 1, 0, PrivateKey(ByteVector32.Zeroes), aliceCurrentPerCommitmentPoint, TlvStream(lastFundingLockedTlvs(alice.stateData.asInstanceOf[DATA_NORMAL].commitments))))
val reestablishB = bob2alice.expectMsg(ChannelReestablish(htlc.channelId, 2, 0, PrivateKey(ByteVector32.Zeroes), bobCurrentPerCommitmentPoint, TlvStream(lastFundingLockedTlvs(bob.stateData.asInstanceOf[DATA_NORMAL].commitments))))
alice2bob.forward(bob, reestablishA)
bob2alice.forward(alice, reestablishB)
@@ -228,8 +232,8 @@ class OfflineStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with

{
val (aliceCurrentPerCommitmentPoint, bobCurrentPerCommitmentPoint) = reconnect(alice, bob, alice2bob, bob2alice)
val reestablishA = alice2bob.expectMsg(ChannelReestablish(htlc.channelId, 1, 1, revB.perCommitmentSecret, aliceCurrentPerCommitmentPoint))
val reestablishB = bob2alice.expectMsg(ChannelReestablish(htlc.channelId, 2, 0, PrivateKey(ByteVector32.Zeroes), bobCurrentPerCommitmentPoint))
val reestablishA = alice2bob.expectMsg(ChannelReestablish(htlc.channelId, 1, 1, revB.perCommitmentSecret, aliceCurrentPerCommitmentPoint, TlvStream(lastFundingLockedTlvs(alice.stateData.asInstanceOf[DATA_NORMAL].commitments))))
val reestablishB = bob2alice.expectMsg(ChannelReestablish(htlc.channelId, 2, 0, PrivateKey(ByteVector32.Zeroes), bobCurrentPerCommitmentPoint, TlvStream(lastFundingLockedTlvs(bob.stateData.asInstanceOf[DATA_NORMAL].commitments))))
alice2bob.forward(bob, reestablishA)
bob2alice.forward(alice, reestablishB)
}
@@ -259,8 +263,9 @@ class OfflineStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with

{
val (aliceCurrentPerCommitmentPoint, bobCurrentPerCommitmentPoint) = reconnect(alice, bob, alice2bob, bob2alice)
val reestablishA = alice2bob.expectMsg(ChannelReestablish(htlc.channelId, 2, 1, revB.perCommitmentSecret, aliceCurrentPerCommitmentPoint))
val reestablishB = bob2alice.expectMsg(ChannelReestablish(htlc.channelId, 2, 0, PrivateKey(ByteVector32.Zeroes), bobCurrentPerCommitmentPoint))
val fundingTxId = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.fundingTxId
val reestablishA = alice2bob.expectMsg(ChannelReestablish(htlc.channelId, 2, 1, revB.perCommitmentSecret, aliceCurrentPerCommitmentPoint, TlvStream(lastFundingLockedTlvs(alice.stateData.asInstanceOf[DATA_NORMAL].commitments))))
val reestablishB = bob2alice.expectMsg(ChannelReestablish(htlc.channelId, 2, 0, PrivateKey(ByteVector32.Zeroes), bobCurrentPerCommitmentPoint, TlvStream(lastFundingLockedTlvs(bob.stateData.asInstanceOf[DATA_NORMAL].commitments))))
alice2bob.forward(bob, reestablishA)
bob2alice.forward(alice, reestablishB)
}
@@ -153,6 +153,8 @@ class LightningMessageCodecsSpec extends AnyFunSuite {

hex"0088" ++ channelId ++ hex"0001020304050607 0809aabbccddeeff" ++ key.value ++ point.value -> ChannelReestablish(channelId, 0x01020304050607L, 0x0809aabbccddeeffL, key, point),
hex"0088" ++ channelId ++ hex"0001020304050607 0809aabbccddeeff" ++ key.value ++ point.value ++ hex"00 20" ++ txId.value.reverse -> ChannelReestablish(channelId, 0x01020304050607L, 0x0809aabbccddeeffL, key, point, TlvStream(ChannelReestablishTlv.NextFundingTlv(txId))),
hex"0088" ++ channelId ++ hex"0001020304050607 0809aabbccddeeff" ++ key.value ++ point.value ++ hex"01 20" ++ txId.value.reverse -> ChannelReestablish(channelId, 0x01020304050607L, 0x0809aabbccddeeffL, key, point, TlvStream(ChannelReestablishTlv.YourLastFundingLockedTlv(txId))),
hex"0088" ++ channelId ++ hex"0001020304050607 0809aabbccddeeff" ++ key.value ++ point.value ++ hex"03 20" ++ txId.value.reverse -> ChannelReestablish(channelId, 0x01020304050607L, 0x0809aabbccddeeffL, key, point, TlvStream(ChannelReestablishTlv.MyCurrentFundingLockedTlv(txId))),
hex"0088" ++ channelId ++ hex"0001020304050607 0809aabbccddeeff" ++ key.value ++ point.value ++ hex"fe47010000 00" -> ChannelReestablish(channelId, 0x01020304050607L, 0x0809aabbccddeeffL, key, point, TlvStream[ChannelReestablishTlv](Set.empty[ChannelReestablishTlv], Set(GenericTlv(tlvTag, ByteVector.empty)))),
hex"0088" ++ channelId ++ hex"0001020304050607 0809aabbccddeeff" ++ key.value ++ point.value ++ hex"fe47010000 07 bbbbbbbbbbbbbb" -> ChannelReestablish(channelId, 0x01020304050607L, 0x0809aabbccddeeffL, key, point, TlvStream[ChannelReestablishTlv](Set.empty[ChannelReestablishTlv], Set(GenericTlv(tlvTag, hex"bbbbbbbbbbbbbb")))),