1
0
Fork 0
mirror of https://github.com/ACINQ/eclair.git synced 2025-02-22 14:22:39 +01:00

Improve logs (#320)

* always print `shortChannelId` in hex

* fixed logs in `ThrottleForwarder`

* logs the `paymentHash` when relaying an htlc

* don't print all channel data when restoring a channel

* added logs to relayer

* reduced log level in Peer

* cleaned up switchboard logs

* fixed `id`/`channelId` mixup in relayer logs

* slight changes in log levels

* do not log as warning when tx generation is simply skipped

* streamlined relayer logs

* improved router logs

* don't display errors when witness can't be parsed

* don't log connection errors as warnings

* reduce amount of logs in case of local known error

* removed reconnection message to deadletter

* try a cleaner way of displaying channel errors

* put some reconnection-related logs from info to debug

* peer: ignore `Rebroadcast` messages in `INITIALIZING`

* less verbose exception logging in channel

* display friendlier close type

* reduced default log level to INFO
This commit is contained in:
Pierre-Marie Padiou 2017-12-22 23:32:34 +01:00 committed by GitHub
parent 8197747913
commit a3bdf52a2f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
11 changed files with 218 additions and 173 deletions

View file

@@ -173,7 +173,9 @@ class ZmqWatcher(client: ExtendedBitcoinClient)(implicit ec: ExecutionContext =
case w: WatchConfirmed => self ! TickNewBlock
case w => log.warning(s"ignoring $w (not implemented)")
case w: WatchLost => () // TODO: not implemented
case w => log.warning(s"ignoring $w")
}
log.debug(s"adding watch $w for $sender")
context.watch(w.channel)

View file

@@ -467,15 +467,15 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
// note: spec would allow us to keep sending new htlcs after having received their shutdown (and not sent ours)
// but we want to converge as fast as possible and they would probably not route them anyway
val error = NoMoreHtlcsClosingInProgress(d.channelId)
handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), Some(d.channelUpdate)))
handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), Some(d.channelUpdate)), c)
case Event(c: CMD_ADD_HTLC, d: DATA_NORMAL) =>
Try(Commitments.sendAdd(d.commitments, c, origin(c))) match {
case Success(Right((commitments1, add))) =>
if (c.commit) self ! CMD_SIGN
handleCommandSuccess(sender, d.copy(commitments = commitments1)) sending add
case Success(Left(error)) => handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), Some(d.channelUpdate)))
case Failure(cause) => handleCommandError(AddHtlcFailed(d.channelId, cause, origin(c), Some(d.channelUpdate)))
case Success(Left(error)) => handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), Some(d.channelUpdate)), c)
case Failure(cause) => handleCommandError(AddHtlcFailed(d.channelId, cause, origin(c), Some(d.channelUpdate)), c)
}
case Event(add: UpdateAddHtlc, d: DATA_NORMAL) =>
@@ -489,7 +489,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Success((commitments1, fulfill)) =>
if (c.commit) self ! CMD_SIGN
handleCommandSuccess(sender, d.copy(commitments = commitments1)) sending fulfill
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Event(fulfill: UpdateFulfillHtlc, d: DATA_NORMAL) =>
@@ -506,7 +506,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Success((commitments1, fail)) =>
if (c.commit) self ! CMD_SIGN
handleCommandSuccess(sender, d.copy(commitments = commitments1)) sending fail
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Event(c: CMD_FAIL_MALFORMED_HTLC, d: DATA_NORMAL) =>
@@ -514,7 +514,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Success((commitments1, fail)) =>
if (c.commit) self ! CMD_SIGN
handleCommandSuccess(sender, d.copy(commitments = commitments1)) sending fail
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Event(fail: UpdateFailHtlc, d: DATA_NORMAL) =>
@@ -540,7 +540,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Success((commitments1, fee)) =>
if (c.commit) self ! CMD_SIGN
handleCommandSuccess(sender, d.copy(commitments = commitments1)) sending fee
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Event(fee: UpdateFee, d: DATA_NORMAL) =>
@@ -549,7 +549,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Failure(cause) => handleLocalError(cause, d, Some(fee))
}
case Event(CMD_SIGN, d: DATA_NORMAL) =>
case Event(c@CMD_SIGN, d: DATA_NORMAL) =>
d.commitments.remoteNextCommitInfo match {
case _ if !Commitments.localHasChanges(d.commitments) =>
log.debug("ignoring CMD_SIGN (nothing to sign)")
@@ -560,7 +560,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
log.debug(s"sending a new sig, spec:\n${Commitments.specs2String(commitments1)}")
commitments1.localChanges.signed.collect { case u: UpdateFulfillHtlc => relayer ! AckFulfillCmd(u.channelId, u.id) }
handleCommandSuccess(sender, store(d.copy(commitments = commitments1))) sending commit
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Left(waitForRevocation) =>
log.debug(s"already in the process of signing, will sign again as soon as possible")
@@ -609,15 +609,15 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Failure(cause) => handleLocalError(cause, d, Some(revocation))
}
case Event(CMD_CLOSE(localScriptPubKey_opt), d: DATA_NORMAL) =>
case Event(c@CMD_CLOSE(localScriptPubKey_opt), d: DATA_NORMAL) =>
val localScriptPubKey = localScriptPubKey_opt.getOrElse(d.commitments.localParams.defaultFinalScriptPubKey)
if (d.localShutdown.isDefined)
handleCommandError(ClosingAlreadyInProgress((d.channelId)))
handleCommandError(ClosingAlreadyInProgress((d.channelId)), c)
else if (Commitments.localHasUnsignedOutgoingHtlcs(d.commitments))
// TODO: simplistic behavior, we could also sign-then-close
handleCommandError(CannotCloseWithUnsignedOutgoingHtlcs((d.channelId)))
handleCommandError(CannotCloseWithUnsignedOutgoingHtlcs((d.channelId)), c)
else if (!Closing.isValidFinalScriptPubkey(localScriptPubKey))
handleCommandError(InvalidFinalScript(d.channelId))
handleCommandError(InvalidFinalScript(d.channelId), c)
else {
val shutdown = Shutdown(d.channelId, localScriptPubKey)
handleCommandSuccess(sender, store(d.copy(localShutdown = Some(shutdown)))) sending shutdown
@@ -720,7 +720,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
d.channelAnnouncement match {
case None =>
require(d.shortChannelId == remoteAnnSigs.shortChannelId, s"shortChannelId mismatch: local=${d.shortChannelId.toHexString} remote=${remoteAnnSigs.shortChannelId.toHexString}")
log.info(s"announcing channelId=${d.channelId} on the network with shortId=${d.shortChannelId}")
log.info(s"announcing channelId=${d.channelId} on the network with shortId=${d.shortChannelId.toHexString}")
import d.commitments.{localParams, remoteParams}
val channelAnn = Announcements.makeChannelAnnouncement(nodeParams.chainHash, localAnnSigs.shortChannelId, localParams.nodeId, remoteParams.nodeId, localParams.fundingPrivKey.publicKey, remoteParams.fundingPubKey, localAnnSigs.nodeSignature, remoteAnnSigs.nodeSignature, localAnnSigs.bitcoinSignature, remoteAnnSigs.bitcoinSignature)
// we use GOTO instead of stay because we want to fire transitions
@@ -791,7 +791,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Success((commitments1, fulfill)) =>
if (c.commit) self ! CMD_SIGN
handleCommandSuccess(sender, d.copy(commitments = commitments1)) sending fulfill
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Event(fulfill: UpdateFulfillHtlc, d: DATA_SHUTDOWN) =>
@@ -808,7 +808,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Success((commitments1, fail)) =>
if (c.commit) self ! CMD_SIGN
handleCommandSuccess(sender, d.copy(commitments = commitments1)) sending fail
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Event(c: CMD_FAIL_MALFORMED_HTLC, d: DATA_SHUTDOWN) =>
@@ -816,7 +816,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Success((commitments1, fail)) =>
if (c.commit) self ! CMD_SIGN
handleCommandSuccess(sender, d.copy(commitments = commitments1)) sending fail
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Event(fail: UpdateFailHtlc, d: DATA_SHUTDOWN) =>
@@ -842,7 +842,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Success((commitments1, fee)) =>
if (c.commit) self ! CMD_SIGN
handleCommandSuccess(sender, d.copy(commitments = commitments1)) sending fee
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Event(fee: UpdateFee, d: DATA_SHUTDOWN) =>
@@ -851,7 +851,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Failure(cause) => handleLocalError(cause, d, Some(fee))
}
case Event(CMD_SIGN, d: DATA_SHUTDOWN) =>
case Event(c@CMD_SIGN, d: DATA_SHUTDOWN) =>
d.commitments.remoteNextCommitInfo match {
case _ if !Commitments.localHasChanges(d.commitments) =>
log.debug("ignoring CMD_SIGN (nothing to sign)")
@@ -862,7 +862,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
log.debug(s"sending a new sig, spec:\n${Commitments.specs2String(commitments1)}")
commitments1.localChanges.signed.collect { case u: UpdateFulfillHtlc => relayer ! AckFulfillCmd(u.channelId, u.id) }
handleCommandSuccess(sender, store(d.copy(commitments = commitments1))) sending commit
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Left(waitForRevocation) =>
log.debug(s"already in the process of signing, will sign again as soon as possible")
@@ -932,7 +932,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Event(WatchEventSpent(BITCOIN_FUNDING_SPENT, tx), d: DATA_SHUTDOWN) => handleRemoteSpentOther(tx, d)
case Event(CMD_CLOSE(_), d: DATA_SHUTDOWN) => handleCommandError(ClosingAlreadyInProgress(d.channelId))
case Event(c: CMD_CLOSE, d: DATA_SHUTDOWN) => handleCommandError(ClosingAlreadyInProgress(d.channelId), c)
case Event(e: Error, d: DATA_SHUTDOWN) => handleRemoteError(e, d)
@@ -968,7 +968,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
case Event(WatchEventSpent(BITCOIN_FUNDING_SPENT, tx), d: DATA_NEGOTIATING) => handleRemoteSpentOther(tx, d)
case Event(CMD_CLOSE(_), d: DATA_NEGOTIATING) => handleCommandError(ClosingAlreadyInProgress(d.channelId))
case Event(c: CMD_CLOSE, d: DATA_NEGOTIATING) => handleCommandError(ClosingAlreadyInProgress(d.channelId), c)
case Event(e: Error, d: DATA_NEGOTIATING) => handleRemoteError(e, d)
@@ -998,7 +998,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
remoteCommitPublished1
}
stay using store(d.copy(commitments = commitments1, localCommitPublished = localCommitPublished1, remoteCommitPublished = remoteCommitPublished1, nextRemoteCommitPublished = nextRemoteCommitPublished1))
case Failure(cause) => handleCommandError(cause)
case Failure(cause) => handleCommandError(cause, c)
}
case Event(WatchEventSpent(BITCOIN_FUNDING_SPENT, tx), d: DATA_CLOSING) =>
@@ -1036,41 +1036,45 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
log.warning(s"processing BITCOIN_OUTPUT_SPENT with txid=${tx.txid} tx=$tx")
require(tx.txIn.size == 1, s"htlc tx should only have 1 input")
val witness = tx.txIn(0).witness
val extracted = witness match {
val extracted_opt = witness match {
case ScriptWitness(Seq(localSig, paymentPreimage, htlcOfferedScript)) if paymentPreimage.size == 32 =>
log.warning(s"extracted preimage=$paymentPreimage from tx=$tx (claim-htlc-success)")
paymentPreimage
log.info(s"extracted preimage=$paymentPreimage from tx=$tx (claim-htlc-success)")
Some(paymentPreimage)
case ScriptWitness(Seq(BinaryData.empty, remoteSig, localSig, paymentPreimage, htlcReceivedScript)) if paymentPreimage.size == 32 =>
log.warning(s"extracted preimage=$paymentPreimage from tx=$tx (htlc-success)")
paymentPreimage
log.info(s"extracted preimage=$paymentPreimage from tx=$tx (htlc-success)")
Some(paymentPreimage)
case ScriptWitness(Seq(BinaryData.empty, remoteSig, localSig, BinaryData.empty, htlcOfferedScript)) =>
val paymentHash160 = BinaryData(htlcOfferedScript.slice(109, 109 + 20))
log.warning(s"extracted paymentHash160=$paymentHash160 from tx=$tx (htlc-timeout)")
paymentHash160
log.info(s"extracted paymentHash160=$paymentHash160 from tx=$tx (htlc-timeout)")
Some(paymentHash160)
case ScriptWitness(Seq(remoteSig, BinaryData.empty, htlcReceivedScript)) =>
val paymentHash160 = BinaryData(htlcReceivedScript.slice(69, 69 + 20))
log.warning(s"extracted paymentHash160=$paymentHash160 from tx=$tx (claim-htlc-timeout)")
paymentHash160
log.info(s"extracted paymentHash160=$paymentHash160 from tx=$tx (claim-htlc-timeout)")
Some(paymentHash160)
case _ =>
// this is not an htlc witness (we don't watch only htlc outputs)
None
}
// we only consider htlcs in our local commitment, because we only care about outgoing htlcs, which disappear first in the remote commitment
// if an outgoing htlc is in the remote commitment, then:
// - either it is in the local commitment (it was never fulfilled)
// - or we have already received the fulfill and forwarded it upstream
val outgoingHtlcs = d.commitments.localCommit.spec.htlcs.filter(_.direction == OUT).map(_.add)
outgoingHtlcs.collect {
case add if add.paymentHash == sha256(extracted) =>
val origin = d.commitments.originChannels(add.id)
log.warning(s"found a match between preimage=$extracted and origin=$origin: htlc was fulfilled")
// let's just pretend we received the preimage from the counterparty
relayer ! ForwardFulfill(UpdateFulfillHtlc(add.channelId, add.id, extracted), origin)
case add if ripemd160(add.paymentHash) == extracted =>
val origin = d.commitments.originChannels(add.id)
log.warning(s"found a match between paymentHash160=$extracted and origin=$origin: htlc timed out")
relayer ! Status.Failure(AddHtlcFailed(d.channelId, HtlcTimedout(d.channelId), origin, None))
extracted_opt map { extracted =>
// we only consider htlcs in our local commitment, because we only care about outgoing htlcs, which disappear first in the remote commitment
// if an outgoing htlc is in the remote commitment, then:
// - either it is in the local commitment (it was never fulfilled)
// - or we have already received the fulfill and forwarded it upstream
val outgoingHtlcs = d.commitments.localCommit.spec.htlcs.filter(_.direction == OUT).map(_.add)
outgoingHtlcs.collect {
case add if add.paymentHash == sha256(extracted) =>
val origin = d.commitments.originChannels(add.id)
log.warning(s"found a match between preimage=$extracted and origin=$origin: htlc was fulfilled")
// let's just pretend we received the preimage from the counterparty
relayer ! ForwardFulfill(UpdateFulfillHtlc(add.channelId, add.id, extracted), origin)
case add if ripemd160(add.paymentHash) == extracted =>
val origin = d.commitments.originChannels(add.id)
log.warning(s"found a match between paymentHash160=$extracted and origin=$origin: htlc timed out")
relayer ! Status.Failure(AddHtlcFailed(d.channelId, HtlcTimedout(d.channelId), origin, None))
}
// TODO: should we handle local htlcs here as well? currently timed out htlcs that we sent will never have an answer
// TODO: we do not handle the case where htlcs transactions end up being unconfirmed this can happen if an htlc-success tx is published right before a htlc timed out
}
// TODO: should we handle local htlcs here as well? currently timed out htlcs that we sent will never have an answer
// TODO: we do not handle the case where htlcs transactions end up being unconfirmed this can happen if an htlc-success tx is published right before a htlc timed out
stay
case Event(WatchEventConfirmed(BITCOIN_TX_CONFIRMED(tx), _, _), d: DATA_CLOSING) =>
@@ -1087,11 +1091,23 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
val revokedCommitDone = revokedCommitPublished1.map(Closing.isRevokedCommitDone(_)).exists(_ == true) // we only need one revoked commit done
// finally, if one of the unilateral closes is done, we move to CLOSED state, otherwise we stay (note that we don't store the state)
val d1 = d.copy(localCommitPublished = localCommitPublished1, remoteCommitPublished = remoteCommitPublished1, nextRemoteCommitPublished = nextRemoteCommitPublished1, revokedCommitPublished = revokedCommitPublished1)
if (mutualCloseDone || localCommitDone || remoteCommitDone || nextRemoteCommitDone || revokedCommitDone) {
log.info(s"channel closed (mutualClose=$mutualCloseDone localCommit=$localCommitDone remoteCommit=$remoteCommitDone nextRemoteCommit=$nextRemoteCommitDone revokedCommit=$revokedCommitDone)")
goto(CLOSED) using d1
val closeType_opt = if (mutualCloseDone) {
Some("mutual")
} else if (localCommitDone) {
Some("local")
} else if (remoteCommitDone || nextRemoteCommitDone) {
Some("remote")
} else if (revokedCommitDone) {
Some("revoked")
} else {
stay using d1
None
}
closeType_opt match {
case Some(closeType) =>
log.info(s"channel closed type=$closeType")
goto(CLOSED) using d1
case None =>
stay using d1
}
case Event(_: ChannelReestablish, d: DATA_CLOSING) =>
@@ -1102,7 +1118,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
val error = Error(d.channelId, exc.getMessage.getBytes)
stay sending error
case Event(CMD_CLOSE(_), d: DATA_CLOSING) => handleCommandError(ClosingAlreadyInProgress(d.channelId))
case Event(c: CMD_CLOSE, d: DATA_CLOSING) => handleCommandError(ClosingAlreadyInProgress(d.channelId), c)
case Event(e: Error, d: DATA_CLOSING) => handleRemoteError(e, d)
@@ -1139,7 +1155,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
)
goto(SYNCING) sending channelReestablish
case Event(c@CMD_CLOSE(_), d: HasCommitments) => handleLocalError(ForcedLocalCommit(d.channelId, "can't do a mutual close while disconnected"), d, Some(c)) replying "ok"
case Event(c: CMD_CLOSE, d: HasCommitments) => handleLocalError(ForcedLocalCommit(d.channelId, "can't do a mutual close while disconnected"), d, Some(c)) replying "ok"
case Event(c@CurrentBlockCount(count), d: HasCommitments) if d.commitments.hasTimedoutOutgoingHtlcs(count) =>
// note: this can only happen if state is NORMAL or SHUTDOWN
@@ -1162,7 +1178,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
goto(WAIT_FOR_FUNDING_CONFIRMED)
case Event(_: ChannelReestablish, d: DATA_WAIT_FOR_FUNDING_LOCKED) =>
log.info(s"re-sending fundingLocked")
log.debug(s"re-sending fundingLocked")
val nextPerCommitmentPoint = Generators.perCommitPoint(d.commitments.localParams.shaSeed, 1)
val fundingLocked = FundingLocked(d.commitments.channelId, nextPerCommitmentPoint)
goto(WAIT_FOR_FUNDING_LOCKED) sending fundingLocked
@@ -1171,7 +1187,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
if (channelReestablish.nextLocalCommitmentNumber == 1 && d.commitments.localCommit.index == 0) {
// If next_local_commitment_number is 1 in both the channel_reestablish it sent and received, then the node MUST retransmit funding_locked, otherwise it MUST NOT
log.info(s"re-sending fundingLocked")
log.debug(s"re-sending fundingLocked")
val nextPerCommitmentPoint = Generators.perCommitPoint(d.commitments.localParams.shaSeed, 1)
val fundingLocked = FundingLocked(d.commitments.channelId, nextPerCommitmentPoint)
forwarder ! fundingLocked
@@ -1181,7 +1197,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
d.localShutdown.map {
case localShutdown =>
log.info(s"re-sending localShutdown")
log.debug(s"re-sending localShutdown")
forwarder ! localShutdown
}
@@ -1223,7 +1239,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
forwarder ! d.localClosingSigned
goto(NEGOTIATING)
case Event(c@CMD_CLOSE(_), d: HasCommitments) => handleLocalError(ForcedLocalCommit(d.channelId, "can't do a mutual close while syncing"), d, Some(c))
case Event(c: CMD_CLOSE, d: HasCommitments) => handleLocalError(ForcedLocalCommit(d.channelId, "can't do a mutual close while syncing"), d, Some(c))
case Event(c@CurrentBlockCount(count), d: HasCommitments) if d.commitments.hasTimedoutOutgoingHtlcs(count) => handleLocalError(HtlcTimedout(d.channelId), d, Some(c))
@@ -1273,8 +1289,8 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
log.info(s"rejecting htlc request in state=$stateName")
val error = ChannelUnavailable(d.channelId)
d match {
case normal: DATA_NORMAL => handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), Some(normal.channelUpdate))) // can happen if we are in OFFLINE or SYNCING state (channelUpdate will have enable=false)
case _ => handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), None)) // we don't provide a channel_update: this will be a permanent channel failure
case normal: DATA_NORMAL => handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), Some(normal.channelUpdate)), c) // can happen if we are in OFFLINE or SYNCING state (channelUpdate will have enable=false)
case _ => handleCommandError(AddHtlcFailed(d.channelId, error, origin(c), None), c) // we don't provide a channel_update: this will be a permanent channel failure
}
// we only care about this event in NORMAL and SHUTDOWN state, and we never unregister to the event stream
@@ -1343,16 +1359,21 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu
stay using newData replying "ok"
}
def handleCommandError(cause: Throwable) = {
def handleCommandError(cause: Throwable, cmd: Command) = {
log.error(s"${cause.getMessage} while processing cmd=${cmd.getClass.getSimpleName} in state=$stateName")
cause match {
case _: ChannelException => log.error(s"$cause")
case _ => log.error(cause, "")
case _: ChannelException => ()
case _ => log.error(cause, s"msg=$cmd stateData=$stateData ")
}
stay replying Status.Failure(cause)
}
def handleLocalError(cause: Throwable, d: HasCommitments, msg: Option[Any]) = {
log.error(cause, s"error while processing msg=${msg.getOrElse("n/a")} in state=$stateData ")
log.error(s"${cause.getMessage} while processing msg=${msg.getOrElse("n/a").getClass.getSimpleName} in state=$stateName")
cause match {
case _: ChannelException => ()
case _ => log.error(cause, s"msg=${msg.getOrElse("n/a")} stateData=$stateData ")
}
val error = Error(d.channelId, cause.getMessage.getBytes)
spendLocalCurrent(d) sending error
}

View file

@@ -216,8 +216,11 @@ object Helpers {
def generateTx(desc: String)(attempt: Try[TransactionWithInputInfo])(implicit log: LoggingAdapter): Option[TransactionWithInputInfo] = {
attempt match {
case Success(txinfo) =>
log.warning(s"tx generation success: desc=$desc txid=${txinfo.tx.txid} amount=${txinfo.tx.txOut.map(_.amount.amount).sum} tx=${txinfo.tx}")
log.info(s"tx generation success: desc=$desc txid=${txinfo.tx.txid} amount=${txinfo.tx.txOut.map(_.amount.amount).sum} tx=${txinfo.tx}")
Some(txinfo)
case Failure(t: TxGenerationSkipped) =>
log.info(s"tx generation skipped: desc=$desc reason: ${t.getMessage}")
None
case Failure(t) =>
log.warning(s"tx generation failure: desc=$desc reason: ${t.getMessage}")
None

View file

@@ -131,15 +131,15 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
whenUnhandled {
case Event(ErrorClosed(cause), _) =>
log.debug(s"tcp connection error: $cause")
log.info(s"tcp connection error: $cause")
stop(FSM.Normal)
case Event(PeerClosed, _) =>
log.debug(s"connection closed")
log.info(s"connection closed")
stop(FSM.Normal)
case Event(Terminated(actor), _) if actor == connection =>
log.debug(s"connection terminated, stopping the transport")
log.info(s"connection terminated, stopping the transport")
// this can be the connection or the listener, either way it is a cause of death
stop(FSM.Normal)
}

View file

@@ -58,8 +58,6 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
val h = channels.filter(_._2 == actor).map(_._1)
log.info(s"channel closed: channelId=${h.mkString("/")}")
stay using d.copy(channels = channels -- h)
case Event(_: Rebroadcast, _) => stay // ignored
}
when(INITIALIZING) {
@@ -187,7 +185,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
stay
case Event(Terminated(actor), ConnectedData(address_opt, transport, _, channels)) if actor == transport =>
log.warning(s"lost connection to $remoteNodeId")
log.info(s"lost connection to $remoteNodeId")
channels.values.foreach(_ ! INPUT_DISCONNECTED)
goto(DISCONNECTED) using DisconnectedData(address_opt, channels)
@@ -224,6 +222,8 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, previousKnownAddress
case Event(GetPeerInfo, d) =>
sender ! PeerInfo(remoteNodeId, stateName.toString, d.address_opt, d.channels.values.toSet.size) // we use toSet to dedup because a channel can have a TemporaryChannelId + a ChannelId
stay
case Event(_: Rebroadcast, _) => stay // ignored
}
onTransition {

View file

@@ -62,7 +62,7 @@ class Relayer(nodeParams: NodeParams, register: ActorRef, paymentHandler: ActorR
case _: ChannelStateChanged => ()
case LocalChannelUpdate(_, channelId, shortChannelId, remoteNodeId, _, channelUpdate) =>
log.info(s"updating channel_update for channelId=$channelId shortChannelId=${shortChannelId.toHexString} remoteNodeId=$remoteNodeId channelUpdate=$channelUpdate ")
log.debug(s"updating channel_update for channelId=$channelId shortChannelId=${shortChannelId.toHexString} remoteNodeId=$remoteNodeId channelUpdate=$channelUpdate ")
context become main(channelUpdates + (channelUpdate.shortChannelId -> channelUpdate))
case LocalChannelDown(_, channelId, shortChannelId, _) =>
@@ -70,49 +70,68 @@ class Relayer(nodeParams: NodeParams, register: ActorRef, paymentHandler: ActorR
context become main(channelUpdates - shortChannelId)
case ForwardAdd(add) =>
log.debug(s"received forwarding request for htlc #${add.id} paymentHash=${add.paymentHash} from channelId=${add.channelId} ")
Sphinx.parsePacket(nodeParams.privateKey, add.paymentHash, add.onionRoutingPacket)
.map(parsedPacket => (LightningMessageCodecs.perHopPayloadCodec.decode(BitVector(parsedPacket.payload.data)), parsedPacket.nextPacket, parsedPacket.sharedSecret)) match {
case Success((Attempt.Successful(DecodeResult(perHopPayload, _)), nextPacket, _)) if nextPacket.isLastPacket =>
log.info(s"looks like we are the final recipient of htlc #${add.id}")
perHopPayload match {
.flatMap {
case Sphinx.ParsedPacket(payload, nextPacket, sharedSecret) =>
LightningMessageCodecs.perHopPayloadCodec.decode(BitVector(payload.data)) match {
case Attempt.Successful(DecodeResult(perHopPayload, _)) => Success((perHopPayload, nextPacket, sharedSecret))
case Attempt.Failure(cause) => Failure(new RuntimeException(cause.messageWithContext))
}
} match {
case Success((perHopPayload, nextPacket, _)) if nextPacket.isLastPacket =>
val cmd = perHopPayload match {
case PerHopPayload(_, finalAmountToForward, _) if finalAmountToForward > add.amountMsat =>
sender ! CMD_FAIL_HTLC(add.id, Right(FinalIncorrectHtlcAmount(add.amountMsat)), commit = true)
Left(CMD_FAIL_HTLC(add.id, Right(FinalIncorrectHtlcAmount(add.amountMsat)), commit = true))
case PerHopPayload(_, _, finalOutgoingCltvValue) if finalOutgoingCltvValue != add.expiry =>
sender ! CMD_FAIL_HTLC(add.id, Right(FinalIncorrectCltvExpiry(add.expiry)), commit = true)
Left(CMD_FAIL_HTLC(add.id, Right(FinalIncorrectCltvExpiry(add.expiry)), commit = true))
case _ if add.expiry < Globals.blockCount.get() + 3 => // TODO: check hardcoded value
sender ! CMD_FAIL_HTLC(add.id, Right(FinalExpiryTooSoon), commit = true)
Left(CMD_FAIL_HTLC(add.id, Right(FinalExpiryTooSoon), commit = true))
case _ =>
paymentHandler forward add
Right(add)
}
case Success((Attempt.Successful(DecodeResult(perHopPayload, _)), nextPacket, _)) =>
channelUpdates.get(perHopPayload.channel_id) match {
cmd match {
case Left(cmdFail) =>
log.info(s"rejecting htlc #${add.id} paymentHash=${add.paymentHash} from channelId=${add.channelId} reason=${cmdFail.reason}")
sender ! cmdFail
case Right(addHtlc) =>
log.debug(s"forwarding htlc #${add.id} paymentHash=${add.paymentHash} to payment-handler")
paymentHandler forward addHtlc
}
case Success((perHopPayload, nextPacket, _)) =>
val cmd = channelUpdates.get(perHopPayload.channel_id) match {
case None =>
// if we don't (yet?) have a channel_update for the next channel, we consider the channel doesn't exist
// TODO: use a different channel to the same peer instead?
sender ! CMD_FAIL_HTLC(add.id, Right(UnknownNextPeer), commit = true)
Left(CMD_FAIL_HTLC(add.id, Right(UnknownNextPeer), commit = true))
case Some(channelUpdate) if !Announcements.isEnabled(channelUpdate.flags) =>
sender ! CMD_FAIL_HTLC(add.id, Right(ChannelDisabled(channelUpdate.flags, channelUpdate)), commit = true)
Left(CMD_FAIL_HTLC(add.id, Right(ChannelDisabled(channelUpdate.flags, channelUpdate)), commit = true))
case Some(channelUpdate) if add.amountMsat < channelUpdate.htlcMinimumMsat =>
sender ! CMD_FAIL_HTLC(add.id, Right(AmountBelowMinimum(add.amountMsat, channelUpdate)), commit = true)
Left(CMD_FAIL_HTLC(add.id, Right(AmountBelowMinimum(add.amountMsat, channelUpdate)), commit = true))
case Some(channelUpdate) if add.expiry != perHopPayload.outgoingCltvValue + channelUpdate.cltvExpiryDelta =>
sender ! CMD_FAIL_HTLC(add.id, Right(IncorrectCltvExpiry(add.expiry, channelUpdate)), commit = true)
Left(CMD_FAIL_HTLC(add.id, Right(IncorrectCltvExpiry(add.expiry, channelUpdate)), commit = true))
case Some(channelUpdate) if add.expiry < Globals.blockCount.get() + 3 => // TODO: hardcoded value
sender ! CMD_FAIL_HTLC(add.id, Right(ExpiryTooSoon(channelUpdate)), commit = true)
Left(CMD_FAIL_HTLC(add.id, Right(ExpiryTooSoon(channelUpdate)), commit = true))
case _ =>
log.info(s"forwarding htlc #${add.id} to shortChannelId=${perHopPayload.channel_id.toHexString}")
register ! Register.ForwardShortId(perHopPayload.channel_id, CMD_ADD_HTLC(perHopPayload.amtToForward, add.paymentHash, perHopPayload.outgoingCltvValue, nextPacket.serialize, upstream_opt = Some(add), commit = true))
Right(CMD_ADD_HTLC(perHopPayload.amtToForward, add.paymentHash, perHopPayload.outgoingCltvValue, nextPacket.serialize, upstream_opt = Some(add), commit = true))
}
cmd match {
case Left(cmdFail) =>
log.info(s"rejecting htlc #${add.id} paymentHash=${add.paymentHash} from channelId=${add.channelId} to shortChannelId=${perHopPayload.channel_id.toHexString} reason=${cmdFail.reason}")
sender ! cmdFail
case Right(cmdAdd) =>
log.info(s"forwarding htlc #${add.id} paymentHash=${add.paymentHash} from channelId=${add.channelId} to shortChannelId=${perHopPayload.channel_id.toHexString}")
register ! Register.ForwardShortId(perHopPayload.channel_id, cmdAdd)
}
case Success((Attempt.Failure(cause), _, _)) =>
log.error(s"couldn't parse payload: $cause")
sender ! CMD_FAIL_HTLC(add.id, Right(PermanentNodeFailure), commit = true)
case Failure(t) =>
log.error(t, "couldn't parse onion: ")
// we cannot even parse the onion packet
sender ! CMD_FAIL_MALFORMED_HTLC(add.id, Crypto.sha256(add.onionRoutingPacket), failureCode = FailureMessageCodecs.BADONION, commit = true)
log.warning(s"couldn't parse onion: reason=${t.getMessage}")
val cmdFail = CMD_FAIL_MALFORMED_HTLC(add.id, Crypto.sha256(add.onionRoutingPacket), failureCode = FailureMessageCodecs.BADONION, commit = true)
log.info(s"rejecting htlc #${add.id} paymentHash=${add.paymentHash} from channelId=${add.channelId} reason=malformed onionHash=${cmdFail.onionHash} failureCode=${cmdFail.failureCode}")
sender ! cmdFail
}
case Status.Failure(Register.ForwardShortIdFailure(Register.ForwardShortId(shortChannelId, CMD_ADD_HTLC(_, _, _, _, Some(add), _)))) =>
log.warning(s"couldn't resolve downstream channel $shortChannelId, failing htlc #${add.id}")
log.warning(s"couldn't resolve downstream channel ${shortChannelId.toHexString}, failing htlc #${add.id}")
register ! Register.Forward(add.channelId, CMD_FAIL_HTLC(add.id, Right(UnknownNextPeer), commit = true))
case Status.Failure(AddHtlcFailed(_, error, Local(Some(sender)), _)) =>

View file

@@ -132,28 +132,28 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
// let's check that the output is indeed a P2WSH multisig 2-of-2 of nodeid1 and nodeid2)
val fundingOutputScript = write(pay2wsh(Scripts.multiSig2of2(PublicKey(c.bitcoinKey1), PublicKey(c.bitcoinKey2))))
if (tx.txOut.size < outputIndex + 1) {
log.error(s"invalid script for shortChannelId=${c.shortChannelId}: txid=${tx.txid} does not have outputIndex=$outputIndex ann=$c")
log.error(s"invalid script for shortChannelId=${c.shortChannelId.toHexString}: txid=${tx.txid} does not have outputIndex=$outputIndex ann=$c")
None
} else if (fundingOutputScript != tx.txOut(outputIndex).publicKeyScript) {
log.error(s"invalid script for shortChannelId=${c.shortChannelId} txid=${tx.txid} ann=$c")
log.error(s"invalid script for shortChannelId=${c.shortChannelId.toHexString} txid=${tx.txid} ann=$c")
None
} else {
watcher ! WatchSpentBasic(self, tx, outputIndex, BITCOIN_FUNDING_EXTERNAL_CHANNEL_SPENT(c.shortChannelId))
// TODO: check feature bit set
log.debug(s"added channel channelId=${c.shortChannelId}")
log.debug(s"added channel channelId=${c.shortChannelId.toHexString}")
context.system.eventStream.publish(ChannelDiscovered(c, tx.txOut(outputIndex).amount))
db.addChannel(c)
Some(c)
}
case IndividualResult(c, Some(tx), false) =>
// TODO: vulnerability if they flood us with spent funding tx?
log.warning(s"ignoring shortChannelId=${c.shortChannelId} tx=${tx.txid} (funding tx not found in utxo)")
log.warning(s"ignoring shortChannelId=${c.shortChannelId.toHexString} tx=${tx.txid} (funding tx not found in utxo)")
// there may be a record if we have just restarted
db.removeChannel(c.shortChannelId)
None
case IndividualResult(c, None, _) =>
// TODO: blacklist?
log.warning(s"could not retrieve tx for shortChannelId=${c.shortChannelId}")
log.warning(s"could not retrieve tx for shortChannelId=${c.shortChannelId.toHexString}")
None
}
@@ -221,12 +221,12 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
stay
case Event(c: ChannelAnnouncement, d) =>
log.debug(s"received channel announcement for shortChannelId=${c.shortChannelId} nodeId1=${c.nodeId1} nodeId2=${c.nodeId2}")
log.debug(s"received channel announcement for shortChannelId=${c.shortChannelId.toHexString} nodeId1=${c.nodeId1} nodeId2=${c.nodeId2}")
if (d.channels.containsKey(c.shortChannelId) || d.awaiting.exists(_.shortChannelId == c.shortChannelId) || d.stash.contains(c)) {
log.debug(s"ignoring $c (duplicate)")
stay
} else if (!Announcements.checkSigs(c)) {
log.error(s"bad signature for announcement $c")
log.warning(s"bad signature for announcement $c")
sender ! Error(Peer.CHANNELID_ZERO, "bad announcement sig!!!".getBytes())
stay
} else {
@@ -239,7 +239,7 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
log.debug(s"ignoring announcement $n (old timestamp or duplicate)")
stay
} else if (!Announcements.checkSig(n)) {
log.error(s"bad signature for announcement $n")
log.warning(s"bad signature for announcement $n")
sender ! Error(Peer.CHANNELID_ZERO, "bad announcement sig!!!".getBytes())
stay
} else if (d.nodes.containsKey(n.nodeId)) {
@@ -256,7 +256,7 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
log.debug(s"stashing $n")
stay using d.copy(stash = d.stash :+ n, origins = d.origins + (n -> sender))
} else {
log.warning(s"ignoring $n (no related channel found)")
log.debug(s"ignoring $n (no related channel found)")
// there may be a record if we have just restarted
db.removeNode(n.nodeId)
stay
@@ -271,7 +271,7 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
log.debug(s"ignoring $u (old timestamp or duplicate)")
stay
} else if (!Announcements.checkSig(u, desc.a)) {
log.error(s"bad signature for announcement shortChannelId=${u.shortChannelId.toHexString} $u")
log.warning(s"bad signature for announcement shortChannelId=${u.shortChannelId.toHexString} $u")
sender ! Error(Peer.CHANNELID_ZERO, "bad announcement sig!!!".getBytes())
stay
} else if (d.updates.contains(desc)) {
@@ -296,7 +296,7 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
log.debug(s"ignoring $u (old timestamp or duplicate)")
stay
} else if (!Announcements.checkSig(u, desc.a)) {
log.error(s"bad signature for announcement shortChannelId=${u.shortChannelId.toHexString} $u")
log.warning(s"bad signature for announcement shortChannelId=${u.shortChannelId.toHexString} $u")
sender ! Error(Peer.CHANNELID_ZERO, "bad announcement sig!!!".getBytes())
stay
} else if (d.privateUpdates.contains(desc)) {
@@ -309,7 +309,7 @@ class Router(nodeParams: NodeParams, watcher: ActorRef) extends FSM[State, Data]
stay using d.copy(privateUpdates = d.privateUpdates + (desc -> u))
}
} else {
log.warning(s"ignoring announcement $u (unknown channel)")
log.debug(s"ignoring announcement $u (unknown channel)")
stay
}

View file

@@ -23,9 +23,9 @@ class ThrottleForwarder(target: ActorRef, messages: Iterable[Any], chunkSize: In
override def receive = group(messages)
def group(messages: Iterable[Any]): Receive = {
def group(remaining: Iterable[Any]): Receive = {
case Tick =>
messages.splitAt(chunkSize) match {
remaining.splitAt(chunkSize) match {
case (Nil, _) =>
clock.cancel()
log.debug(s"sent messages=${messages.size} with chunkSize=$chunkSize and delay=$delay")

View file

@@ -37,6 +37,11 @@ object Transactions {
case class MainPenaltyTx(input: InputInfo, tx: Transaction) extends TransactionWithInputInfo
case class HtlcPenaltyTx(input: InputInfo, tx: Transaction) extends TransactionWithInputInfo
case class ClosingTx(input: InputInfo, tx: Transaction) extends TransactionWithInputInfo
sealed trait TxGenerationSkipped extends RuntimeException
case object OutputNotFound extends RuntimeException(s"output not found (probably trimmed)") with TxGenerationSkipped
case object AmountBelowDustLimit extends RuntimeException(s"amount is below dust limit") with TxGenerationSkipped
// @formatter:on
/**
@@ -133,30 +138,31 @@ object Transactions {
*/
def getCommitTxNumber(commitTx: Transaction, isFunder: Boolean, localPaymentBasePoint: Point, remotePaymentBasePoint: Point): Long = {
val blind = obscuredCommitTxNumber(0, isFunder, localPaymentBasePoint, remotePaymentBasePoint)
val obscured = decodeTxNumber(commitTx.txIn(0).sequence, commitTx.lockTime)
val obscured = decodeTxNumber(commitTx.txIn.head.sequence, commitTx.lockTime)
obscured ^ blind
}
/**
* This is a trick to split and encode a 48-bit txnumber into the sequence and locktime fields of a tx
*
* @param txnumber
* @param txnumber commitment number
* @return (sequence, locktime)
*/
def encodeTxNumber(txnumber: Long) = {
def encodeTxNumber(txnumber: Long): (Long, Long) = {
require(txnumber <= 0xffffffffffffL, "txnumber must be lesser than 48 bits long")
(0x80000000L | (txnumber >> 24), (txnumber & 0xffffffL) | 0x20000000)
}
def decodeTxNumber(sequence: Long, locktime: Long) = ((sequence & 0xffffffL) << 24) + (locktime & 0xffffffL)
def decodeTxNumber(sequence: Long, locktime: Long): Long = ((sequence & 0xffffffL) << 24) + (locktime & 0xffffffL)
def makeCommitTx(commitTxInput: InputInfo, commitTxNumber: Long, localPaymentBasePoint: Point, remotePaymentBasePoint: Point, localIsFunder: Boolean, localDustLimit: Satoshi, localRevocationPubkey: PublicKey, toLocalDelay: Int, localDelayedPaymentPubkey: PublicKey, remotePaymentPubkey: PublicKey, localHtlcPubkey: PublicKey, remoteHtlcPubkey: PublicKey, spec: CommitmentSpec): CommitTx = {
val commitFee = commitTxFee(localDustLimit, spec)
val (toLocalAmount: Satoshi, toRemoteAmount: Satoshi) = localIsFunder match {
case true => (millisatoshi2satoshi(MilliSatoshi(spec.toLocalMsat)) - commitFee, millisatoshi2satoshi(MilliSatoshi(spec.toRemoteMsat)))
case false => (millisatoshi2satoshi(MilliSatoshi(spec.toLocalMsat)), millisatoshi2satoshi(MilliSatoshi(spec.toRemoteMsat)) - commitFee)
val (toLocalAmount: Satoshi, toRemoteAmount: Satoshi) = if (localIsFunder) {
(millisatoshi2satoshi(MilliSatoshi(spec.toLocalMsat)) - commitFee, millisatoshi2satoshi(MilliSatoshi(spec.toRemoteMsat)))
} else {
(millisatoshi2satoshi(MilliSatoshi(spec.toLocalMsat)), millisatoshi2satoshi(MilliSatoshi(spec.toRemoteMsat)) - commitFee)
} // NB: we don't care if values are < 0, they will be trimmed if they are < dust limit anyway
val toLocalDelayedOutput_opt = if (toLocalAmount >= localDustLimit) Some(TxOut(toLocalAmount, pay2wsh(toLocalDelayed(localRevocationPubkey, toLocalDelay, localDelayedPaymentPubkey)))) else None
@@ -183,9 +189,10 @@ object Transactions {
val redeemScript = htlcOffered(localHtlcPubkey, remoteHtlcPubkey, localRevocationPubkey, ripemd160(htlc.paymentHash))
val pubkeyScript = write(pay2wsh(redeemScript))
val outputIndex = findPubKeyScriptIndex(commitTx, pubkeyScript)
require(outputIndex >= 0, "output not found (was trimmed?)")
val amount = MilliSatoshi(htlc.amountMsat) - fee
require(amount >= localDustLimit, "amount lesser than dust limit")
if (amount < localDustLimit) {
throw AmountBelowDustLimit
}
val input = InputInfo(OutPoint(commitTx, outputIndex), commitTx.txOut(outputIndex), write(redeemScript))
HtlcTimeoutTx(input, Transaction(
version = 2,
@@ -199,9 +206,10 @@ object Transactions {
val redeemScript = htlcReceived(localHtlcPubkey, remoteHtlcPubkey, localRevocationPubkey, ripemd160(htlc.paymentHash), htlc.expiry)
val pubkeyScript = write(pay2wsh(redeemScript))
val outputIndex = findPubKeyScriptIndex(commitTx, pubkeyScript)
require(outputIndex >= 0, "output not found (was trimmed?)")
val amount = MilliSatoshi(htlc.amountMsat) - fee
require(amount >= localDustLimit, "amount lesser than dust limit")
if (amount < localDustLimit) {
throw AmountBelowDustLimit
}
val input = InputInfo(OutPoint(commitTx, outputIndex), commitTx.txOut(outputIndex), write(redeemScript))
HtlcSuccessTx(input, Transaction(
version = 2,
@@ -223,10 +231,11 @@ object Transactions {
val redeemScript = htlcOffered(remoteHtlcPubkey, localHtlcPubkey, remoteRevocationPubkey, ripemd160(htlc.paymentHash))
val pubkeyScript = write(pay2wsh(redeemScript))
val outputIndex = findPubKeyScriptIndex(commitTx, pubkeyScript)
require(outputIndex >= 0, "output not found (was trimmed?)")
val input = InputInfo(OutPoint(commitTx, outputIndex), commitTx.txOut(outputIndex), write(redeemScript))
val amount = input.txOut.amount - fee
require(amount >= localDustLimit, "amount lesser than dust limit")
if (amount < localDustLimit) {
throw AmountBelowDustLimit
}
ClaimHtlcSuccessTx(input, Transaction(
version = 2,
txIn = TxIn(input.outPoint, Array.emptyByteArray, 0xffffffffL) :: Nil,
@@ -239,10 +248,11 @@ object Transactions {
val redeemScript = htlcReceived(remoteHtlcPubkey, localHtlcPubkey, remoteRevocationPubkey, ripemd160(htlc.paymentHash), htlc.expiry)
val pubkeyScript = write(pay2wsh(redeemScript))
val outputIndex = findPubKeyScriptIndex(commitTx, pubkeyScript)
require(outputIndex >= 0, "output not found (was trimmed?)")
val input = InputInfo(OutPoint(commitTx, outputIndex), commitTx.txOut(outputIndex), write(redeemScript))
val amount = input.txOut.amount - fee
require(amount >= localDustLimit, "amount lesser than dust limit")
if (amount < localDustLimit) {
throw AmountBelowDustLimit
}
ClaimHtlcTimeoutTx(input, Transaction(
version = 2,
txIn = TxIn(input.outPoint, Array.emptyByteArray, 0x00000000L) :: Nil,
@@ -255,10 +265,11 @@ object Transactions {
val redeemScript = Script.pay2pkh(localPaymentPubkey)
val pubkeyScript = write(pay2wpkh(localPaymentPubkey))
val outputIndex = findPubKeyScriptIndex(delayedOutputTx, pubkeyScript)
require(outputIndex >= 0, "output not found (was trimmed?)")
val input = InputInfo(OutPoint(delayedOutputTx, outputIndex), delayedOutputTx.txOut(outputIndex), write(redeemScript))
val amount = input.txOut.amount - fee
require(amount >= localDustLimit, "amount lesser than dust limit")
if (amount < localDustLimit) {
throw AmountBelowDustLimit
}
ClaimP2WPKHOutputTx(input, Transaction(
version = 2,
txIn = TxIn(input.outPoint, Array.emptyByteArray, 0x00000000L) :: Nil,
@@ -271,10 +282,11 @@ object Transactions {
val redeemScript = toLocalDelayed(localRevocationPubkey, toLocalDelay, localDelayedPaymentPubkey)
val pubkeyScript = write(pay2wsh(redeemScript))
val outputIndex = findPubKeyScriptIndex(delayedOutputTx, pubkeyScript)
require(outputIndex >= 0, "output not found (was trimmed?)")
val input = InputInfo(OutPoint(delayedOutputTx, outputIndex), delayedOutputTx.txOut(outputIndex), write(redeemScript))
val amount = input.txOut.amount - fee
require(amount >= localDustLimit, "amount lesser than dust limit")
if (amount < localDustLimit) {
throw AmountBelowDustLimit
}
ClaimDelayedOutputTx(input, Transaction(
version = 2,
txIn = TxIn(input.outPoint, Array.emptyByteArray, toLocalDelay) :: Nil,
@@ -287,10 +299,11 @@ object Transactions {
val redeemScript = toLocalDelayed(remoteRevocationPubkey, toRemoteDelay, remoteDelayedPaymentPubkey)
val pubkeyScript = write(pay2wsh(redeemScript))
val outputIndex = findPubKeyScriptIndex(commitTx, pubkeyScript)
require(outputIndex >= 0, "output not found (was trimmed?)")
val input = InputInfo(OutPoint(commitTx, outputIndex), commitTx.txOut(outputIndex), write(redeemScript))
val amount = input.txOut.amount - fee
require(amount >= localDustLimit, "amount lesser than dust limit")
if (amount < localDustLimit) {
throw AmountBelowDustLimit
}
MainPenaltyTx(input, Transaction(
version = 2,
txIn = TxIn(input.outPoint, Array.emptyByteArray, 0xffffffffL) :: Nil,
@@ -301,11 +314,12 @@ object Transactions {
def makeHtlcPenaltyTx(commitTx: Transaction, localDustLimit: Satoshi): HtlcPenaltyTx = ???
def makeClosingTx(commitTxInput: InputInfo, localScriptPubKey: BinaryData, remoteScriptPubKey: BinaryData, localIsFunder: Boolean, dustLimit: Satoshi, closingFee: Satoshi, spec: CommitmentSpec): ClosingTx = {
require(spec.htlcs.size == 0, "there shouldn't be any pending htlcs")
require(spec.htlcs.isEmpty, "there shouldn't be any pending htlcs")
val (toLocalAmount: Satoshi, toRemoteAmount: Satoshi) = localIsFunder match {
case true => (millisatoshi2satoshi(MilliSatoshi(spec.toLocalMsat)) - closingFee, millisatoshi2satoshi(MilliSatoshi(spec.toRemoteMsat)))
case false => (millisatoshi2satoshi(MilliSatoshi(spec.toLocalMsat)), millisatoshi2satoshi(MilliSatoshi(spec.toRemoteMsat)) - closingFee)
val (toLocalAmount: Satoshi, toRemoteAmount: Satoshi) = if (localIsFunder) {
(millisatoshi2satoshi(MilliSatoshi(spec.toLocalMsat)) - closingFee, millisatoshi2satoshi(MilliSatoshi(spec.toRemoteMsat)))
} else {
(millisatoshi2satoshi(MilliSatoshi(spec.toLocalMsat)), millisatoshi2satoshi(MilliSatoshi(spec.toRemoteMsat)) - closingFee)
} // NB: we don't care if values are < 0, they will be trimmed if they are < dust limit anyway
val toLocalOutput_opt = if (toLocalAmount >= dustLimit) Some(TxOut(toLocalAmount, localScriptPubKey)) else None
@@ -319,7 +333,14 @@ object Transactions {
ClosingTx(commitTxInput, LexicographicalOrdering.sort(tx))
}
def findPubKeyScriptIndex(tx: Transaction, pubkeyScript: BinaryData): Int = tx.txOut.indexWhere(_.publicKeyScript == pubkeyScript)
def findPubKeyScriptIndex(tx: Transaction, pubkeyScript: BinaryData): Int = {
val outputIndex = tx.txOut.indexWhere(_.publicKeyScript == pubkeyScript)
if (outputIndex >= 0) {
outputIndex
} else {
throw OutputNotFound
}
}
def findPubKeyScriptIndex(tx: Transaction, pubkeyScript: Seq[ScriptElt]): Int = findPubKeyScriptIndex(tx, write(pubkeyScript))
@@ -336,7 +357,7 @@ object Transactions {
}
def sign(txinfo: TransactionWithInputInfo, key: PrivateKey): BinaryData = {
require(txinfo.tx.txIn.size == 1, "only one input allowed")
require(txinfo.tx.txIn.lengthCompare(1) == 0, "only one input allowed")
sign(txinfo.tx, inputIndex = 0, txinfo.input.redeemScript, txinfo.input.txOut.amount, key)
}
@@ -386,7 +407,7 @@ object Transactions {
}
def checkSpendable(txinfo: TransactionWithInputInfo): Try[Unit] =
Try(Transaction.correctlySpends(txinfo.tx, Map(txinfo.tx.txIn(0).outPoint -> txinfo.input.txOut), ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS))
Try(Transaction.correctlySpends(txinfo.tx, Map(txinfo.tx.txIn.head.outPoint -> txinfo.input.txOut), ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS))
def checkSig(txinfo: TransactionWithInputInfo, sig: BinaryData, pubKey: PublicKey): Boolean = {
val data = Transaction.hashForSigning(txinfo.tx, inputIndex = 0, txinfo.input.redeemScript, SIGHASH_ALL, txinfo.input.txOut.amount, SIGVERSION_WITNESS_V0)

View file

@@ -17,10 +17,6 @@
</encoder>
</appender>
<logger name="fr.acinq.eclair.channel" level="DEBUG"/>
<logger name="fr.acinq.eclair.channel.Register" level="DEBUG"/>
<logger name="fr.acinq.eclair.router" level="INFO"/>
<if condition='isDefined("eclair.printToConsole")'>
<then>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">

View file

@@ -13,18 +13,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %yellow(%msg) %ex{12}%n
</pattern>
</encoder>
</appender>
<appender name="ORANGE" class="ch.qos.logback.core.ConsoleAppender">
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%boldYellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %yellow(%msg)
%ex{12}%n
</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %yellow(%msg) %ex{12}%n</pattern>
</encoder>
</appender>
@@ -32,8 +21,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %red(%msg) %ex{12}%n
</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %red(%msg) %ex{12}%n</pattern>
</encoder>
</appender>
@@ -41,9 +29,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} channelId=%X{channelId} - %blue(%msg)
%ex{12}%n
</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} channelId=%X{channelId} - %blue(%msg) %ex{12}%n</pattern>
</encoder>
</appender>
@@ -51,8 +37,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %cyan(%msg) %ex{12}%n
</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %cyan(%msg) %ex{12}%n</pattern>
</encoder>
</appender>
@@ -60,8 +45,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %green(%msg) %ex{12}%n
</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %green(%msg) %ex{12}%n</pattern>
</encoder>
</appender>
@@ -69,8 +53,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %magenta(%msg) %ex{12}%n
</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %magenta(%msg) %ex{12}%n</pattern>
</encoder>
</appender>
@@ -78,19 +61,19 @@
<appender-ref ref="RED"/>
</logger>
<logger name="fr.acinq.eclair.channel" level="DEBUG" additivity="false">
<logger name="fr.acinq.eclair.channel" level="INFO" additivity="false">
<appender-ref ref="BLUE"/>
</logger>
<logger name="fr.acinq.eclair.payment" level="DEBUG" additivity="false">
<logger name="fr.acinq.eclair.payment" level="INFO" additivity="false">
<appender-ref ref="GREEN"/>
</logger>
<logger name="fr.acinq.eclair.blockchain.bitcoind.ZmqWatcher" level="DEBUG" additivity="false">
<logger name="fr.acinq.eclair.blockchain.bitcoind.ZmqWatcher" level="INFO" additivity="false">
<appender-ref ref="YELLOW"/>
</logger>
<logger name="fr.acinq.eclair.blockchain" level="DEBUG" additivity="false">
<logger name="fr.acinq.eclair.blockchain" level="INFO" additivity="false">
<appender-ref ref="YELLOW"/>
</logger>
@@ -98,7 +81,7 @@
<appender-ref ref="CYAN"/>
</logger>
<logger name="fr.acinq.eclair.gui" level="DEBUG" additivity="false">
<logger name="fr.acinq.eclair.gui" level="ERROR" additivity="false">
<appender-ref ref="MAGENTA"/>
</logger>