Mirror of https://github.com/ACINQ/eclair.git
Added context to logs (#638)
* improved logs on sig sent/received
* put 'sent announcements' log in debug
* added logging of IN/OUT wire messages
* added mdc support to IO classes
* reduced package length to 24 chars in logs
parent 923d8661f8, commit 86e91f76fc
10 changed files with 127 additions and 30 deletions
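The MDC support added here builds on Akka's DiagnosticActorLogging: before an actor processes a message, Akka calls the actor's mdc(currentMessage) hook, attaches the returned map to every log statement emitted while that message is handled, and clears it afterwards. A minimal sketch of that mechanism (the actor and field names below are illustrative, not part of this commit):

import akka.actor.{Actor, DiagnosticActorLogging}
import akka.event.Logging.MDC

// hypothetical actor, for illustration only
class MdcExample(remoteNodeId: String) extends Actor with DiagnosticActorLogging {

  // Akka calls this before each message and attaches the returned map to every
  // log line emitted while that message is being processed
  override def mdc(currentMessage: Any): MDC = Map("nodeId" -> s" n:$remoteNodeId")

  override def receive: Receive = {
    case msg => log.info("received {}", msg) // this line carries %X{nodeId}
  }
}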
eclair-core/src/main/scala/fr/acinq/eclair/Logs.scala (new file, +34)
@@ -0,0 +1,34 @@
/*
* Copyright 2018 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package fr.acinq.eclair

import akka.event.Logging.MDC
import fr.acinq.bitcoin.BinaryData
import fr.acinq.bitcoin.Crypto.PublicKey

object Logs {

def mdc(remoteNodeId_opt: Option[PublicKey] = None, channelId_opt: Option[BinaryData] = None): MDC =
Seq(
remoteNodeId_opt.map(n => "nodeId" -> s" n:$n"), // nb: we preformat MDC values so that there is no white spaces in logs
channelId_opt.map(c => "channelId" -> s" c:$c")
).flatten.toMap

}

// we use a dedicated class so that the logging can be independently adjusted
case class Diagnostics()
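The leading space baked into each MDC value pairs with the %X{nodeId}%X{channelId} logback patterns further down: the keys are concatenated with no separator of their own, so an absent key simply contributes nothing. A rough illustration of what Logs.mdc produces and how it renders (identifiers shortened and made up, not part of the commit):

import akka.event.Logging.MDC
import fr.acinq.bitcoin.BinaryData
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.eclair.Logs

// illustrative only
def example(remoteNodeId: PublicKey, channelId: BinaryData): (MDC, MDC) = {
  val both = Logs.mdc(remoteNodeId_opt = Some(remoteNodeId), channelId_opt = Some(channelId))
  // -> Map("nodeId" -> " n:03af...", "channelId" -> " c:8984...")
  val nodeOnly = Logs.mdc(remoteNodeId_opt = Some(remoteNodeId))
  // -> Map("nodeId" -> " n:03af...")
  (both, nodeOnly)
}

// with a logback pattern such as "%-5level %logger{24}%X{nodeId}%X{channelId} - %msg",
// the two cases render roughly as:
//   INFO  Channel n:03af... c:8984... - <msg>
//   INFO  Peer n:03af... - <msg>
// an absent key adds nothing, so there are no stray separators when a value
// (e.g. the channel id) is not known yet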
@@ -1893,7 +1893,7 @@ class Channel(val nodeParams: NodeParams, wallet: EclairWallet, remoteNodeId: Pu

override def mdc(currentMessage: Any): MDC = {
val id = Helpers.getChannelId(stateData)
Map("channelId" -> id)
Logs.mdc(remoteNodeId_opt = Some(remoteNodeId), channelId_opt = Some(id))
}

// we let the peer decide what to do
@@ -362,7 +362,7 @@ object Commitments {

def revocationHash(seed: BinaryData, index: Long): BinaryData = Crypto.sha256(revocationPreimage(seed, index))

def sendCommit(commitments: Commitments, keyManager: KeyManager): (Commitments, CommitSig) = {
def sendCommit(commitments: Commitments, keyManager: KeyManager)(implicit log: LoggingAdapter): (Commitments, CommitSig) = {
import commitments._
commitments.remoteNextCommitInfo match {
case Right(_) if !localHasChanges(commitments) =>

@@ -376,6 +376,9 @@
val sortedHtlcTxs: Seq[TransactionWithInputInfo] = (htlcTimeoutTxs ++ htlcSuccessTxs).sortBy(_.input.outPoint.index)
val htlcSigs = sortedHtlcTxs.map(keyManager.sign(_, keyManager.htlcPoint(localParams.channelKeyPath), remoteNextPerCommitmentPoint))

// NB: IN/OUT htlcs are inverted because this is the remote commit
log.debug(s"built remote commit number=${remoteCommit.index + 1} htlc_in={} htlc_out={} feeratePerKw=${spec.feeratePerKw} txid=${remoteCommitTx.tx.txid} tx={}", spec.htlcs.filter(_.direction == OUT).size, spec.htlcs.filter(_.direction == IN).size, remoteCommitTx.tx)

// don't sign if they don't get paid
val commitSig = CommitSig(
channelId = commitments.channelId,

@@ -416,6 +419,8 @@
val (localCommitTx, htlcTimeoutTxs, htlcSuccessTxs) = makeLocalTxs(keyManager, localCommit.index + 1, localParams, remoteParams, commitInput, localPerCommitmentPoint, spec)
val sig = keyManager.sign(localCommitTx, keyManager.fundingPublicKey(localParams.channelKeyPath))

log.debug(s"built local commit number=${localCommit.index + 1} htlc_in={} htlc_out={} feeratePerKw=${spec.feeratePerKw} txid=${localCommitTx.tx.txid} tx={}", spec.htlcs.filter(_.direction == IN).size, spec.htlcs.filter(_.direction == OUT).size, localCommitTx.tx)

// TODO: should we have optional sig? (original comment: this tx will NOT be signed if our output is empty)

// no need to compute htlc sigs if commit sig doesn't check out

@@ -467,8 +472,6 @@
val originChannels1 = commitments.originChannels -- completedOutgoingHtlcs
val commitments1 = commitments.copy(localCommit = localCommit1, localChanges = ourChanges1, remoteChanges = theirChanges1, originChannels = originChannels1)

log.debug(s"current commit: index=${localCommit1.index} htlc_in=${localCommit1.spec.htlcs.filter(_.direction == IN).size} htlc_out=${localCommit1.spec.htlcs.filter(_.direction == OUT).size} txid=${localCommit1.publishableTxs.commitTx.tx.txid} tx=${Transaction.write(localCommit1.publishableTxs.commitTx.tx)}")

(commitments1, revocation)
}

@@ -493,7 +496,6 @@ object Commitments {
}

def makeLocalTxs(keyManager: KeyManager, commitTxNumber: Long, localParams: LocalParams, remoteParams: RemoteParams, commitmentInput: InputInfo, localPerCommitmentPoint: Point, spec: CommitmentSpec): (CommitTx, Seq[HtlcTimeoutTx], Seq[HtlcSuccessTx]) = {
val localPaymentPubkey = Generators.derivePubKey(keyManager.paymentPoint(localParams.channelKeyPath).publicKey, localPerCommitmentPoint)
val localDelayedPaymentPubkey = Generators.derivePubKey(keyManager.delayedPaymentPoint(localParams.channelKeyPath).publicKey, localPerCommitmentPoint)
val localHtlcPubkey = Generators.derivePubKey(keyManager.htlcPoint(localParams.channelKeyPath).publicKey, localPerCommitmentPoint)
val remotePaymentPubkey = Generators.derivePubKey(remoteParams.paymentBasepoint, localPerCommitmentPoint)

@@ -507,7 +509,6 @@ object Commitments {
def makeRemoteTxs(keyManager: KeyManager, commitTxNumber: Long, localParams: LocalParams, remoteParams: RemoteParams, commitmentInput: InputInfo, remotePerCommitmentPoint: Point, spec: CommitmentSpec): (CommitTx, Seq[HtlcTimeoutTx], Seq[HtlcSuccessTx]) = {
val localPaymentPubkey = Generators.derivePubKey(keyManager.paymentPoint(localParams.channelKeyPath).publicKey, remotePerCommitmentPoint)
val localHtlcPubkey = Generators.derivePubKey(keyManager.htlcPoint(localParams.channelKeyPath).publicKey, remotePerCommitmentPoint)
val remotePaymentPubkey = Generators.derivePubKey(remoteParams.paymentBasepoint, remotePerCommitmentPoint)
val remoteDelayedPaymentPubkey = Generators.derivePubKey(remoteParams.delayedPaymentBasepoint, remotePerCommitmentPoint)
val remoteHtlcPubkey = Generators.derivePubKey(remoteParams.htlcBasepoint, remotePerCommitmentPoint)
val remoteRevocationPubkey = Generators.revocationPubKey(keyManager.revocationPoint(localParams.channelKeyPath).publicKey, remotePerCommitmentPoint)
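Passing the channel's LoggingAdapter implicitly into Commitments.sendCommit means the helper logs through the calling actor's adapter, so the nodeId/channelId MDC values installed by the Channel actor also show up on lines emitted inside the helper. A small sketch of the same idea (Helper and doWork are made-up names, not from this commit):

import akka.event.LoggingAdapter

object Helper {
  // logs go through the caller's adapter, so whatever MDC the calling actor
  // installed (nodeId, channelId, ...) is attached to these lines as well
  def doWork(n: Int)(implicit log: LoggingAdapter): Int = {
    log.debug("processing n={}", n)
    n + 1
  }
}

// inside an actor mixing in DiagnosticActorLogging (or any ActorLogging):
//   implicit val log2: LoggingAdapter = log
//   Helper.doWork(41)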
@@ -18,12 +18,16 @@ package fr.acinq.eclair.crypto

import java.nio.ByteOrder

import akka.actor.{Actor, ActorRef, FSM, PoisonPill, Props, Terminated}
import akka.actor.{Actor, ActorRef, ExtendedActorSystem, FSM, PoisonPill, Props, Terminated}
import akka.event.Logging.MDC
import akka.event._
import akka.io.Tcp
import akka.util.ByteString
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.bitcoin.{BinaryData, Protocol}
import fr.acinq.eclair.{Diagnostics, FSMDiagnosticActorLogging, Logs}
import fr.acinq.eclair.crypto.Noise._
import fr.acinq.eclair.wire._
import fr.acinq.eclair.wire.{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement}
import scodec.bits.BitVector
import scodec.{Attempt, Codec, DecodeResult}

@@ -46,7 +50,31 @@ import scala.util.{Failure, Success, Try}
* @param rs remote node static public key (which must be known before we initiate communication)
* @param connection actor that represents the other node's
*/
class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], connection: ActorRef, codec: Codec[T]) extends Actor with FSM[TransportHandler.State, TransportHandler.Data] {
class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], connection: ActorRef, codec: Codec[T]) extends Actor with FSMDiagnosticActorLogging[TransportHandler.State, TransportHandler.Data] {

// will hold the peer's public key once it is available (we don't know it right away in case of an incoming connection)
var remoteNodeId_opt: Option[PublicKey] = rs.map(PublicKey(_))

val wireLog = new BusLogging(context.system.eventStream, "", classOf[Diagnostics], context.system.asInstanceOf[ExtendedActorSystem].logFilter) with DiagnosticLoggingAdapter

def diag(message: T, direction: String) = {
require(direction == "IN" || direction == "OUT")
val channelId_opt = message match {
case msg: HasTemporaryChannelId => Some(msg.temporaryChannelId)
case msg: HasChannelId => Some(msg.channelId)
case _ => None
}

wireLog.mdc(Logs.mdc(remoteNodeId_opt, channelId_opt))
if (channelId_opt.isDefined) {
// channel-related messages are logged as info
wireLog.info(s"$direction msg={}", message)
} else {
// other messages (e.g. routing gossip) are logged as debug
wireLog.debug(s"$direction msg={}", message)
}
wireLog.clearMDC()
}

import TransportHandler._

@@ -74,6 +102,7 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
var m: Map[T, Int] = Map()
plaintextMessages.foreach(plaintext => Try(codec.decode(BitVector(plaintext.data))) match {
case Success(Attempt.Successful(DecodeResult(message, _))) =>
diag(message, "IN")
listener ! message
m += (message -> (m.getOrElse(message, 0) + 1))
case Success(Attempt.Failure(err)) =>

@@ -100,6 +129,7 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
reader.read(payload) match {
case (writer, _, Some((dec, enc, ck))) =>
val remoteNodeId = PublicKey(writer.rs)
remoteNodeId_opt = Some(remoteNodeId)
context.parent ! HandshakeCompleted(connection, self, remoteNodeId)
val nextStateData = WaitingForListenerData(Encryptor(ExtendedCipherState(enc, ck)), Decryptor(ExtendedCipherState(dec, ck), ciphertextLength = None, remainder))
goto(WaitingForListener) using nextStateData

@@ -116,6 +146,7 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
case (_, message, Some((enc, dec, ck))) => {
connection ! Tcp.Write(buf(TransportHandler.prefix +: message))
val remoteNodeId = PublicKey(writer.rs)
remoteNodeId_opt = Some(remoteNodeId)
context.parent ! HandshakeCompleted(connection, self, remoteNodeId)
val nextStateData = WaitingForListenerData(Encryptor(ExtendedCipherState(enc, ck)), Decryptor(ExtendedCipherState(dec, ck), ciphertextLength = None, remainder))
goto(WaitingForListener) using nextStateData

@@ -183,6 +214,7 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
}
stay using d.copy(sendBuffer = sendBuffer1)
} else {
diag(t, "OUT")
val blob = codec.encode(t).require.toByteArray
val (enc1, ciphertext) = d.encryptor.encrypt(blob)
connection ! Tcp.Write(buf(ciphertext), WriteAck)

@@ -191,11 +223,13 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co

case Event(WriteAck, d: NormalData[T]) =>
def send(t: T) = {
diag(t, "OUT")
val blob = codec.encode(t).require.toByteArray
val (enc1, ciphertext) = d.encryptor.encrypt(blob)
connection ! Tcp.Write(buf(ciphertext), WriteAck)
enc1
}

d.sendBuffer.normalPriority.dequeueOption match {
case Some((t, normalPriority1)) =>
val enc1 = send(t)

@@ -220,12 +254,21 @@ class TransportHandler[T: ClassTag](keyPair: KeyPair, rs: Option[BinaryData], co
log.info(s"connection terminated, stopping the transport")
// this can be the connection or the listener, either way it is a cause of death
stop(FSM.Normal)

case Event(msg, d) =>
d match {
case n: NormalData[T] => log.warning(s"unhandled message $msg in state normal unackedSent=${n.unackedSent.size} unackedReceived=${n.unackedReceived.size} sendBuffer.lowPriority=${n.sendBuffer.lowPriority.size} sendBuffer.normalPriority=${n.sendBuffer.normalPriority.size}")
case _ => log.warning(s"unhandled message $msg in state ${d.getClass.getSimpleName}")
}
stay
}

override def aroundPostStop(): Unit = connection ! Tcp.Close // attempts to gracefully close the connection when dying

initialize()

override def mdc(currentMessage: Any): MDC = Logs.mdc(remoteNodeId_opt = remoteNodeId_opt)

}

object TransportHandler {

@@ -372,5 +415,4 @@ object TransportHandler {
case object WriteAck extends Tcp.Event
// @formatter:on

}
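The wireLog above is a separate logging adapter created against classOf[Diagnostics], so the per-message wire logs get their own logger category (fr.acinq.eclair.Diagnostics) and can be turned up, down, or off independently of TransportHandler's own logger. A sketch of the same construction wrapped in a helper (the function name is made up):

import akka.actor.ExtendedActorSystem
import akka.event.{BusLogging, DiagnosticLoggingAdapter}
import fr.acinq.eclair.Diagnostics

// builds a logging adapter whose logger/category is fr.acinq.eclair.Diagnostics,
// independent from the logger of the actor that uses it
def wireLogger(system: ExtendedActorSystem): DiagnosticLoggingAdapter =
  new BusLogging(system.eventStream, "", classOf[Diagnostics], system.logFilter) with DiagnosticLoggingAdapter

With the slf4j backend in place, that category should then be addressable in logback via a <logger name="fr.acinq.eclair.Diagnostics" .../> element, without touching the rest of eclair's logging.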
@@ -18,9 +18,10 @@ package fr.acinq.eclair.io

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorLogging, ActorRef, OneForOneStrategy, Props, Status, SupervisorStrategy, Terminated}
import akka.actor.{Actor, ActorLogging, ActorRef, DiagnosticActorLogging, OneForOneStrategy, Props, Status, SupervisorStrategy, Terminated}
import akka.event.Logging.MDC
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.eclair.NodeParams
import fr.acinq.eclair.{Logs, NodeParams}
import fr.acinq.eclair.crypto.Noise.KeyPair
import fr.acinq.eclair.crypto.TransportHandler
import fr.acinq.eclair.crypto.TransportHandler.HandshakeCompleted

@@ -33,7 +34,7 @@ import fr.acinq.eclair.wire.LightningMessageCodecs
*
* All incoming/outgoing connections are processed here, before being sent to the switchboard
*/
class Authenticator(nodeParams: NodeParams) extends Actor with ActorLogging {
class Authenticator(nodeParams: NodeParams) extends Actor with DiagnosticActorLogging {

override def receive: Receive = {
case switchboard: ActorRef => context become ready(switchboard, Map.empty)

@@ -71,6 +72,15 @@ class Authenticator(nodeParams: NodeParams) extends Actor with ActorLogging {

// we should not restart a failing transport-handler
override val supervisorStrategy = OneForOneStrategy(loggingEnabled = true) { case _ => SupervisorStrategy.Stop }

override def mdc(currentMessage: Any): MDC = {
val remoteNodeId_opt = currentMessage match {
case PendingAuth(_, remoteNodeId_opt, _, _) => remoteNodeId_opt
case HandshakeCompleted(_, _, remoteNodeId) => Some(remoteNodeId)
case _ => None
}
Logs.mdc(remoteNodeId_opt = remoteNodeId_opt)
}
}

object Authenticator {
@@ -19,10 +19,11 @@ package fr.acinq.eclair.io
import java.net.InetSocketAddress

import akka.actor.{Props, _}
import akka.event.Logging.MDC
import akka.io.Tcp.SO.KeepAlive
import akka.io.{IO, Tcp}
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.eclair.NodeParams
import fr.acinq.eclair.{Logs, NodeParams}
import fr.acinq.eclair.io.Client.ConnectionFailed

import scala.concurrent.duration._

@@ -31,15 +32,19 @@ import scala.concurrent.duration._
* Created by PM on 27/10/2015.
*
*/
class Client(nodeParams: NodeParams, authenticator: ActorRef, address: InetSocketAddress, remoteNodeId: PublicKey, origin_opt: Option[ActorRef]) extends Actor with ActorLogging {
class Client(nodeParams: NodeParams, authenticator: ActorRef, address: InetSocketAddress, remoteNodeId: PublicKey, origin_opt: Option[ActorRef]) extends Actor with DiagnosticActorLogging {

import Tcp._
import context.system

log.info(s"connecting to pubkey=$remoteNodeId host=${address.getHostString} port=${address.getPort}")
IO(Tcp) ! Connect(address, timeout = Some(5 seconds), options = KeepAlive(true) :: Nil, pullMode = true)
// we could connect directly here but this allows to take advantage of the automated mdc configuration on message reception
self ! 'connect

def receive = {
case 'connect =>
log.info(s"connecting to pubkey=$remoteNodeId host=${address.getHostString} port=${address.getPort}")
IO(Tcp) ! Connect(address, timeout = Some(5 seconds), options = KeepAlive(true) :: Nil, pullMode = true)

case CommandFailed(_: Connect) =>
log.info(s"connection failed to $remoteNodeId@${address.getHostString}:${address.getPort}")
origin_opt.map(_ ! Status.Failure(ConnectionFailed(address)))

@@ -59,6 +64,8 @@ class Client(nodeParams: NodeParams, authenticator: ActorRef, address: InetSocke
}

override def unhandled(message: Any): Unit = log.warning(s"unhandled message=$message")

override def mdc(currentMessage: Any): MDC = Logs.mdc(remoteNodeId_opt = Some(remoteNodeId))
}

object Client extends App {
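The 'connect self-message replaces work that used to happen in the Client constructor: by reacting to a message instead, the connection attempt runs inside Akka's receive pipeline, where DiagnosticActorLogging has already installed the MDC, so even the very first "connecting to" line carries the nodeId. A stripped-down sketch of the trick (class, field and message names are illustrative):

import akka.actor.{Actor, DiagnosticActorLogging}
import akka.event.Logging.MDC

class StartupExample(remoteNodeId: String) extends Actor with DiagnosticActorLogging {

  override def mdc(currentMessage: Any): MDC = Map("nodeId" -> s" n:$remoteNodeId")

  // logging directly here, in the constructor, would NOT carry the MDC yet
  self ! 'start

  override def receive: Receive = {
    case 'start => log.info("starting up") // handled via aroundReceive, MDC is set
  }
}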
@@ -20,7 +20,8 @@ import java.io.ByteArrayInputStream
import java.net.InetSocketAddress
import java.nio.ByteOrder

import akka.actor.{ActorRef, FSM, OneForOneStrategy, PoisonPill, Props, Status, SupervisorStrategy, Terminated}
import akka.actor.{ActorRef, OneForOneStrategy, PoisonPill, Props, Status, SupervisorStrategy, Terminated}
import akka.event.Logging.MDC
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.bitcoin.{BinaryData, DeterministicWallet, MilliSatoshi, Protocol, Satoshi}
import fr.acinq.eclair.blockchain.EclairWallet

@@ -37,7 +38,7 @@ import scala.util.Random
/**
* Created by PM on 26/08/2016.
*/
class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: ActorRef, watcher: ActorRef, router: ActorRef, relayer: ActorRef, wallet: EclairWallet) extends FSM[Peer.State, Peer.Data] {
class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: ActorRef, watcher: ActorRef, router: ActorRef, relayer: ActorRef, wallet: EclairWallet) extends FSMDiagnosticActorLogging[Peer.State, Peer.Data] {

import Peer._

@@ -241,7 +242,7 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor
val updatesSent = sendIfNeeded(updates)
val nodesSent = sendIfNeeded(nodes)
if (channelsSent > 0 || updatesSent > 0 || nodesSent > 0) {
log.info(s"sent announcements to {}: channels={} updates={} nodes={}", remoteNodeId, channelsSent, updatesSent, nodesSent)
log.debug(s"sent announcements to {}: channels={} updates={} nodes={}", remoteNodeId, channelsSent, updatesSent, nodesSent)
}
stay

@@ -326,6 +327,8 @@ class Peer(nodeParams: NodeParams, remoteNodeId: PublicKey, authenticator: Actor

initialize()

override def mdc(currentMessage: Any): MDC = Logs.mdc(remoteNodeId_opt = Some(remoteNodeId))

}

object Peer {
@@ -20,7 +20,7 @@
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<target>System.out</target>
<encoder>
<pattern>%date{HH:mm:ss.SSS} %highlight(%-5level) %X{akkaSource} - %msg%ex{12}%n</pattern>
<pattern>%date{HH:mm:ss.SSS} %highlight(%-5level) %logger{0} %X{nodeId}%X{channelId} - %msg%ex{12}%n</pattern>
</encoder>
</appender>
@@ -21,7 +21,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %msg%ex{12}%n</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{24} %X{nodeId}%X{channelId} - %msg%ex{12}%n</pattern>
</encoder>
</appender>

@@ -29,7 +29,7 @@
<file>${eclair.datadir:-${user.home}/.eclair}/eclair.log</file>
<append>true</append>
<encoder>
<pattern>%d %-5level %logger{36} %X{akkaSource} %X{channelId} - %msg%ex{24}%n</pattern>
<pattern>%d %-5level %logger{24} %X{nodeId}%X{channelId} - %msg%ex{24}%n</pattern>
</encoder>
</appender>

@@ -37,7 +37,7 @@
<then>
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d %-5level %logger{36} %X{akkaSource} %X{channelId} - %msg%ex{24}%n</pattern>
<pattern>%d %-5level %logger{24} %X{nodeId}%X{channelId} - %msg%ex{24}%n</pattern>
</encoder>
</appender>
<root>
@@ -21,7 +21,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %msg%ex{12}%n</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{24} %X{nodeId}%X{channelId} - %msg%ex{12}%n</pattern>
</encoder>
</appender>

@@ -29,7 +29,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %yellow(%msg) %ex{12}%n</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{24} %X{nodeId}%X{channelId} - %yellow(%msg) %ex{12}%n</pattern>
</encoder>
</appender>

@@ -37,7 +37,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %red(%msg) %ex{12}%n</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{24} %X{nodeId}%X{channelId} - %red(%msg) %ex{12}%n</pattern>
</encoder>
</appender>

@@ -45,7 +45,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} channelId=%X{channelId} - %blue(%msg) %ex{12}%n</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{24} %X{nodeId}%X{channelId} - %blue(%msg) %ex{12}%n</pattern>
</encoder>
</appender>

@@ -53,7 +53,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %cyan(%msg) %ex{12}%n</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{24} %X{nodeId}%X{channelId} - %cyan(%msg) %ex{12}%n</pattern>
</encoder>
</appender>

@@ -61,7 +61,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %green(%msg) %ex{12}%n</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{24} %X{nodeId}%X{channelId} - %green(%msg) %ex{12}%n</pattern>
</encoder>
</appender>

@@ -69,7 +69,7 @@
<target>System.out</target>
<withJansi>false</withJansi>
<encoder>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{36} %X{akkaSource} - %magenta(%msg) %ex{12}%n</pattern>
<pattern>%yellow(${HOSTNAME} %d) %highlight(%-5level) %logger{24} %X{nodeId}%X{channelId} - %magenta(%msg) %ex{12}%n</pattern>
</encoder>
</appender>
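Across all the logback configurations the change is the same: %X{akkaSource} (the actor path) is dropped in favour of a shorter logger name (%logger{24}, or %logger{0} in the first file) plus the two MDC keys, %X{nodeId}%X{channelId}. With the new pattern, a channel-related line and a wire-message line should look roughly like the following (timestamps and identifiers are made up, message content elided):

2018-07-10 14:03:12,345 INFO  f.a.e.channel.Channel n:03af01... c:8984ab... - <message>
2018-07-10 14:03:12,349 DEBUG f.a.e.Diagnostics n:03af01... - IN msg=<wire message>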