2023 04 22 peermanager dmh refactor (#5057)

* WIP: Move DataMessageHandler into PeerManager

* Get things compiling

* Turn off logging
This commit is contained in:
Chris Stewart 2023-04-24 08:14:02 -05:00 committed by GitHub
parent 1461782865
commit ce6d2212c1
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 79 additions and 92 deletions

View file

@ -63,8 +63,7 @@ class NeutrinoNodeWithUncachedBitcoindTest extends NodeUnitTest with CachedTor {
nodeConnectedWithBitcoinds =>
val node = nodeConnectedWithBitcoinds.node
val bitcoinds = nodeConnectedWithBitcoinds.bitcoinds
val peerManager = node.peerManager
def peers = peerManager.peers
def peers = node.peerManager.peers
for {
bitcoindPeers <- bitcoinPeersF
@ -72,15 +71,15 @@ class NeutrinoNodeWithUncachedBitcoindTest extends NodeUnitTest with CachedTor {
maxTries = 30,
interval = 1.second)
//sync from first bitcoind
_ = node.updateDataMessageHandler(
node.getDataMessageHandler.copy(syncPeer = Some(bitcoindPeers(0)))(
executionContext,
node.nodeAppConfig,
node.chainAppConfig))
_ = node.peerManager.updateDataMessageHandler(
node.peerManager.getDataMessageHandler.copy(syncPeer =
Some(bitcoindPeers(0)))(executionContext,
node.nodeAppConfig,
node.chainAppConfig))
expectHeaders = ExpectResponseCommand(
GetHeadersMessage(node.chainConfig.chain.genesisHash))
//waiting for response to header query now
client <- peerManager
client <- node.peerManager
.peerDataMap(bitcoindPeers(0))
.peerMessageSender
.map(_.client)
@ -89,7 +88,7 @@ class NeutrinoNodeWithUncachedBitcoindTest extends NodeUnitTest with CachedTor {
_ <- bitcoinds(0).disconnectNode(nodeUri)
_ = logger.info(s"Disconnected $nodeUri from bitcoind")
//old peer we were syncing with that just disconnected us
oldSyncPeer = node.getDataMessageHandler.syncPeer.get
oldSyncPeer = node.peerManager.getDataMessageHandler.syncPeer.get
_ <- NodeTestUtil.awaitAllSync(node, bitcoinds(1))
expectedSyncPeer = bitcoindPeers(1)
} yield {
@ -133,17 +132,18 @@ class NeutrinoNodeWithUncachedBitcoindTest extends NodeUnitTest with CachedTor {
_ <- AsyncUtil.retryUntilSatisfied(node.peerManager.peers.size == 2)
peers <- bitcoinPeersF
peer = peers.head
_ = node.updateDataMessageHandler(
node.getDataMessageHandler.copy(syncPeer = Some(peer))(
_ = node.peerManager.updateDataMessageHandler(
node.peerManager.getDataMessageHandler.copy(syncPeer = Some(peer))(
executionContext,
node.nodeConfig,
node.chainConfig))
invalidHeaderMessage = HeadersMessage(headers = Vector(invalidHeader))
sender <- node.peerManager.peerDataMap(peer).peerMessageSender
_ <- node.getDataMessageHandler.addToStream(invalidHeaderMessage,
sender,
peer)
_ <- node.peerManager.getDataMessageHandler.addToStream(
invalidHeaderMessage,
sender,
peer)
bestChain = bitcoinds(1)
_ <- NodeTestUtil.awaitSync(node, bestChain)
} yield {
@ -166,9 +166,8 @@ class NeutrinoNodeWithUncachedBitcoindTest extends NodeUnitTest with CachedTor {
sendFs = 1
.to(node.nodeConfig.maxInvalidResponsesAllowed + 1)
.map(_ =>
node.getDataMessageHandler.addToStream(invalidHeaderMessage,
sender,
peer))
node.peerManager.getDataMessageHandler
.addToStream(invalidHeaderMessage, sender, peer))
_ <- Future.sequence(sendFs)
} yield ()
}
@ -178,8 +177,8 @@ class NeutrinoNodeWithUncachedBitcoindTest extends NodeUnitTest with CachedTor {
peers <- bitcoinPeersF
peer = peers(0)
_ <- node.peerManager.isConnected(peer).map(assert(_))
_ = node.updateDataMessageHandler(
node.getDataMessageHandler.copy(syncPeer = Some(peer))(
_ = node.peerManager.updateDataMessageHandler(
node.peerManager.getDataMessageHandler.copy(syncPeer = Some(peer))(
executionContext,
node.nodeConfig,
node.chainConfig))

View file

@ -102,7 +102,7 @@ class P2PClientActorTest
node <- NodeUnitTest.buildNode(peer, None)
} yield PeerMessageReceiver(
controlMessageHandler = node.controlMessageHandler,
dataMessageHandler = node.getDataMessageHandler,
dataMessageHandler = node.peerManager.getDataMessageHandler,
peer = peer)
val clientActorF: Future[TestActorRef[P2PClientActor]] =

View file

@ -15,15 +15,10 @@ import org.bitcoins.core.p2p.ServiceIdentifier
import org.bitcoins.core.protocol.BlockStamp
import org.bitcoins.node.config.NodeAppConfig
import org.bitcoins.node.models.Peer
import org.bitcoins.node.networking.peer.DataMessageHandlerState.HeaderSync
import org.bitcoins.node.networking.peer.{
ControlMessageHandler,
DataMessageHandler
}
import org.bitcoins.node.networking.peer.{ControlMessageHandler}
import java.time.Instant
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, Future}
import scala.concurrent.{Future}
case class NeutrinoNode(
chainApi: ChainApi,
@ -45,33 +40,8 @@ case class NeutrinoNode(
val controlMessageHandler: ControlMessageHandler = ControlMessageHandler(this)
private var dataMessageHandler: DataMessageHandler = {
val result = for {
chainApi <- chainApiFromDb()
} yield {
DataMessageHandler(
chainApi = chainApi,
walletCreationTimeOpt = walletCreationTimeOpt,
peerManager = peerManager,
state = HeaderSync,
initialSyncDone = None,
filterBatchCache = Set.empty,
syncPeer = None
)
}
Await.result(result, 10.seconds)
}
override def getDataMessageHandler: DataMessageHandler = dataMessageHandler
override def updateDataMessageHandler(
dataMessageHandler: DataMessageHandler): NeutrinoNode = {
this.dataMessageHandler = dataMessageHandler
this
}
override lazy val peerManager: PeerManager = PeerManager(paramPeers, this)
override lazy val peerManager: PeerManager =
PeerManager(paramPeers, this, walletCreationTimeOpt)
override def start(): Future[NeutrinoNode] = {
val res = for {
@ -102,8 +72,8 @@ case class NeutrinoNode(
syncPeer <- peerManager.randomPeerWithService(
ServiceIdentifier.NODE_COMPACT_FILTERS)
_ = logger.info(s"Syncing with $syncPeer")
_ = updateDataMessageHandler(
dataMessageHandler.copy(syncPeer = Some(syncPeer)))
_ = peerManager.updateDataMessageHandler(
peerManager.getDataMessageHandler.copy(syncPeer = Some(syncPeer)))
peerMsgSender <- peerManager.peerDataMap(syncPeer).peerMessageSender
header <- chainApi.getBestBlockHeader()
bestFilterHeaderOpt <- chainApi.getBestFilterHeader()
@ -165,6 +135,13 @@ case class NeutrinoNode(
}
}
override def syncFromNewPeer(): Future[Unit] = {
logger.info(s"Trying to sync from new peer")
val _ = peerManager.updateDataMessageHandler(
peerManager.getDataMessageHandler.reset)
sync().map(_ => ())
}
/** Starts syncing compact filter headers.
* Only starts syncing compact filters if our compact filter headers are in sync with block headers
*/
@ -173,7 +150,7 @@ case class NeutrinoNode(
chainApi: ChainApi,
bestFilterOpt: Option[CompactFilterDb]): Future[Unit] = {
val syncPeerMsgSenderOptF = {
dataMessageHandler.syncPeer.map { peer =>
peerManager.getDataMessageHandler.syncPeer.map { peer =>
peerManager.peerDataMap(peer).peerMessageSender
}
}

View file

@ -18,7 +18,6 @@ import org.bitcoins.node.config.NodeAppConfig
import org.bitcoins.node.models._
import org.bitcoins.node.networking.peer.{
ControlMessageHandler,
DataMessageHandler,
PeerMessageSender
}
@ -39,22 +38,12 @@ trait Node extends NodeApi with ChainQueryApi with P2PLogger {
def peerManager: PeerManager
/** The current data message handler.
* It should be noted that the dataMessageHandler contains
* chainstate. When we update with a new chainstate, we need to
* make sure we update the [[DataMessageHandler]] via [[updateDataMessageHandler()]]
* to make sure we don't corrupt our chainstate cache
*/
def getDataMessageHandler: DataMessageHandler
def controlMessageHandler: ControlMessageHandler
def nodeCallbacks: NodeCallbacks = nodeAppConfig.callBacks
lazy val txDAO: BroadcastAbleTransactionDAO = BroadcastAbleTransactionDAO()
def updateDataMessageHandler(dataMessageHandler: DataMessageHandler): Node
/** This is constructing a chain api from disk every time we call this method
* This involves database calls which can be slow and expensive to construct
* our [[org.bitcoins.chain.blockchain.Blockchain Blockchain]]
@ -134,6 +123,8 @@ trait Node extends NodeApi with ChainQueryApi with P2PLogger {
*/
def sync(): Future[Unit]
def syncFromNewPeer(): Future[Unit]
/** Broadcasts the given transaction over the P2P network */
override def broadcastTransactions(
transactions: Vector[Transaction]): Future[Unit] = {
@ -193,7 +184,7 @@ trait Node extends NodeApi with ChainQueryApi with P2PLogger {
isIBD: Boolean,
blockHashes: Vector[DoubleSha256Digest]): Future[Unit] = {
if (isIBD) {
val syncPeerOpt = getDataMessageHandler.syncPeer
val syncPeerOpt = peerManager.getDataMessageHandler.syncPeer
syncPeerOpt match {
case Some(peer) =>
peerManager

View file

@ -34,7 +34,7 @@ case class PeerData(
private lazy val client: Future[P2PClient] = {
val peerMessageReceiver =
PeerMessageReceiver(node.controlMessageHandler,
node.getDataMessageHandler,
node.peerManager.getDataMessageHandler,
peer)
P2PClient(
peer = peer,

View file

@ -4,6 +4,7 @@ import akka.actor.{ActorRef, ActorSystem, Cancellable, Props}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Sink, Source, SourceQueueWithComplete}
import org.bitcoins.asyncutil.AsyncUtil
import org.bitcoins.chain.blockchain.{ChainHandler}
import org.bitcoins.chain.config.ChainAppConfig
import org.bitcoins.core.api.chain.ChainApi
import org.bitcoins.core.api.node.NodeType
@ -19,7 +20,7 @@ import org.bitcoins.node.util.BitcoinSNodeUtil
import scodec.bits.ByteVector
import java.net.InetAddress
import java.time.Duration
import java.time.{Duration, Instant}
import scala.collection.mutable
import scala.concurrent.duration.DurationInt
import scala.concurrent.{ExecutionContext, Future, Promise}
@ -27,7 +28,8 @@ import scala.util.Random
case class PeerManager(
paramPeers: Vector[Peer] = Vector.empty,
node: NeutrinoNode)(implicit
node: NeutrinoNode,
walletCreationTimeOpt: Option[Instant])(implicit
ec: ExecutionContext,
system: ActorSystem,
nodeAppConfig: NodeAppConfig,
@ -363,9 +365,9 @@ case class PeerManager(
//the actor for one of the persistent peers stopped; this can happen when a reconnection attempt
//failed after exceeding the max retry limit (the client was stopped to disconnect from it) — remove the peer
_peerDataMap.remove(peer)
val syncPeer = node.getDataMessageHandler.syncPeer
val syncPeer = getDataMessageHandler.syncPeer
if (peers.length > 1 && syncPeer.isDefined && syncPeer.get == peer) {
syncFromNewPeer().map(_ => ())
node.syncFromNewPeer().map(_ => ())
} else if (syncPeer.isEmpty) {
Future.unit
} else {
@ -410,8 +412,8 @@ case class PeerManager(
case _: GetHeadersMessage =>
dataMessageStream.offer(HeaderTimeoutWrapper(peer)).map(_ => ())
case _ =>
if (peer == node.getDataMessageHandler.syncPeer.get)
syncFromNewPeer().map(_ => ())
if (peer == getDataMessageHandler.syncPeer.get)
node.syncFromNewPeer().map(_ => ())
else Future.unit
}
}
@ -421,24 +423,27 @@ case class PeerManager(
Future.unit
}
def syncFromNewPeer(): Future[DataMessageHandler] =
node.syncFromNewPeer().map(_ => getDataMessageHandler)
private def onHeaderRequestTimeout(
peer: Peer,
state: DataMessageHandlerState): Future[DataMessageHandler] = {
logger.info(s"Header request timed out from $peer in state $state")
state match {
case HeaderSync =>
syncFromNewPeer()
node.syncFromNewPeer().map(_ => getDataMessageHandler)
case headerState @ ValidatingHeaders(_, failedCheck, _) =>
val newHeaderState = headerState.copy(failedCheck = failedCheck + peer)
val newDmh = node.getDataMessageHandler.copy(state = newHeaderState)
val newDmh = getDataMessageHandler.copy(state = newHeaderState)
if (newHeaderState.validated) {
fetchCompactFilterHeaders(newDmh)
.map(_.copy(state = PostHeaderSync))
} else Future.successful(newDmh)
case PostHeaderSync => Future.successful(node.getDataMessageHandler)
case PostHeaderSync => Future.successful(getDataMessageHandler)
}
}
@ -454,13 +459,6 @@ case class PeerManager(
}
}
def syncFromNewPeer(): Future[DataMessageHandler] = {
logger.info(s"Trying to sync from new peer")
val newNode =
node.updateDataMessageHandler(node.getDataMessageHandler.reset)
newNode.sync().map(_ => node.getDataMessageHandler)
}
private val dataMessageStreamSource = Source
.queue[StreamDataMessageWrapper](1500,
overflowStrategy =
@ -468,17 +466,17 @@ case class PeerManager(
.mapAsync(1) {
case msg @ DataMessageWrapper(payload, peerMsgSender, peer) =>
logger.debug(s"Got ${payload.commandName} from peer=${peer} in stream")
node.getDataMessageHandler
getDataMessageHandler
.handleDataPayload(payload, peerMsgSender, peer)
.map { newDmh =>
node.updateDataMessageHandler(newDmh)
updateDataMessageHandler(newDmh)
msg
}
case msg @ HeaderTimeoutWrapper(peer) =>
logger.debug(s"Processing timeout header for $peer")
onHeaderRequestTimeout(peer, node.getDataMessageHandler.state).map {
onHeaderRequestTimeout(peer, getDataMessageHandler.state).map {
newDmh =>
node.updateDataMessageHandler(newDmh)
updateDataMessageHandler(newDmh)
logger.debug(s"Done processing timeout header for $peer")
msg
}
@ -514,6 +512,26 @@ case class PeerManager(
}
}
private var dataMessageHandler: DataMessageHandler = {
DataMessageHandler(
chainApi = ChainHandler.fromDatabase(),
walletCreationTimeOpt = walletCreationTimeOpt,
peerManager = this,
state = HeaderSync,
initialSyncDone = None,
filterBatchCache = Set.empty,
syncPeer = None
)
}
def getDataMessageHandler: DataMessageHandler = dataMessageHandler
def updateDataMessageHandler(
dataMessageHandler: DataMessageHandler): PeerManager = {
this.dataMessageHandler = dataMessageHandler
this
}
}
case class ResponseTimeout(payload: NetworkPayload)

View file

@ -213,7 +213,8 @@ object NodeUnitTest extends P2PLogger {
system)
val receiver =
PeerMessageReceiver(controlMessageHandler = node.controlMessageHandler,
dataMessageHandler = node.getDataMessageHandler,
dataMessageHandler =
node.peerManager.getDataMessageHandler,
peer = peer)(system, appConfig.nodeConf)
Future.successful(receiver)
}
@ -392,7 +393,8 @@ object NodeUnitTest extends P2PLogger {
val node = buildNode(peer, chainApi, walletCreationTimeOpt)
val receiver =
PeerMessageReceiver(controlMessageHandler = node.controlMessageHandler,
dataMessageHandler = node.getDataMessageHandler,
dataMessageHandler =
node.peerManager.getDataMessageHandler,
peer = peer)
Future.successful(receiver)
}