Optimize filter sync and fetching filter heights (#2568)

commit 03ca6f617e
parent 2055c5a3c7
Author: benthecarman
Date:   2021-01-30 14:56:47 -06:00 (committed by GitHub)

14 changed files with 84 additions and 68 deletions


@@ -169,7 +169,7 @@ class BitcoindRpcClient(val instance: BitcoindInstance)(implicit
         s"Bitcoind chainApi doesn't allow you fetch block header batch range"))
 
   override def nextFilterHeaderBatchRange(
-      stopHash: DoubleSha256DigestBE,
+      startHeight: Int,
       batchSize: Int): Future[Option[FilterSyncMarker]] =
     Future.failed(
       new UnsupportedOperationException(


@@ -426,8 +426,7 @@ class ChainHandlerTest extends ChainDbUnitTest {
       for {
         bestBlock <- chainHandler.getBestBlockHeader()
         bestBlockHashBE = bestBlock.hashBE
-        rangeOpt <-
-          chainHandler.nextFilterHeaderBatchRange(DoubleSha256DigestBE.empty, 1)
+        rangeOpt <- chainHandler.nextFilterHeaderBatchRange(0, 1)
       } yield {
         val marker = rangeOpt.get
         assert(rangeOpt.nonEmpty)


@@ -317,30 +317,24 @@ class ChainHandler(
 
   /** @inheritdoc */
   override def nextFilterHeaderBatchRange(
-      prevStopHash: DoubleSha256DigestBE,
+      filterHeight: Int,
       batchSize: Int): Future[Option[FilterSyncMarker]] = {
-    val startHeightF = if (prevStopHash == DoubleSha256DigestBE.empty) {
-      Future.successful(0)
-    } else {
-      for {
-        prevStopHeaderOpt <- getFilterHeader(prevStopHash)
-        prevStopHeader = prevStopHeaderOpt.getOrElse(
-          throw UnknownBlockHash(s"Unknown block hash ${prevStopHash}"))
-      } yield prevStopHeader.height + 1
-    }
-
-    for {
-      startHeight <- startHeightF
-      filterHeaderCount <- getFilterHeaderCount()
-      stopHeight =
-        if (startHeight - 1 + batchSize > filterHeaderCount)
-          filterHeaderCount
-        else startHeight - 1 + batchSize
-      stopBlockOpt <- getFilterHeadersAtHeight(stopHeight).map(_.headOption)
-      stopBlock = stopBlockOpt.getOrElse(
-        throw UnknownBlockHeight(s"Unknown filter header height ${stopHeight}"))
-    } yield {
-      if (startHeight > stopHeight)
+    val startHeight = if (filterHeight <= 0) 0 else filterHeight + 1
+    val stopHeight = startHeight - 1 + batchSize
+
+    val stopBlockF =
+      getFilterHeadersAtHeight(stopHeight).map(_.headOption).flatMap {
+        case Some(stopBlock) =>
+          Future.successful(stopBlock)
+        case None =>
+          // This means the stop height is past the filter header height
+          getBestFilterHeader().map(
+            _.getOrElse(throw UnknownBlockHeight(
+              s"Unknown filter header height $stopHeight")))
+      }
+
+    stopBlockF.map { stopBlock =>
+      if (startHeight > stopBlock.height)
         None
       else
         Some(FilterSyncMarker(startHeight, stopBlock.blockHashBE.flip))
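
Note on the hunk above: the old code resolved prevStopHash through getFilterHeader just to recover a height, so every batch request cost an extra block-hash lookup, and an overshooting stop height threw UnknownBlockHeight. The new code takes the height directly, the range becomes pure arithmetic, and the case None branch falls back to the best filter header instead of throwing. A minimal standalone sketch of the height math (example values are hypothetical):

    // filterHeight is the height of the last synced filter; <= 0 means none yet
    def batchRange(filterHeight: Int, batchSize: Int): (Int, Int) = {
      val startHeight = if (filterHeight <= 0) 0 else filterHeight + 1
      val stopHeight = startHeight - 1 + batchSize
      (startHeight, stopHeight)
    }

    batchRange(0, 1000)   // (0, 999): a fresh sync begins at the genesis filter
    batchRange(999, 1000) // (1000, 1999): the next batch starts one past the last
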
@@ -525,13 +519,9 @@ class ChainHandler(
 
   /** @inheritdoc */
   override def getFilterHeaderCount(): Future[Int] = {
     logger.debug(s"Querying for filter header count")
-    filterHeaderDAO.getBestFilterHeader.map {
-      case Some(filterHeader) =>
-        val height = filterHeader.height
-        logger.debug(s"getFilterHeaderCount result: count=$height")
-        height
-      case None =>
-        0
+    filterHeaderDAO.getBestFilterHeaderHeight.map { height =>
+      logger.debug(s"getFilterHeaderCount result: count=$height")
+      height
     }
   }
@@ -639,12 +629,9 @@ class ChainHandler(
 
   /** @inheritdoc */
   override def getFilterCount(): Future[Int] = {
     logger.debug(s"Querying for filter count")
-    filterDAO.getBestFilter.map {
-      case Some(filter) =>
-        val height = filter.height
-        logger.debug(s"getFilterCount result: count=$height")
-        height
-      case None => 0
+    filterDAO.getBestFilterHeight.map { height =>
+      logger.debug(s"getFilterCount result: count=$height")
+      height
     }
   }
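
Both count methods now delegate to a height-only DAO query instead of loading the full best-filter row, which carries the serialized filter bytes. The difference, sketched with the method names from the hunks above:

    // before: materialize the whole CompactFilterDb row, keep only its height
    filterDAO.getBestFilter.map(_.map(_.height).getOrElse(0))

    // after: the database projects the height column; only an Int comes back
    filterDAO.getBestFilterHeight
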


@@ -147,4 +147,23 @@ case class CompactFilterDAO()(implicit
   def getBestFilter: Future[Option[CompactFilterDb]] = {
     safeDatabase.run(bestFilterQuery).map(_.headOption)
   }
+
+  private val bestFilterHeightQuery = {
+    val join = table
+      .join(blockHeaderTable)
+      .on(_.blockHash === _.hash)
+
+    val maxQuery = join.map(_._2.chainWork).max
+
+    join
+      .filter(_._2.chainWork === maxQuery)
+      .take(1)
+      .map(_._1.height)
+      .result
+      .transactionally
+  }
+
+  def getBestFilterHeight: Future[Int] = {
+    safeDatabase.run(bestFilterHeightQuery).map(_.headOption.getOrElse(0))
+  }
 }
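
The new query reuses the repo's best-chain-tip pattern: join filter rows to block headers, select the maximum accumulated chain work, and project only the height. The same query as above, restated with explanatory comments:

    private val bestFilterHeightQuery = {
      // Pair each filter with its block header so chain work is available
      val join = table
        .join(blockHeaderTable)
        .on(_.blockHash === _.hash)

      // The best tip is the header carrying the most accumulated chain work
      val maxQuery = join.map(_._2.chainWork).max

      join
        .filter(_._2.chainWork === maxQuery) // keep only the tip's filter row
        .take(1)                             // guard against chain-work ties
        .map(_._1.height)                    // project just the height column
        .result
        .transactionally
    }
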


@@ -145,6 +145,25 @@ case class CompactFilterHeaderDAO()(implicit
     safeDatabase.run(bestFilterHeaderQuery).map(_.headOption)
   }
 
+  private val bestFilterHeaderHeightQuery = {
+    val join = table
+      .join(blockHeaderTable)
+      .on(_.blockHash === _.hash)
+
+    val maxQuery = join.map(_._2.chainWork).max
+
+    join
+      .filter(_._2.chainWork === maxQuery)
+      .take(1)
+      .map(_._1.height)
+      .result
+      .transactionally
+  }
+
+  def getBestFilterHeaderHeight: Future[Int] = {
+    safeDatabase.run(bestFilterHeaderHeightQuery).map(_.headOption.getOrElse(0))
+  }
+
   /** This looks for best filter headers whose [[CompactFilterHeaderDb.blockHashBE]] are associated with the given
     * [[BlockHeaderDb.hashBE]] given as a parameter.
     */


@@ -39,10 +39,10 @@ trait ChainApi extends ChainQueryApi {
     */
   def processHeaders(headers: Vector[BlockHeader]): Future[ChainApi]
 
-  /** Gets a [[org.bitcoins.chain.models.BlockHeaderDb]] from the chain's database */
+  /** Gets a [[org.bitcoins.core.api.chain.db.BlockHeaderDb]] from the chain's database */
   def getHeader(hash: DoubleSha256DigestBE): Future[Option[BlockHeaderDb]]
 
-  /** Gets all [[org.bitcoins.chain.models.BlockHeaderDb]]s at a given height */
+  /** Gets all [[org.bitcoins.core.api.chain.db.BlockHeaderDb]]s at a given height */
   def getHeadersAtHeight(height: Int): Future[Vector[BlockHeaderDb]]
 
   /** Gets the number of blocks in the database */
@@ -83,7 +83,7 @@ trait ChainApi extends ChainQueryApi {
    * Generates a filter header range in form of (startHeight, stopHash) by the given stop hash.
    */
   def nextFilterHeaderBatchRange(
-      prevStopHash: DoubleSha256DigestBE,
+      startHeight: Int,
       batchSize: Int): Future[Option[FilterSyncMarker]]
 
   /**
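
Note that the scaladoc above still says "by the given stop hash" even though the parameter is now a start height; this commit leaves that comment untouched. A hedged sketch of how a caller drives the height-based API (assumes a ChainApi instance named chainApi and an ExecutionContext in scope):

    for {
      // height of the last synced filter; 0 when no filters are synced
      filterCount <- chainApi.getFilterCount()
      // Some(FilterSyncMarker) while filters remain, None once caught up
      markerOpt <- chainApi.nextFilterHeaderBatchRange(startHeight = filterCount,
                                                       batchSize = 1000)
    } yield markerOpt
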


@@ -60,7 +60,7 @@ bitcoin-s {
       # to keep the sync time fast, however, for regtest it should be small
       # so it does not exceed the chain size.
-      filter-batch-size = 100
+      filter-batch-size = 1000
   }
   # this config key is read by Slick
   db {


@@ -168,7 +168,7 @@ bitcoin-s {
       # to keep the sync time fast, however, for regtest it should be small
       # so it does not exceed the chain size.
-      filter-batch-size = 100
+      filter-batch-size = 1000
   }
 
   hikari-logging = true


@@ -77,11 +77,11 @@ case class NeutrinoNode(
           prevStopHash = header.hashBE)
 
         // If we have started syncing filters
-        if (filterCount != filterHeaderCount)
+        if (filterCount != filterHeaderCount && filterCount != 0)
           peerMsgSender.sendNextGetCompactFilterCommand(
             chainApi = chainApi,
             filterBatchSize = chainConfig.filterBatchSize,
-            stopHash = header.hashBE)
+            startHeight = filterCount)
       }
 
     logger.info(
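
The added filterCount != 0 conjunct keeps sync() from requesting filters before filter sync has begun; presumably the first batch is left to the filter-header flow in DataMessageHandler below. One reading of the guard's three states (counts are hypothetical):

    // filterCount == 0                    -> filter sync not started; do not request yet
    // 0 < filterCount < filterHeaderCount -> mid-sync; request the next batch by height
    // filterCount == filterHeaderCount    -> filters caught up; nothing to send
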


@@ -100,9 +100,10 @@ case class DataMessageHandler(
             Future.successful((filterHeaderHeight, filterHeight + 1))
           case (_, _) => // If either are None
             for {
-              filterHeaderCount <- chainApi.getFilterHeaderCount()
-              filterCount <- chainApi.getFilterCount()
-            } yield (filterHeaderCount, filterCount + 1)
+              filterHeaderHeight <- chainApi.getFilterHeaderCount()
+              filterHeight <- chainApi.getFilterCount()
+            } yield (filterHeaderHeight,
+                     if (filterHeight == 0) 0 else filterHeight + 1)
         }
       newSyncing =
         if (batchSizeFull) {
@@ -136,8 +137,7 @@ case class DataMessageHandler(
       if (batchSizeFull) {
         logger.info(
           s"Received maximum amount of filters in one batch. This means we are not synced, requesting more")
-        sendNextGetCompactFilterCommand(peerMsgSender,
-                                        filter.blockHash.flip)
+        sendNextGetCompactFilterCommand(peerMsgSender, newFilterHeight)
       } else FutureUtil.unit
     } yield {
       this.copy(
@@ -352,26 +352,17 @@ case class DataMessageHandler(
 
   private def sendNextGetCompactFilterCommand(
       peerMsgSender: PeerMessageSender,
-      stopHash: DoubleSha256DigestBE): Future[Boolean] =
+      startHeight: Int): Future[Boolean] =
     peerMsgSender.sendNextGetCompactFilterCommand(chainApi = chainApi,
                                                   filterBatchSize =
                                                     chainConfig.filterBatchSize,
-                                                  stopHash = stopHash)
+                                                  startHeight = startHeight)
 
   private def sendFirstGetCompactFilterCommand(
       peerMsgSender: PeerMessageSender): Future[Boolean] =
     for {
       filterCount <- chainApi.getFilterCount()
-      highestFilterOpt <-
-        chainApi
-          .getFiltersAtHeight(filterCount)
-          .map(_.headOption)
-      highestFilterBlockHash =
-        highestFilterOpt
-          .map(_.blockHashBE)
-          .getOrElse(DoubleSha256DigestBE.empty)
-      res <-
-        sendNextGetCompactFilterCommand(peerMsgSender, highestFilterBlockHash)
+      res <- sendNextGetCompactFilterCommand(peerMsgSender, filterCount)
     } yield res
 
   private def handleInventoryMsg(
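
With a height-based API, sendFirstGetCompactFilterCommand collapses to passing the current filter count straight through; the old code round-tripped from height to filter row to block hash, only for the receiver to turn the hash back into a height. The surviving path, restated:

    // old: height -> CompactFilterDb row -> block hash -> peer -> height again
    // new: height -> peer
    for {
      filterCount <- chainApi.getFilterCount()
      res <- sendNextGetCompactFilterCommand(peerMsgSender, filterCount)
    } yield res
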


@@ -194,11 +194,10 @@ case class PeerMessageSender(client: P2PClient)(implicit conf: NodeAppConfig)
   private[node] def sendNextGetCompactFilterCommand(
       chainApi: ChainApi,
       filterBatchSize: Int,
-      stopHash: DoubleSha256DigestBE)(implicit
-      ec: ExecutionContext): Future[Boolean] = {
+      startHeight: Int)(implicit ec: ExecutionContext): Future[Boolean] = {
     for {
       filterSyncMarkerOpt <-
-        chainApi.nextFilterHeaderBatchRange(stopHash, filterBatchSize)
+        chainApi.nextFilterHeaderBatchRange(startHeight, filterBatchSize)
       res <- filterSyncMarkerOpt match {
         case Some(filterSyncMarker) =>
           logger.info(s"Requesting compact filters from $filterSyncMarker")


@@ -76,7 +76,9 @@ bitcoin-s {
       # to keep the sync time fast, however, for regtest it should be small
       # so it does not exceed the chain size.
-      filter-batch-size = 100
+      # Set a small filter batch size in testkit so we test fetching
+      # multiple filter batches
+      filter-batch-size = 10
   }
   # this config key is read by Slick
   db {


@@ -108,7 +108,7 @@ trait NodeUnitTest extends BitcoinSFixture with EmbeddedPg {
       Future.successful(None)
 
     override def nextFilterHeaderBatchRange(
-        stopHash: DoubleSha256DigestBE,
+        startHeight: Int,
         batchSize: Int): Future[Option[FilterSyncMarker]] =
       Future.successful(None)


@@ -153,7 +153,7 @@ private[wallet] trait UtxoHandling extends WalletLogger {
       _ =
         if (toUpdate.nonEmpty)
           logger.info(s"${toUpdate.size} txos are now confirmed!")
-        else logger.info("No txos to be confirmed")
+        else logger.debug("No txos to be confirmed")
       updated <- spendingInfoDAO.upsertAllSpendingInfoDb(toUpdate.flatten)
     } yield updated
   }