Mirror of https://github.com/bitcoin-s/bitcoin-s.git
2021 01 27 connection pool (#2578)
* Give a connection pool example config
* Get Hikari logging working on Postgres
* Fix 2.12.x
* Bump metrics-core to a supported version
* Add hikari-logging and hikari-logging-interval configuration options to turn Hikari logging on/off and schedule how often the logs appear
* Turn off Hikari logging in db-commons/reference.conf and add it to appserver/reference.conf
* Default the logging to off in the Scala code
* Make sure the connection pool is enabled in db-commons, add a default database configuration to the website
* Address code review, fix things that didn't need to be changed
This commit is contained in:
parent 092b5b2cab
commit 756bfc4427
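The per-module wiring this commit repeats in ChainAppConfig, NodeAppConfig, WalletAppConfig and DLCOracleAppConfig boils down to the start/stop flow sketched below. Apart from isHikariLoggingEnabled, hikariLoggingInterval, startHikariLogger and stopHikariLogger, the trait and member names are hypothetical stand-ins, not code from this commit.

```scala
import scala.concurrent.Future
import scala.concurrent.duration.Duration

// Hypothetical stand-in trait; only the control flow mirrors the diff below.
trait HikariLoggingLifecycle {
  def isHikariLoggingEnabled: Boolean
  def hikariLoggingInterval: Option[Duration]
  protected def startHikariLogger(interval: Duration): Unit
  protected def stopHikariLogger(): Unit

  def start(): Future[Unit] = {
    // migrations etc. would run first; then the pool logger is started on demand
    if (isHikariLoggingEnabled) {
      // .get is safe because hikari logging is enabled
      startHikariLogger(hikariLoggingInterval.get)
    }
    Future.unit
  }

  def stop(): Future[Unit] = {
    stopHikariLogger()
    Future.unit
  }
}
```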
@@ -1,3 +1,10 @@
bitcoin-s {
    oracle {
        hikari-logging = true
        hikari-logging-interval = 1 minute
    }
}

akka {

    # Set these to the defaults instead of the
@@ -5,4 +12,4 @@ akka {
    http.server.request-timeout = 10s
    http.server.parsing.max-content-length = 8m
    http.client.parsing.max-content-length = 8m
}
}
@@ -93,7 +93,7 @@

    <!-- Disable slick logging in server -->
    <logger name="slick" level="OFF"/>
-   <logger name="com.zaxxer" level="OFF"/>
+   <logger name="com.zaxxer" level="INFO"/>

    <!-- Get rid of messages like this:
    Connection attempt failed. Backing off new connection
@@ -1,8 +1,21 @@
bitcoin-s {
    network = mainnet

    chain {
        hikari-logging = true
        hikari-logging-interval = 1 minute
    }

    wallet {
        hikari-logging = true
        hikari-logging-interval = 1 minute
    }

    node {
        mode = neutrino # neutrino, spv, bitcoind
        peers = ["neutrino.suredbits.com:8333"]

        hikari-logging = true
        hikari-logging-interval = 1 minute
    }
}
@@ -92,11 +92,23 @@ case class ChainAppConfig(
        }
      }
    } yield {
      if (isHikariLoggingEnabled) {
        //.get is safe because hikari logging is enabled
        startHikariLogger(hikariLoggingInterval.get)
        ()
      }

      logger.info(s"Applied ${numMigrations} migrations to chain project")
      ()
    }
  }

  override def stop(): Future[Unit] = {
    val _ = stopHikariLogger()
    FutureUtil.unit
  }

  lazy val filterHeaderBatchSize: Int = {
    // try by network, if that fails, try general
    try {
@@ -97,7 +97,8 @@ class AppConfigTest extends BitcoinSAsyncTest {
    //to freshly load all system properties
    ConfigFactory.invalidateCaches()

-   val walletAppConfig = WalletAppConfig(datadir)
+   val walletAppConfig =
+     WalletAppConfig(datadir)
    val assertF = for {
      _ <- walletAppConfig.start()
    } yield {
@@ -20,8 +20,11 @@ bitcoin-s {
            # see: https://github.com/bitcoin-s/bitcoin-s/pull/1840
            numThreads = 1
            queueSize=5000
-           connectionPool = disabled
+           connectionPool = "HikariCP"
+           registerMbeans = true
        }
        hikari-logging = false
        hikari-logging-interval = 1 minute
    }

    node = ${bitcoin-s.dbDefault}
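A note on `registerMbeans = true` above: it makes HikariCP register a `HikariPoolMXBean` under `com.zaxxer.hikari:type=Pool (<poolName>)`, which is exactly what the new HikariLogging class later in this diff reads. Below is a minimal sketch of that lookup; the pool name is an assumption (the actual names such as "chain-connection-pool" are only assigned in the website configuration further down), and the proxy only resolves if a pool with that name is registered in the running JVM.

```scala
import java.lang.management.ManagementFactory
import javax.management.{JMX, ObjectName}

import com.zaxxer.hikari.HikariPoolMXBean

object PoolMBeanSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical pool name; only resolves if a pool with this name exists.
    val poolName = "chain-connection-pool"
    val mBeanServer = ManagementFactory.getPlatformMBeanServer
    val objectName = new ObjectName(s"com.zaxxer.hikari:type=Pool ($poolName)")

    // Read the same counters HikariLogging reports on a schedule.
    val poolBean =
      JMX.newMXBeanProxy(mBeanServer, objectName, classOf[HikariPoolMXBean])

    println(s"active=${poolBean.getActiveConnections} " +
      s"idle=${poolBean.getIdleConnections} " +
      s"waiting=${poolBean.getThreadsAwaitingConnection} " +
      s"total=${poolBean.getTotalConnections}")
  }
}
```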
@@ -107,7 +107,7 @@ abstract class AppConfig extends StartStopAsync[Unit] with BitcoinSLogger {
  /**
    * Name of the module. `chain`, `wallet`, `node` etc.
    */
- protected[bitcoins] def moduleName: String
+ private[bitcoins] def moduleName: String

  /** Chain parameters for the blockchain we're on */
  lazy val chain: BitcoinChainParams = {
@@ -7,7 +7,9 @@ import slick.basic.DatabaseConfig
import slick.jdbc.JdbcProfile

import java.nio.file.{Path, Paths}
import java.util.concurrent.TimeUnit
import scala.concurrent.Future
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.util.{Failure, Success, Try}

abstract class DbAppConfig extends AppConfig {
@@ -120,4 +122,35 @@ abstract class DbAppConfig extends AppConfig {
        throw exception
    }
  }

  lazy val isHikariLoggingEnabled: Boolean = {
    val hikariLoggingOpt =
      config.getBooleanOpt(s"bitcoin-s.$moduleName.hikari-logging")
    hikariLoggingOpt match {
      case Some(bool) => bool
      case None =>
        //default hikari logging off
        false
    }
  }

  /** Gets how often we should log hikari connection pool stats.
    * If None, hikari logging is disabled ([[isHikariLoggingEnabled]] is false).
    */
  lazy val hikariLoggingInterval: Option[Duration] = {
    if (isHikariLoggingEnabled) {
      val intervalOpt =
        config.getDurationOpt(s"bitcoin-s.$moduleName.hikari-logging-interval")
      val interval = intervalOpt match {
        case Some(interval) => interval
        case None =>
          //default to 1 minute if nothing is set
          new FiniteDuration(1, TimeUnit.MINUTES)
      }
      Some(interval)
    } else {
      None
    }
  }
}
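To make the defaults above concrete, the sketch below resolves the two keys directly with Typesafe Config, following the same logic as isHikariLoggingEnabled and hikariLoggingInterval: logging defaults to off when the key is absent, and the interval falls back to 1 minute when logging is enabled but no interval is set. The sample config string and object name are hypothetical.

```scala
import com.typesafe.config.ConfigFactory

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.{Duration, FiniteDuration}

object HikariConfigResolutionSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical module config mirroring the keys read by DbAppConfig above.
    val config = ConfigFactory.parseString(
      """
        |bitcoin-s.chain.hikari-logging = true
        |bitcoin-s.chain.hikari-logging-interval = 30 seconds
        |""".stripMargin)

    val moduleName = "chain"

    // default is off when the key is absent
    val enabled =
      if (config.hasPath(s"bitcoin-s.$moduleName.hikari-logging"))
        config.getBoolean(s"bitcoin-s.$moduleName.hikari-logging")
      else false

    // only meaningful when enabled; falls back to 1 minute when unset
    val interval: Option[Duration] =
      if (!enabled) None
      else if (config.hasPath(s"bitcoin-s.$moduleName.hikari-logging-interval")) {
        val javaDuration =
          config.getDuration(s"bitcoin-s.$moduleName.hikari-logging-interval")
        Some(new FiniteDuration(javaDuration.toNanos, TimeUnit.NANOSECONDS))
      } else Some(new FiniteDuration(1, TimeUnit.MINUTES))

    println(s"enabled=$enabled interval=$interval")
  }
}
```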
db-commons/src/main/scala/org/bitcoins/db/HikariLogging.scala (new file, 239 lines)
@@ -0,0 +1,239 @@
package org.bitcoins.db

import com.codahale.metrics.{Histogram, MetricRegistry}
import com.zaxxer.hikari.{HikariDataSource, HikariPoolMXBean}
import org.bitcoins.core.util.{BitcoinSLogger, StartStop}
import slick.jdbc.JdbcDataSource
import slick.jdbc.hikaricp.HikariCPJdbcDataSource
import slick.util.AsyncExecutorMXBean

import java.lang.management.ManagementFactory
import java.util.concurrent.{Executors, ScheduledFuture, TimeUnit}
import javax.management.{JMX, ObjectName}
import scala.concurrent.duration._

case class HikariLogging(
    hikariDataSource: HikariDataSource,
    moduleName: String,
    interval: Duration
) extends BitcoinSLogger
    with StartStop[HikariLogging] {

  /** Logs thread activity */
  private case class HikariActivityUpdate(
      active: Int,
      idle: Int,
      waiting: Int,
      total: Int,
      maxThreads: Int,
      activeThreads: Int,
      maxQueueSize: Int,
      queueSize: Int
  ) {

    override def toString: String = {
      s"""
         | "${moduleName}-activity-update" : {
         | "active" : ${active},
         | "idle" : ${idle},
         | "waiting" : ${waiting},
         | "total" : ${total},
         | "maxThreads" : ${maxThreads},
         | "activeThreads" : ${activeThreads},
         | "maxQueueSize" : ${maxQueueSize},
         | "queueSize" : ${queueSize}
         |}
         |""".stripMargin.replaceAll("\\s", "")
    }
  }

  /**
    * From the docs:
    * How long each connection is used before being returned to the pool. This is the "out of pool" or "in-use" time.
    * @see https://github.com/brettwooldridge/HikariCP/wiki/Dropwizard-Metrics
    */
  private case class HikariPoolUsageUpdate(
      `75thPercentile`: Double,
      `95thPercentile`: Double,
      `98thPercentile`: Double,
      `99thPercentile`: Double,
      `999thPercentile`: Double,
      max: Double,
      min: Double,
      median: Double,
      mean: Double
  ) {

    override def toString: String = {
      s"""
         |"${moduleName}-pool-usage" : {
         | "max" : ${max},
         | "min" : ${min},
         | "median" : ${median},
         | "mean" : ${mean},
         | "75thPercentile" : ${`75thPercentile`},
         | "95thPercentile" : ${`95thPercentile`},
         | "98thPercentile" : ${`98thPercentile`},
         | "99thPercentile" : ${`99thPercentile`},
         | "999thPercentile" : ${`999thPercentile`}
         |}
         |""".stripMargin.replaceAll("\\s", "")
    }
  }

  //this is needed to get the 'AsyncExecutor' bean below to register properly
  //dbConfig.database.ioExecutionContext

  private lazy val poolName = hikariDataSource.getPoolName
  private lazy val mBeanServer = ManagementFactory.getPlatformMBeanServer

  lazy val aeBeanName = new ObjectName(
    s"slick:type=AsyncExecutor,name=$poolName")

  lazy val poolBeanName = new ObjectName(
    s"com.zaxxer.hikari:type=Pool ($poolName)")

  lazy val poolConfigBeanName = new ObjectName(
    s"com.zaxxer.hikari:type=PoolConfig ($poolName)"
  )

  /**
    * MBean uses random string incantations for
    * accessing attributes :-(
    *
    * @see [[https://github.com/brettwooldridge/HikariCP/wiki/MBean-(JMX)-Monitoring-and-Management#programmatic-access HikariCP docs]]
    */
  private lazy val objectName = new ObjectName(
    s"com.zaxxer.hikari:type=Pool ($poolName)"
  )

  /**
    * @see https://github.com/brettwooldridge/HikariCP/wiki/MBean-(JMX)-Monitoring-and-Management
    */
  private lazy val hikariMxBean =
    JMX.newMXBeanProxy(mBeanServer, objectName, classOf[HikariPoolMXBean])

  /**
    * @see http://slick.lightbend.com/doc/3.3.0/config.html#monitoring
    */
  private lazy val slickMxBean =
    JMX.newMXBeanProxy(mBeanServer, aeBeanName, classOf[AsyncExecutorMXBean])

  // https://github.com/brettwooldridge/HikariCP/wiki/Dropwizard-Metrics#pool-namepoolusage
  private lazy val poolUsageMetricName = s"$poolName.pool.Usage"

  private lazy val metricRegistry: MetricRegistry = Option(
    hikariDataSource.getMetricRegistry
  ) match {
    case Some(registry: MetricRegistry) =>
      registry
    case Some(other: AnyRef) =>
      val msg = s"Could not load metric registry, got $other"
      logger.error(msg)
      throw new RuntimeException(msg)
    case None =>
      val msg = "Could not load metric registry, got null!"
      logger.error(msg)
      throw new RuntimeException(msg)
  }

  private val logHikariStats: Runnable = () => {

    val usageHistogram: Histogram =
      metricRegistry.getHistograms().get(poolUsageMetricName)
    val usageSnapshot = usageHistogram.getSnapshot()

    val poolUsageUpdate = HikariPoolUsageUpdate(
      `75thPercentile` = usageSnapshot.get75thPercentile(),
      `95thPercentile` = usageSnapshot.get95thPercentile(),
      `98thPercentile` = usageSnapshot.get98thPercentile(),
      `99thPercentile` = usageSnapshot.get99thPercentile(),
      `999thPercentile` = usageSnapshot.get999thPercentile(),
      max = usageSnapshot.getMax().toDouble,
      min = usageSnapshot.getMin().toDouble,
      median = usageSnapshot.getMedian(),
      mean = usageSnapshot.getMean()
    )

    val activityUpdate = HikariActivityUpdate(
      active = hikariMxBean.getActiveConnections,
      idle = hikariMxBean.getIdleConnections,
      waiting = hikariMxBean.getThreadsAwaitingConnection,
      total = hikariMxBean.getTotalConnections,
      maxThreads = slickMxBean.getMaxThreads,
      activeThreads = slickMxBean.getActiveThreads,
      maxQueueSize = slickMxBean.getMaxQueueSize,
      queueSize = slickMxBean.getQueueSize
    )

    logger.info(poolUsageUpdate)
    logger.info(activityUpdate)
  }

  private[this] var started: Boolean = false
  private[this] var cancelOpt: Option[ScheduledFuture[_]] = None

  override def start(): HikariLogging = {
    if (!started) {
      val metricRegistry = new MetricRegistry

      mBeanServer.getMBeanInfo(aeBeanName)
      mBeanServer.getMBeanInfo(poolBeanName)
      mBeanServer.getMBeanInfo(poolConfigBeanName)

      hikariDataSource.setMetricRegistry(metricRegistry)
      val future = HikariLogging.scheduler.scheduleAtFixedRate(
        logHikariStats,
        interval.toMillis,
        interval.toMillis,
        TimeUnit.MILLISECONDS)
      cancelOpt = Some(future)
      started = true
      this
    } else {
      this
    }
  }

  override def stop(): HikariLogging = {
    cancelOpt match {
      case Some(cancel) =>
        if (!cancel.isCancelled) {
          val _: Boolean = cancel.cancel(true)
          this
        } else {
          cancelOpt = None
          this
        }
      case None =>
        this
    }
  }
}

object HikariLogging extends BitcoinSLogger {
  private[db] val scheduler = Executors.newScheduledThreadPool(1)

  /** Returns a started hikari logger if configuration is correct, else None
    * @param jdbcProfileComponent the database component we are logging for
    * @param interval how often the hikari logs should be output
    */
  def fromJdbcProfileComponent[T <: DbAppConfig](
      jdbcProfileComponent: JdbcProfileComponent[T],
      interval: Duration): Option[HikariLogging] = {
    val dataSource = jdbcProfileComponent.database.source
    val moduleName = jdbcProfileComponent.appConfig.moduleName
    dataSource match {
      case hikariSource: HikariCPJdbcDataSource =>
        val started = HikariLogging(hikariSource.ds, moduleName, interval)
          .start()
        Some(started)
      case _: JdbcDataSource =>
        val err = {
          s"JdbcProfile Component is not a Hikari source=${jdbcProfileComponent.dbConfig.profile}"
        }
        logger.error(err)
        None
    }
  }
}
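Stripped of the JMX and Dropwizard plumbing, start() and stop() above are a fixed-rate scheduling lifecycle: schedule the logging Runnable, remember the ScheduledFuture, cancel it on stop. A self-contained sketch of just that lifecycle follows; the object name, task body and 1-second interval are illustrative, not code from the commit.

```scala
import java.util.concurrent.{Executors, ScheduledFuture, TimeUnit}
import scala.concurrent.duration._

object FixedRateLoggingSketch {
  def main(args: Array[String]): Unit = {
    val scheduler = Executors.newScheduledThreadPool(1)
    val interval: FiniteDuration = 1.second // the commit defaults to 1 minute via config

    // stand-in for logHikariStats
    val task: Runnable = () => println("pool stats would be logged here")

    // what start() does: schedule at a fixed rate and keep the handle
    val handle: ScheduledFuture[_] = scheduler.scheduleAtFixedRate(
      task,
      interval.toMillis,
      interval.toMillis,
      TimeUnit.MILLISECONDS)

    Thread.sleep(3500) // let it fire a few times

    // what stop() does via cancelOpt
    handle.cancel(true)
    scheduler.shutdown()
  }
}
```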
@@ -4,6 +4,8 @@ import org.bitcoins.core.util.BitcoinSLogger
import slick.basic.DatabaseConfig
import slick.jdbc.JdbcProfile

import scala.concurrent.duration.Duration

trait JdbcProfileComponent[+ConfigType <: DbAppConfig] extends BitcoinSLogger {

  def appConfig: ConfigType
@@ -29,4 +31,33 @@ trait JdbcProfileComponent[+ConfigType <: DbAppConfig] extends BitcoinSLogger {
  lazy val database: Database = {
    dbConfig.db
  }

  private[this] var hikariLoggerOpt: Option[HikariLogging] = None

  /** Starts the background logger for hikari
    * @param interval - how often hikari logs database connection pool information
    */
  protected def startHikariLogger(interval: Duration): HikariLogging = {
    hikariLoggerOpt match {
      case Some(hikariLogger) => hikariLogger
      case None =>
        //this is needed to get the 'AsyncExecutor' bean below to register properly
        //dbConfig.database.ioExecutionContext
        val _ = database.ioExecutionContext
        //start a new one
        HikariLogging.fromJdbcProfileComponent(this, interval) match {
          case Some(hikariLogger) =>
            hikariLoggerOpt = Some(hikariLogger)
            hikariLogger
          case None =>
            sys.error(s"Could not start hikari logging")
        }
    }
  }

  protected def stopHikariLogger(): Unit = {
    hikariLoggerOpt.foreach(_.stop())
    ()
  }
}
@@ -2,6 +2,9 @@ package org.bitcoins

import com.typesafe.config.{Config, ConfigRenderOptions}

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.{Duration, FiniteDuration}

package object db {

  implicit class ConfigOps(private val config: Config) extends AnyVal {
@@ -51,5 +54,19 @@ package object db {
        None
      }
    }

    def getBooleanOpt(key: String): Option[Boolean] = {
      if (config.hasPath(key)) Some(config.getBoolean(key))
      else None
    }

    def getDurationOpt(key: String): Option[Duration] = {
      if (config.hasPath(key)) {
        val javaDuration = config.getDuration(key)
        val scalaDuration =
          new FiniteDuration(javaDuration.toNanos, TimeUnit.NANOSECONDS)
        Some(scalaDuration)
      } else None
    }
  }
}
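A small usage sketch of the ConfigOps helpers added above: because ConfigOps is an implicit value class, getBooleanOpt becomes available on any com.typesafe.config.Config once org.bitcoins.db is in scope. To stay self-contained, the sketch restates the pattern locally instead of importing the bitcoin-s package; the config string is hypothetical.

```scala
import com.typesafe.config.{Config, ConfigFactory}

object ConfigOpsSketch {
  // Minimal re-statement of the ConfigOps pattern above, for illustration only.
  implicit class RichConfig(private val config: Config) extends AnyVal {
    def getBooleanOpt(key: String): Option[Boolean] =
      if (config.hasPath(key)) Some(config.getBoolean(key)) else None
  }

  def main(args: Array[String]): Unit = {
    val cfg = ConfigFactory.parseString("bitcoin-s.node.hikari-logging = false")

    // missing keys resolve to None instead of throwing ConfigException.Missing
    println(cfg.getBooleanOpt("bitcoin-s.node.hikari-logging"))   // Some(false)
    println(cfg.getBooleanOpt("bitcoin-s.wallet.hikari-logging")) // None
  }
}
```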
@@ -62,29 +62,41 @@ case class DLCOracleAppConfig(
      logger.info(s"Applied $numMigrations migrations to the dlc oracle project")

      val migrations = migrationsApplied()
-     if (migrations == 2 || migrations == 3) { // For V2/V3 migrations
-       logger.debug(s"Doing V2/V3 Migration")
+     val migrationWorkAroundF =
+       if (migrations == 2 || migrations == 3) { // For V2/V3 migrations
+         logger.debug(s"Doing V2/V3 Migration")

-       val dummyMigrationTLV = EnumEventDescriptorV0TLV.dummy
+         val dummyMigrationTLV = EnumEventDescriptorV0TLV.dummy

-       val eventDAO = EventDAO()(ec, appConfig)
-       for {
-         // get all old events
-         allEvents <- eventDAO.findByEventDescriptor(dummyMigrationTLV)
-         allOutcomes <- EventOutcomeDAO()(ec, appConfig).findAll()
+         val eventDAO = EventDAO()(ec, appConfig)
+         for {
+           // get all old events
+           allEvents <- eventDAO.findByEventDescriptor(dummyMigrationTLV)
+           allOutcomes <- EventOutcomeDAO()(ec, appConfig).findAll()

-         outcomesByNonce = allOutcomes.groupBy(_.nonce)
-         // Update them to have the correct event descriptor
-         updated = allEvents.map { eventDb =>
-           val outcomeDbs = outcomesByNonce(eventDb.nonce)
-           val descriptor =
-             EventOutcomeDbHelper.createEnumEventDescriptor(outcomeDbs)
-           eventDb.copy(eventDescriptorTLV = descriptor)
-         }
+           outcomesByNonce = allOutcomes.groupBy(_.nonce)
+           // Update them to have the correct event descriptor
+           updated = allEvents.map { eventDb =>
+             val outcomeDbs = outcomesByNonce(eventDb.nonce)
+             val descriptor =
+               EventOutcomeDbHelper.createEnumEventDescriptor(outcomeDbs)
+             eventDb.copy(eventDescriptorTLV = descriptor)
+           }

-         _ <- eventDAO.upsertAll(updated)
-       } yield ()
-     } else FutureUtil.unit
+           _ <- eventDAO.upsertAll(updated)
+         } yield ()
+       } else FutureUtil.unit

+     migrationWorkAroundF.map { _ =>
+       if (isHikariLoggingEnabled) {
+         //.get is safe because hikari logging is enabled
+         startHikariLogger(hikariLoggingInterval.get)
+         ()
+       } else {
+         ()
+       }
+     }
    }
  }

@@ -98,7 +98,31 @@ to ensure the entire module is initialized correctly.
bitcoin-s {
    datadir = ${HOME}/.bitcoin-s
    network = regtest # regtest, testnet3, mainnet, signet

    dbDefault = {
        dataSourceClass = slick.jdbc.DatabaseUrlDataSource
        profile = "slick.jdbc.SQLiteProfile$"

        db {
            # for information on parameters available here see
            # https://scala-slick.org/doc/3.3.1/api/index.html#slick.jdbc.JdbcBackend$DatabaseFactoryDef@forConfig(String,Config,Driver,ClassLoader):Database
            path = ${bitcoin-s.datadir}/${bitcoin-s.network}/
            driver = org.sqlite.JDBC
            user = ""
            password = ""
            host = localhost
            port = 5432

            # this needs to be set to 1 for SQLITE as it does not support concurrent database operations
            # see: https://github.com/bitcoin-s/bitcoin-s/pull/1840
            numThreads = 1
            queueSize=5000
            connectionPool = "HikariCP"
            registerMbeans = true
        }
        hikari-logging = false
        hikari-logging-interval = 1 minute
    }

    bitcoind-rpc {
        # bitcoind rpc username
        rpcuser = user
@@ -128,6 +152,9 @@ bitcoin-s {
        # (e.g. "neutrino.testnet3.suredbits.com:18333")
        # Port number is optional, the default value is 8333 for mainnet,
        # 18333 for testnet and 18444 for regtest.

        hikari-logging = true
        hikari-logging-interval = 1 minute
    }

    chain {
@@ -143,6 +170,9 @@ bitcoin-s {

            filter-batch-size = 100
        }

        hikari-logging = true
        hikari-logging-interval = 1 minute
    }

    # settings for wallet module
@@ -171,6 +201,9 @@ bitcoin-s {
        # How long we attempt to generate an address for
        # before we timeout
        addressQueueTimeout = 5 seconds

        hikari-logging = true
        hikari-logging-interval = 1 minute
    }

    keymanager {
@@ -280,20 +313,28 @@ bitcoin-s {
            user = "user"
            password = "topsecret"
            numThreads = 5

            # http://scala-slick.org/doc/3.3.3/database.html
            connectionPool = "HikariCP"
            registerMbeans = true
        }
    }

    chain.profile = ${bitcoin-s.common.profile}
    chain.db = ${bitcoin-s.common.db}

    chain.db.poolName = "chain-connection-pool"

    node.profile = ${bitcoin-s.common.profile}
    node.db = ${bitcoin-s.common.db}
    node.db.poolName = "node-connection-pool"

    wallet.profile = ${bitcoin-s.common.profile}
    wallet.db = ${bitcoin-s.common.db}
    wallet.db.poolName = "wallet-connection-pool"

    oracle.profile = ${bitcoin-s.common.profile}
    oracle.db = ${bitcoin-s.common.db}
    oracle.db.poolName = "oracle-connection-pool"
}
```

@@ -4,7 +4,7 @@ import akka.Done
import akka.actor.ActorSystem
import com.typesafe.config.Config
import org.bitcoins.chain.config.ChainAppConfig
- import org.bitcoins.core.util.Mutable
+ import org.bitcoins.core.util.{FutureUtil, Mutable}
import org.bitcoins.db.{AppConfigFactory, DbAppConfig, JdbcProfileComponent}
import org.bitcoins.node._
import org.bitcoins.node.db.NodeDbManagement
@@ -53,11 +53,22 @@ case class NodeAppConfig(
    } yield {
      logger.debug(s"Initializing node setup")
      val numMigrations = migrate()

      val _ = if (isHikariLoggingEnabled) {
        //.get is safe because hikari logging is enabled
        startHikariLogger(hikariLoggingInterval.get)
        ()
      } else {
        ()
      }
      logger.info(s"Applied $numMigrations migrations for the node project")
    }
  }

  override def stop(): Future[Unit] = {
    val _ = stopHikariLogger()
    FutureUtil.unit
  }

  lazy val nodeType: NodeType =
    NodeType.fromString(config.getString("bitcoin-s.node.mode"))
@@ -4,6 +4,7 @@ object Deps {

  object V {
    val bouncyCastle = "1.68"
    val dropwizardMetricsV = "4.1.0" //https://github.com/dropwizard/metrics
    val logback = "1.2.3"
    val grizzledSlf4j = "1.3.4"
    val scalacheck = "1.15.2"
@@ -179,6 +180,9 @@ object Deps {

    val pgEmbedded =
      "com.opentable.components" % "otj-pg-embedded" % V.pgEmbeddedV withSources () withJavadoc ()

    val dropwizardMetrics =
      "io.dropwizard.metrics" % "metrics-core" % V.dropwizardMetricsV withSources () withJavadoc ()
  }

  object Test {
@@ -293,6 +297,7 @@ object Deps {
  )

  val dbCommons = List(
    Compile.dropwizardMetrics,
    Compile.flyway,
    Compile.slick,
    Compile.sourcecode,
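For reference, outside the project's Deps helper the dependency added above would be declared in a plain build.sbt roughly as follows. The version comes from this commit; the standalone setting itself is an assumption about the reader's build, not part of the diff.

```scala
// build.sbt (sketch): Dropwizard metrics-core, used by HikariCP's pool metrics
libraryDependencies += "io.dropwizard.metrics" % "metrics-core" % "4.1.0"
```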
@@ -1,5 +1,4 @@
bitcoin-s {
    datadir = ${HOME}/.bitcoin-s
    network = regtest # regtest, testnet3, mainnet, signet

    dbDefault = {
@@ -22,6 +21,10 @@ bitcoin-s {
        queueSize=5000
        connectionPool = disabled
        }

        #turn hikari logging off for tests
        hikari-logging = false
        hikari-logging-interval = 0 seconds
    }

    oracle = ${bitcoin-s.dbDefault}
@@ -5,7 +5,7 @@ import org.bitcoins.core.api.chain.ChainQueryApi
import org.bitcoins.core.api.feeprovider.FeeRateApi
import org.bitcoins.core.api.node.NodeApi
import org.bitcoins.core.hd._
- import org.bitcoins.core.util.Mutable
+ import org.bitcoins.core.util.{FutureUtil, Mutable}
import org.bitcoins.core.wallet.keymanagement.{
  KeyManagerInitializeError,
  KeyManagerParams
@@ -154,10 +154,21 @@ case class WalletAppConfig(
        migrate()
      }

      if (isHikariLoggingEnabled) {
        //.get is safe because hikari logging is enabled
        startHikariLogger(hikariLoggingInterval.get)
      }
      logger.info(s"Applied $numMigrations migrations to the wallet project")
    }
  }

  override def stop(): Future[Unit] = {
    if (isHikariLoggingEnabled) {
      val _ = stopHikariLogger()
    }
    FutureUtil.unit
  }

  /** The path to our encrypted mnemonic seed */
  private[bitcoins] lazy val seedPath: Path = kmConf.seedPath