diff --git a/backend/mempool-config.sample.json b/backend/mempool-config.sample.json
index 80c2b4e28..76b27d630 100644
--- a/backend/mempool-config.sample.json
+++ b/backend/mempool-config.sample.json
@@ -52,6 +52,7 @@
   "ESPLORA": {
     "REST_API_URL": "http://127.0.0.1:3000",
     "UNIX_SOCKET_PATH": "/tmp/esplora-bitcoin-mainnet",
+    "BATCH_QUERY_BASE_SIZE": 1000,
     "RETRY_UNIX_SOCKET_AFTER": 30000,
     "REQUEST_TIMEOUT": 10000,
     "FALLBACK_TIMEOUT": 5000,
@@ -132,6 +133,11 @@
     "BISQ_URL": "https://bisq.markets/api",
     "BISQ_ONION": "http://bisqmktse2cabavbr2xjq7xw3h6g5ottemo5rolfcwt6aly6tp5fdryd.onion/api"
   },
+  "REDIS": {
+    "ENABLED": false,
+    "UNIX_SOCKET_PATH": "/tmp/redis.sock",
+    "BATCH_QUERY_BASE_SIZE": 5000
+  },
   "REPLICATION": {
     "ENABLED": false,
     "AUDIT": false,
diff --git a/backend/src/__fixtures__/mempool-config.template.json b/backend/src/__fixtures__/mempool-config.template.json
index 7d5a14de6..0c30651ce 100644
--- a/backend/src/__fixtures__/mempool-config.template.json
+++ b/backend/src/__fixtures__/mempool-config.template.json
@@ -53,6 +53,7 @@
   "ESPLORA": {
     "REST_API_URL": "__ESPLORA_REST_API_URL__",
     "UNIX_SOCKET_PATH": "__ESPLORA_UNIX_SOCKET_PATH__",
+    "BATCH_QUERY_BASE_SIZE": 1000,
     "RETRY_UNIX_SOCKET_AFTER": 888,
     "REQUEST_TIMEOUT": 10000,
     "FALLBACK_TIMEOUT": 5000,
@@ -140,6 +141,7 @@
   },
   "REDIS": {
     "ENABLED": false,
-    "UNIX_SOCKET_PATH": "/tmp/redis.sock"
+    "UNIX_SOCKET_PATH": "/tmp/redis.sock",
+    "BATCH_QUERY_BASE_SIZE": 5000
   }
 }
diff --git a/backend/src/__tests__/config.test.ts b/backend/src/__tests__/config.test.ts
index a565c64af..2991162e9 100644
--- a/backend/src/__tests__/config.test.ts
+++ b/backend/src/__tests__/config.test.ts
@@ -55,6 +55,7 @@ describe('Mempool Backend Config', () => {
       expect(config.ESPLORA).toStrictEqual({
         REST_API_URL: 'http://127.0.0.1:3000',
         UNIX_SOCKET_PATH: null,
+        BATCH_QUERY_BASE_SIZE: 1000,
         RETRY_UNIX_SOCKET_AFTER: 30000,
         REQUEST_TIMEOUT: 10000,
         FALLBACK_TIMEOUT: 5000,
@@ -144,7 +145,8 @@ describe('Mempool Backend Config', () => {

       expect(config.REDIS).toStrictEqual({
         ENABLED: false,
-        UNIX_SOCKET_PATH: ''
+        UNIX_SOCKET_PATH: '',
+        BATCH_QUERY_BASE_SIZE: 5000,
       });
     });
   });
diff --git a/backend/src/api/bitcoin/bitcoin-api-abstract-factory.ts b/backend/src/api/bitcoin/bitcoin-api-abstract-factory.ts
index f008e5ed8..5bd961e23 100644
--- a/backend/src/api/bitcoin/bitcoin-api-abstract-factory.ts
+++ b/backend/src/api/bitcoin/bitcoin-api-abstract-factory.ts
@@ -5,7 +5,7 @@ export interface AbstractBitcoinApi {
   $getRawTransaction(txId: string, skipConversion?: boolean, addPrevout?: boolean, lazyPrevouts?: boolean): Promise<IEsploraApi.Transaction>;
   $getRawTransactions(txids: string[]): Promise<IEsploraApi.Transaction[]>;
   $getMempoolTransactions(txids: string[]): Promise<IEsploraApi.Transaction[]>;
-  $getAllMempoolTransactions(lastTxid: string);
+  $getAllMempoolTransactions(lastTxid?: string, max_txs?: number);
   $getTransactionHex(txId: string): Promise<string>;
   $getBlockHeightTip(): Promise<number>;
   $getBlockHashTip(): Promise<string>;
diff --git a/backend/src/api/bitcoin/bitcoin-api.ts b/backend/src/api/bitcoin/bitcoin-api.ts
index 1722334df..f54c836f8 100644
--- a/backend/src/api/bitcoin/bitcoin-api.ts
+++ b/backend/src/api/bitcoin/bitcoin-api.ts
@@ -77,7 +77,7 @@ class BitcoinApi implements AbstractBitcoinApi {
     throw new Error('Method getMempoolTransactions not supported by the Bitcoin RPC API.');
   }

-  $getAllMempoolTransactions(lastTxid: string): Promise<IEsploraApi.Transaction[]> {
+  $getAllMempoolTransactions(lastTxid?: string, max_txs?: number): Promise<IEsploraApi.Transaction[]> {
     throw new Error('Method getAllMempoolTransactions not supported by the Bitcoin RPC API.');
   }

diff --git a/backend/src/api/bitcoin/esplora-api.ts b/backend/src/api/bitcoin/esplora-api.ts
index 19800cd21..c0b548b9a 100644
--- a/backend/src/api/bitcoin/esplora-api.ts
+++ b/backend/src/api/bitcoin/esplora-api.ts
@@ -8,8 +8,9 @@ import logger from '../../logger';
 interface FailoverHost {
   host: string,
   rtts: number[],
-  rtt: number
+  rtt: number,
   failures: number,
+  latestHeight?: number,
   socket?: boolean,
   outOfSync?: boolean,
   unreachable?: boolean,
@@ -92,6 +93,7 @@
         host.rtts.unshift(rtt);
         host.rtts.slice(0, 5);
         host.rtt = host.rtts.reduce((acc, l) => acc + l, 0) / host.rtts.length;
+        host.latestHeight = height;
         if (height == null || isNaN(height) || (maxHeight - height > 2)) {
           host.outOfSync = true;
         } else {
@@ -99,22 +101,23 @@
         }
         host.unreachable = false;
       } else {
+        host.outOfSync = true;
         host.unreachable = true;
       }
     }
     this.sortHosts();

-    logger.debug(`Tomahawk ranking: ${this.hosts.map(host => '\navg rtt ' + Math.round(host.rtt).toString().padStart(5, ' ') + ' | reachable? ' + (!host.unreachable || false).toString().padStart(5, ' ') + ' | in sync? ' + (!host.outOfSync || false).toString().padStart(5, ' ') + ` | ${host.host}`).join('')}`);
+    logger.debug(`Tomahawk ranking:\n${this.hosts.map((host, index) => this.formatRanking(index, host, this.activeHost, maxHeight)).join('\n')}`);

     // switch if the current host is out of sync or significantly slower than the next best alternative
     if (this.activeHost.outOfSync || this.activeHost.unreachable || (this.activeHost !== this.hosts[0] && this.hosts[0].preferred) || (!this.activeHost.preferred && this.activeHost.rtt > (this.hosts[0].rtt * 2) + 50)) {
       if (this.activeHost.unreachable) {
-        logger.warn(`Unable to reach ${this.activeHost.host}, failing over to next best alternative`);
+        logger.warn(`🚨🚨🚨 Unable to reach ${this.activeHost.host}, failing over to next best alternative 🚨🚨🚨`);
       } else if (this.activeHost.outOfSync) {
-        logger.warn(`${this.activeHost.host} has fallen behind, failing over to next best alternative`);
+        logger.warn(`🚨🚨🚨 ${this.activeHost.host} has fallen behind, failing over to next best alternative 🚨🚨🚨`);
       } else {
-        logger.debug(`${this.activeHost.host} is no longer the best esplora host`);
+        logger.debug(`🛠️ ${this.activeHost.host} is no longer the best esplora host 🛠️`);
       }
       this.electHost();
     }
@@ -122,6 +125,11 @@
     this.pollTimer = setTimeout(() => { this.pollHosts(); }, this.pollInterval);
   }

+  private formatRanking(index: number, host: FailoverHost, active: FailoverHost, maxHeight: number): string {
+    const heightStatus = host.outOfSync ? '🚫' : (host.latestHeight && host.latestHeight < maxHeight ? '🟧' : '✅');
+    return `${host === active ? '⭐️' : ' '} ${host.rtt < Infinity ? Math.round(host.rtt).toString().padStart(5, ' ') + 'ms' : ' - '} ${host.unreachable ? '🔥' : '✅'} | block: ${host.latestHeight || '??????'} ${heightStatus} | ${host.host} ${host === active ? '⭐️' : ' '}`;
+  }
+
   // sort hosts by connection quality, and update default fallback
   private sortHosts(): void {
     // sort by connection quality
@@ -156,7 +164,7 @@
   private addFailure(host: FailoverHost): FailoverHost {
     host.failures++;
     if (host.failures > 5 && this.multihost) {
-      logger.warn(`Too many esplora failures on ${this.activeHost.host}, falling back to next best alternative`);
+      logger.warn(`🚨🚨🚨 Too many esplora failures on ${this.activeHost.host}, falling back to next best alternative 🚨🚨🚨`);
       this.electHost();
       return this.activeHost;
     } else {
@@ -225,8 +233,8 @@ class ElectrsApi implements AbstractBitcoinApi {
     return this.failoverRouter.$post<IEsploraApi.Transaction[]>('/internal/mempool/txs', txids, 'json');
   }

-  async $getAllMempoolTransactions(lastSeenTxid?: string): Promise<IEsploraApi.Transaction[]> {
-    return this.failoverRouter.$get<IEsploraApi.Transaction[]>('/internal/mempool/txs' + (lastSeenTxid ? '/' + lastSeenTxid : ''));
+  async $getAllMempoolTransactions(lastSeenTxid?: string, max_txs?: number): Promise<IEsploraApi.Transaction[]> {
+    return this.failoverRouter.$get<IEsploraApi.Transaction[]>('/internal/mempool/txs' + (lastSeenTxid ? '/' + lastSeenTxid : ''), 'json', max_txs ? { max_txs } : null);
   }

   $getTransactionHex(txId: string): Promise<string> {
diff --git a/backend/src/api/blocks.ts b/backend/src/api/blocks.ts
index 64dc1d5ba..6b4a14a0e 100644
--- a/backend/src/api/blocks.ts
+++ b/backend/src/api/blocks.ts
@@ -761,8 +761,13 @@
       this.updateTimerProgress(timer, `saved ${this.currentBlockHeight} to database`);

       if (!fastForwarded) {
-        const lastestPriceId = await PricesRepository.$getLatestPriceId();
-        this.updateTimerProgress(timer, `got latest price id ${this.currentBlockHeight}`);
+        let lastestPriceId;
+        try {
+          lastestPriceId = await PricesRepository.$getLatestPriceId();
+          this.updateTimerProgress(timer, `got latest price id ${this.currentBlockHeight}`);
+        } catch (e) {
+          logger.debug('failed to fetch latest price id from db: ' + (e instanceof Error ? e.message : e));
+        }
         if (priceUpdater.historyInserted === true && lastestPriceId !== null) {
           await blocksRepository.$saveBlockPrices([{
             height: blockExtended.height,
@@ -771,9 +776,7 @@
           this.updateTimerProgress(timer, `saved prices for ${this.currentBlockHeight}`);
         } else {
           logger.debug(`Cannot save block price for ${blockExtended.height} because the price updater hasnt completed yet. Trying again in 10 seconds.`, logger.tags.mining);
-          setTimeout(() => {
-            indexer.runSingleTask('blocksPrices');
-          }, 10000);
+          indexer.scheduleSingleTask('blocksPrices', 10000);
         }

         // Save blocks summary for visualization if it's enabled
diff --git a/backend/src/api/lightning/clightning/clightning-convert.ts b/backend/src/api/lightning/clightning/clightning-convert.ts
index 55e4bd213..0db60f649 100644
--- a/backend/src/api/lightning/clightning/clightning-convert.ts
+++ b/backend/src/api/lightning/clightning/clightning-convert.ts
@@ -44,9 +44,13 @@
   KeysendOptional = 55,
   ScriptEnforcedLeaseRequired = 2022,
   ScriptEnforcedLeaseOptional = 2023,
+  SimpleTaprootChannelsRequiredFinal = 80,
+  SimpleTaprootChannelsOptionalFinal = 81,
+  SimpleTaprootChannelsRequiredStaging = 180,
+  SimpleTaprootChannelsOptionalStaging = 181,
   MaxBolt11Feature = 5114,
 };
-
+
 export const FeaturesMap = new Map<FeatureBits, string>([
   [FeatureBits.DataLossProtectRequired, 'data-loss-protect'],
   [FeatureBits.DataLossProtectOptional, 'data-loss-protect'],
@@ -85,6 +89,10 @@
   [FeatureBits.ZeroConfOptional, 'zero-conf'],
   [FeatureBits.ShutdownAnySegwitRequired, 'shutdown-any-segwit'],
   [FeatureBits.ShutdownAnySegwitOptional, 'shutdown-any-segwit'],
+  [FeatureBits.SimpleTaprootChannelsRequiredFinal, 'taproot-channels'],
+  [FeatureBits.SimpleTaprootChannelsOptionalFinal, 'taproot-channels'],
+  [FeatureBits.SimpleTaprootChannelsRequiredStaging, 'taproot-channels-staging'],
+  [FeatureBits.SimpleTaprootChannelsOptionalStaging, 'taproot-channels-staging'],
 ]);

 /**
diff --git a/backend/src/api/mempool.ts b/backend/src/api/mempool.ts
index f5e788f98..60bcd2f99 100644
--- a/backend/src/api/mempool.ts
+++ b/backend/src/api/mempool.ts
@@ -126,7 +126,7 @@ class Mempool {
     loadingIndicators.setProgress('mempool', count / expectedCount * 100);
     while (!done) {
       try {
-        const result = await bitcoinApi.$getAllMempoolTransactions(last_txid);
+        const result = await bitcoinApi.$getAllMempoolTransactions(last_txid, config.ESPLORA.BATCH_QUERY_BASE_SIZE);
         if (result) {
           for (const tx of result) {
             const extendedTransaction = transactionUtils.extendMempoolTransaction(tx);
@@ -235,7 +235,7 @@ class Mempool {
     if (!loaded) {
       const remainingTxids = transactions.filter(txid => !this.mempoolCache[txid]);
-      const sliceLength = 10000;
+      const sliceLength = config.ESPLORA.BATCH_QUERY_BASE_SIZE;
       for (let i = 0; i < Math.ceil(remainingTxids.length / sliceLength); i++) {
         const slice = remainingTxids.slice(i * sliceLength, (i + 1) * sliceLength);
         const txs = await transactionUtils.$getMempoolTransactionsExtended(slice, false, false, false);
diff --git a/backend/src/api/rbf-cache.ts b/backend/src/api/rbf-cache.ts
index c573d3291..708393083 100644
--- a/backend/src/api/rbf-cache.ts
+++ b/backend/src/api/rbf-cache.ts
@@ -480,7 +480,7 @@ class RbfCache {
     };

     if (config.MEMPOOL.BACKEND === 'esplora') {
-      const sliceLength = 250;
+      const sliceLength = Math.ceil(config.ESPLORA.BATCH_QUERY_BASE_SIZE / 40);
       for (let i = 0; i < Math.ceil(txids.length / sliceLength); i++) {
         const slice = txids.slice(i * sliceLength, (i + 1) * sliceLength);
         try {
diff --git a/backend/src/api/redis-cache.ts b/backend/src/api/redis-cache.ts
index 00b280274..82ce34ad1 100644
--- a/backend/src/api/redis-cache.ts
+++ b/backend/src/api/redis-cache.ts
@@ -122,8 +122,9 @@ class RedisCache {
   async $removeTransactions(transactions: string[]) {
     try {
       await this.$ensureConnected();
-      for (let i = 0; i < Math.ceil(transactions.length / 10000); i++) {
-        const slice = transactions.slice(i * 10000, (i + 1) * 10000);
+      const sliceLength = config.REDIS.BATCH_QUERY_BASE_SIZE;
+      for (let i = 0; i < Math.ceil(transactions.length / sliceLength); i++) {
+        const slice = transactions.slice(i * sliceLength, (i + 1) * sliceLength);
         await this.client.unlink(slice.map(txid => `mempool:tx:${txid}`));
         logger.debug(`Deleted ${slice.length} transactions from the Redis cache`);
       }
diff --git a/backend/src/config.ts b/backend/src/config.ts
index 37d5a2de9..4115149e6 100644
--- a/backend/src/config.ts
+++ b/backend/src/config.ts
@@ -43,6 +43,7 @@ interface IConfig {
   ESPLORA: {
     REST_API_URL: string;
     UNIX_SOCKET_PATH: string | void | null;
+    BATCH_QUERY_BASE_SIZE: number;
     RETRY_UNIX_SOCKET_AFTER: number;
     REQUEST_TIMEOUT: number;
     FALLBACK_TIMEOUT: number;
@@ -151,6 +152,7 @@
   REDIS: {
     ENABLED: boolean;
     UNIX_SOCKET_PATH: string;
+    BATCH_QUERY_BASE_SIZE: number;
   },
 }

@@ -195,6 +197,7 @@
   'ESPLORA': {
     'REST_API_URL': 'http://127.0.0.1:3000',
     'UNIX_SOCKET_PATH': null,
+    'BATCH_QUERY_BASE_SIZE': 1000,
     'RETRY_UNIX_SOCKET_AFTER': 30000,
     'REQUEST_TIMEOUT': 10000,
     'FALLBACK_TIMEOUT': 5000,
@@ -303,6 +306,7 @@
   'REDIS': {
     'ENABLED': false,
     'UNIX_SOCKET_PATH': '',
+    'BATCH_QUERY_BASE_SIZE': 5000,
   },
 };
diff --git a/backend/src/database.ts b/backend/src/database.ts
index 0638aa319..dc543bbbc 100644
--- a/backend/src/database.ts
+++ b/backend/src/database.ts
@@ -2,6 +2,7 @@ import * as fs from 'fs';
 import path from 'path';
 import config from './config';
 import { createPool, Pool, PoolConnection } from 'mysql2/promise';
+import { LogLevel } from './logger';
 import logger from './logger';
 import { FieldPacket, OkPacket, PoolOptions, ResultSetHeader, RowDataPacket } from 'mysql2/typings/mysql';
 import { execSync } from 'child_process';
@@ -33,7 +34,7 @@ import { execSync } from 'child_process';
   }

   public async query<T extends RowDataPacket[][] | RowDataPacket[] | OkPacket |
-    OkPacket[] | ResultSetHeader>(query, params?, connection?: PoolConnection): Promise<[T, FieldPacket[]]>
+    OkPacket[] | ResultSetHeader>(query, params?, errorLogLevel: LogLevel | 'silent' = 'debug', connection?: PoolConnection): Promise<[T, FieldPacket[]]>
   {
     this.checkDBFlag();
     let hardTimeout;
@@ -55,19 +56,38 @@
         }).then(result => {
           resolve(result);
         }).catch(error => {
+          if (errorLogLevel !== 'silent') {
+            logger[errorLogLevel](`database query "${query?.sql?.slice(0, 160) || (typeof(query) === 'string' || query instanceof String ? query?.slice(0, 160) : 'unknown query')}" failed!`);
+          }
           reject(error);
         }).finally(() => {
           clearTimeout(timer);
         });
       });
     } else {
-      const pool = await this.getPool();
-      return pool.query(query, params);
+      try {
+        const pool = await this.getPool();
+        return pool.query(query, params);
+      } catch (e) {
+        if (errorLogLevel !== 'silent') {
+          logger[errorLogLevel](`database query "${query?.sql?.slice(0, 160) || (typeof(query) === 'string' || query instanceof String ? query?.slice(0, 160) : 'unknown query')}" failed!`);
+        }
+        throw e;
+      }
+    }
+  }
+
+  private async $rollbackAtomic(connection: PoolConnection): Promise<void> {
+    try {
+      await connection.rollback();
+      await connection.release();
+    } catch (e) {
+      logger.warn('Failed to rollback incomplete db transaction: ' + (e instanceof Error ? e.message : e));
     }
   }

   public async $atomicQuery<T extends RowDataPacket[][] | RowDataPacket[] | OkPacket |
-    OkPacket[] | ResultSetHeader>(queries: { query, params }[]): Promise<[T, FieldPacket[]][]>
+    OkPacket[] | ResultSetHeader>(queries: { query, params }[], errorLogLevel: LogLevel | 'silent' = 'debug'): Promise<[T, FieldPacket[]][]>
   {
     const pool = await this.getPool();
     const connection = await pool.getConnection();
@@ -76,7 +96,7 @@
       const results: [T, FieldPacket[]][] = [];

       for (const query of queries) {
-        const result = await this.query(query.query, query.params, connection) as [T, FieldPacket[]];
+        const result = await this.query(query.query, query.params, errorLogLevel, connection) as [T, FieldPacket[]];
         results.push(result);
       }

@@ -84,9 +104,8 @@
       return results;
     } catch (e) {
-      logger.err('Could not complete db transaction, rolling back: ' + (e instanceof Error ? e.message : e));
-      connection.rollback();
-      connection.release();
+      logger.warn('Could not complete db transaction, rolling back: ' + (e instanceof Error ? e.message : e));
+      this.$rollbackAtomic(connection);
       throw e;
     } finally {
       connection.release();
diff --git a/backend/src/index.ts b/backend/src/index.ts
index 59c92fbf3..0c28df0a8 100644
--- a/backend/src/index.ts
+++ b/backend/src/index.ts
@@ -92,9 +92,15 @@ class Server {
     logger.notice(`Starting Mempool Server${worker ? ' (worker)' : ''}... (${backendInfo.getShortCommitHash()})`);

     // Register cleanup listeners for exit events
-    ['exit', 'SIGHUP', 'SIGINT', 'SIGTERM', 'SIGUSR1', 'SIGUSR2', 'uncaughtException', 'unhandledRejection'].forEach(event => {
+    ['exit', 'SIGHUP', 'SIGINT', 'SIGTERM', 'SIGUSR1', 'SIGUSR2'].forEach(event => {
       process.on(event, () => { this.onExit(event); });
     });
+    process.on('uncaughtException', (error) => {
+      this.onUnhandledException('uncaughtException', error);
+    });
+    process.on('unhandledRejection', (reason, promise) => {
+      this.onUnhandledException('unhandledRejection', reason);
+    });

     if (config.MEMPOOL.BACKEND === 'esplora') {
       bitcoinApi.startHealthChecks();
@@ -200,7 +206,7 @@
       }
       const newMempool = await bitcoinApi.$getRawMempool();
       const numHandledBlocks = await blocks.$updateBlocks();
-      const pollRate = config.MEMPOOL.POLL_RATE_MS * (indexer.indexerRunning ? 10 : 1);
+      const pollRate = config.MEMPOOL.POLL_RATE_MS * (indexer.indexerIsRunning() ? 10 : 1);
       if (numHandledBlocks === 0) {
         await memPool.$updateMempool(newMempool, pollRate);
       }
@@ -314,14 +320,18 @@
     }
   }

-  onExit(exitEvent): void {
+  onExit(exitEvent, code = 0): void {
+    logger.debug(`onExit for signal: ${exitEvent}`);
     if (config.DATABASE.ENABLED) {
       DB.releasePidLock();
     }
-    process.exit(0);
+    process.exit(code);
+  }
+
+  onUnhandledException(type, error): void {
+    console.error(`${type}:`, error);
+    this.onExit(type, 1);
   }
 }
-
-
 ((): Server => new Server())();
diff --git a/backend/src/indexer.ts b/backend/src/indexer.ts
index 7ec65d9c9..2e2f8b037 100644
--- a/backend/src/indexer.ts
+++ b/backend/src/indexer.ts
@@ -15,11 +15,18 @@
   best_block_height: number;
 }

+type TaskName = 'blocksPrices' | 'coinStatsIndex';
+
 class Indexer {
-  runIndexer = true;
-  indexerRunning = false;
-  tasksRunning: string[] = [];
-  coreIndexes: CoreIndex[] = [];
+  private runIndexer = true;
+  private indexerRunning = false;
+  private tasksRunning: { [key in TaskName]?: boolean; } = {};
+  private tasksScheduled: { [key in TaskName]?: NodeJS.Timeout; } = {};
+  private coreIndexes: CoreIndex[] = [];
+
+  public indexerIsRunning(): boolean {
+    return this.indexerRunning;
+  }

   /**
    * Check which core index is available for indexing
@@ -69,33 +76,69 @@ class Indexer {
     }
   }

-  public async runSingleTask(task: 'blocksPrices' | 'coinStatsIndex'): Promise<void> {
-    if (!Common.indexingEnabled()) {
-      return;
-    }
-
-    if (task === 'blocksPrices' && !this.tasksRunning.includes(task) && !['testnet', 'signet'].includes(config.MEMPOOL.NETWORK)) {
-      this.tasksRunning.push(task);
-      const lastestPriceId = await PricesRepository.$getLatestPriceId();
-      if (priceUpdater.historyInserted === false || lastestPriceId === null) {
-        logger.debug(`Blocks prices indexer is waiting for the price updater to complete`, logger.tags.mining);
-        setTimeout(() => {
-          this.tasksRunning = this.tasksRunning.filter(runningTask => runningTask !== task);
-          this.runSingleTask('blocksPrices');
-        }, 10000);
-      } else {
-        logger.debug(`Blocks prices indexer will run now`, logger.tags.mining);
-        await mining.$indexBlockPrices();
-        this.tasksRunning = this.tasksRunning.filter(runningTask => runningTask !== task);
+  /**
+   * schedules a single task to run in `timeout` ms
+   * only one task of each type may be scheduled
+   *
+   * @param {TaskName} task - the type of task
+   * @param {number} timeout - delay in ms
+   * @param {boolean} replace - `true` replaces any already scheduled task (works like a debounce), `false` ignores subsequent requests (works like a throttle)
+   */
+  public scheduleSingleTask(task: TaskName, timeout: number = 10000, replace = false): void {
+    if (this.tasksScheduled[task]) {
+      if (!replace) { //throttle
+        return;
+      } else { // debounce
+        clearTimeout(this.tasksScheduled[task]);
       }
     }
+    this.tasksScheduled[task] = setTimeout(async () => {
+      try {
+        await this.runSingleTask(task);
+      } catch (e) {
+        logger.err(`Unexpected error in scheduled task ${task}: ` + (e instanceof Error ? e.message : e));
+      } finally {
+        clearTimeout(this.tasksScheduled[task]);
+      }
+    }, timeout);
+  }

-    if (task === 'coinStatsIndex' && !this.tasksRunning.includes(task)) {
-      this.tasksRunning.push(task);
-      logger.debug(`Indexing coinStatsIndex now`);
-      await mining.$indexCoinStatsIndex();
-      this.tasksRunning = this.tasksRunning.filter(runningTask => runningTask !== task);
+  /**
+   * Runs a single task immediately
+   *
+   * (use `scheduleSingleTask` instead to queue a task to run after some timeout)
+   */
+  public async runSingleTask(task: TaskName): Promise<void> {
+    if (!Common.indexingEnabled() || this.tasksRunning[task]) {
+      return;
     }
+    this.tasksRunning[task] = true;
+
+    switch (task) {
+      case 'blocksPrices': {
+        if (!['testnet', 'signet'].includes(config.MEMPOOL.NETWORK)) {
+          let lastestPriceId;
+          try {
+            lastestPriceId = await PricesRepository.$getLatestPriceId();
+          } catch (e) {
+            logger.debug('failed to fetch latest price id from db: ' + (e instanceof Error ? e.message : e));
+          }
+          if (priceUpdater.historyInserted === false || lastestPriceId === null) {
+            logger.debug(`Blocks prices indexer is waiting for the price updater to complete`, logger.tags.mining);
+            this.scheduleSingleTask(task, 10000);
+          } else {
+            logger.debug(`Blocks prices indexer will run now`, logger.tags.mining);
+            await mining.$indexBlockPrices();
+          }
+        }
+      } break;
+
+      case 'coinStatsIndex': {
+        logger.debug(`Indexing coinStatsIndex now`);
+        await mining.$indexCoinStatsIndex();
+      } break;
+    }
+
+    this.tasksRunning[task] = false;
   }

   public async $run(): Promise<void> {
diff --git a/backend/src/logger.ts b/backend/src/logger.ts
index efafe894e..364c529e7 100644
--- a/backend/src/logger.ts
+++ b/backend/src/logger.ts
@@ -157,4 +157,6 @@
   }
 }

+export type LogLevel = 'emerg' | 'alert' | 'crit' | 'err' | 'warn' | 'notice' | 'info' | 'debug';
+
 export default new Logger();
diff --git a/backend/src/repositories/NodesSocketsRepository.ts b/backend/src/repositories/NodesSocketsRepository.ts
index af594e6e1..e85126de4 100644
--- a/backend/src/repositories/NodesSocketsRepository.ts
+++ b/backend/src/repositories/NodesSocketsRepository.ts
@@ -14,7 +14,7 @@
     await DB.query(`
       INSERT INTO nodes_sockets(public_key, socket, type)
       VALUE (?, ?, ?)
-    `, [socket.publicKey, socket.addr, socket.network]);
+    `, [socket.publicKey, socket.addr, socket.network], 'silent');
   } catch (e: any) {
     if (e.errno !== 1062) { // ER_DUP_ENTRY - Not an issue, just ignore this
       logger.err(`Cannot save node socket (${[socket.publicKey, socket.addr, socket.network]}) into db. Reason: ` + (e instanceof Error ? e.message : e));
diff --git a/backend/src/tasks/lightning/forensics.service.ts b/backend/src/tasks/lightning/forensics.service.ts
index 584dd3c79..aa88f5bb4 100644
--- a/backend/src/tasks/lightning/forensics.service.ts
+++ b/backend/src/tasks/lightning/forensics.service.ts
@@ -79,7 +79,7 @@ class ForensicsService {
       }
       let progress = 0;

-      const sliceLength = 1000;
+      const sliceLength = Math.ceil(config.ESPLORA.BATCH_QUERY_BASE_SIZE / 10);
       // process batches of 1000 channels
       for (let i = 0; i < Math.ceil(allChannels.length / sliceLength); i++) {
         const channels = allChannels.slice(i * sliceLength, (i + 1) * sliceLength);
diff --git a/backend/src/tasks/lightning/network-sync.service.ts b/backend/src/tasks/lightning/network-sync.service.ts
index dc0d609fa..7d6a40571 100644
--- a/backend/src/tasks/lightning/network-sync.service.ts
+++ b/backend/src/tasks/lightning/network-sync.service.ts
@@ -290,7 +290,7 @@ class NetworkSyncService {

     const allChannels = await channelsApi.$getChannelsByStatus([0, 1]);

-    const sliceLength = 5000;
+    const sliceLength = Math.ceil(config.ESPLORA.BATCH_QUERY_BASE_SIZE / 2);
     // process batches of 5000 channels
     for (let i = 0; i < Math.ceil(allChannels.length / sliceLength); i++) {
       const channels = allChannels.slice(i * sliceLength, (i + 1) * sliceLength);
diff --git a/contributors/starius.txt b/contributors/starius.txt
new file mode 100644
index 000000000..df9fdbe8d
--- /dev/null
+++ b/contributors/starius.txt
@@ -0,0 +1,3 @@
+I hereby accept the terms of the Contributor License Agreement in the CONTRIBUTING.md file of the mempool/mempool git repository as of Oct 13, 2023.
+
+Signed starius
diff --git a/docker/backend/mempool-config.json b/docker/backend/mempool-config.json
index a1359db97..e73fa1929 100644
--- a/docker/backend/mempool-config.json
+++ b/docker/backend/mempool-config.json
@@ -53,6 +53,7 @@
   "ESPLORA": {
     "REST_API_URL": "__ESPLORA_REST_API_URL__",
     "UNIX_SOCKET_PATH": "__ESPLORA_UNIX_SOCKET_PATH__",
+    "BATCH_QUERY_BASE_SIZE": __ESPLORA_BATCH_QUERY_BASE_SIZE__,
    "RETRY_UNIX_SOCKET_AFTER": __ESPLORA_RETRY_UNIX_SOCKET_AFTER__,
     "REQUEST_TIMEOUT": __ESPLORA_REQUEST_TIMEOUT__,
     "FALLBACK_TIMEOUT": __ESPLORA_FALLBACK_TIMEOUT__,
@@ -146,6 +147,7 @@
   },
   "REDIS": {
     "ENABLED": __REDIS_ENABLED__,
-    "UNIX_SOCKET_PATH": "__REDIS_UNIX_SOCKET_PATH__"
+    "UNIX_SOCKET_PATH": "__REDIS_UNIX_SOCKET_PATH__",
+    "BATCH_QUERY_BASE_SIZE": __REDIS_BATCH_QUERY_BASE_SIZE__
   }
 }
diff --git a/docker/backend/start.sh b/docker/backend/start.sh
index 23c578efe..232cf7284 100755
--- a/docker/backend/start.sh
+++ b/docker/backend/start.sh
@@ -54,6 +54,7 @@ __ELECTRUM_TLS_ENABLED__=${ELECTRUM_TLS_ENABLED:=false}
 # ESPLORA
 __ESPLORA_REST_API_URL__=${ESPLORA_REST_API_URL:=http://127.0.0.1:3000}
 __ESPLORA_UNIX_SOCKET_PATH__=${ESPLORA_UNIX_SOCKET_PATH:="null"}
+__ESPLORA_BATCH_QUERY_BASE_SIZE__=${ESPLORA_BATCH_QUERY_BASE_SIZE:=1000}
 __ESPLORA_RETRY_UNIX_SOCKET_AFTER__=${ESPLORA_RETRY_UNIX_SOCKET_AFTER:=30000}
 __ESPLORA_REQUEST_TIMEOUT__=${ESPLORA_REQUEST_TIMEOUT:=5000}
 __ESPLORA_FALLBACK_TIMEOUT__=${ESPLORA_FALLBACK_TIMEOUT:=5000}
@@ -148,6 +149,7 @@ __MEMPOOL_SERVICES_ACCELERATIONS__=${MEMPOOL_SERVICES_ACCELERATIONS:=false}
 # REDIS
 __REDIS_ENABLED__=${REDIS_ENABLED:=false}
 __REDIS_UNIX_SOCKET_PATH__=${REDIS_UNIX_SOCKET_PATH:=true}
+__REDIS_BATCH_QUERY_BASE_SIZE__=${REDIS_BATCH_QUERY_BASE_SIZE:=5000}

 mkdir -p "${__MEMPOOL_CACHE_DIR__}"

@@ -201,6 +203,7 @@ sed -i "s!__ELECTRUM_TLS_ENABLED__!${__ELECTRUM_TLS_ENABLED__}!g" mempool-config
 sed -i "s!__ESPLORA_REST_API_URL__!${__ESPLORA_REST_API_URL__}!g" mempool-config.json
 sed -i "s!__ESPLORA_UNIX_SOCKET_PATH__!${__ESPLORA_UNIX_SOCKET_PATH__}!g" mempool-config.json
+sed -i "s!__ESPLORA_BATCH_QUERY_BASE_SIZE__!${__ESPLORA_BATCH_QUERY_BASE_SIZE__}!g" mempool-config.json
 sed -i "s!__ESPLORA_RETRY_UNIX_SOCKET_AFTER__!${__ESPLORA_RETRY_UNIX_SOCKET_AFTER__}!g" mempool-config.json
 sed -i "s!__ESPLORA_REQUEST_TIMEOUT__!${__ESPLORA_REQUEST_TIMEOUT__}!g" mempool-config.json
 sed -i "s!__ESPLORA_FALLBACK_TIMEOUT__!${__ESPLORA_FALLBACK_TIMEOUT__}!g" mempool-config.json
@@ -288,5 +291,6 @@ sed -i "s!__MEMPOOL_SERVICES_ACCELERATIONS__!${__MEMPOOL_SERVICES_ACCELERATIONS_
 # REDIS
 sed -i "s!__REDIS_ENABLED__!${__REDIS_ENABLED__}!g" mempool-config.json
 sed -i "s!__REDIS_UNIX_SOCKET_PATH__!${__REDIS_UNIX_SOCKET_PATH__}!g" mempool-config.json
+sed -i "s!__REDIS_BATCH_QUERY_BASE_SIZE__!${__REDIS_BATCH_QUERY_BASE_SIZE__}!g" mempool-config.json

 node /backend/package/index.js
diff --git a/frontend/src/app/shared/components/truncate/truncate.component.html b/frontend/src/app/shared/components/truncate/truncate.component.html
index 94208f3a4..0ec579dad 100644
--- a/frontend/src/app/shared/components/truncate/truncate.component.html
+++ b/frontend/src/app/shared/components/truncate/truncate.component.html
@@ -1,4 +1,5 @@
+
@@ -11,6 +12,7 @@
+
   {{text.slice(0,-lastChars)}}{{text.slice(-lastChars)}}
diff --git a/frontend/src/app/shared/components/truncate/truncate.component.scss b/frontend/src/app/shared/components/truncate/truncate.component.scss
index 315ce4e12..c5179384f 100644
--- a/frontend/src/app/shared/components/truncate/truncate.component.scss
+++ b/frontend/src/app/shared/components/truncate/truncate.component.scss
@@ -27,4 +27,17 @@
   &.inline {
     display: inline-flex;
   }
-}
\ No newline at end of file
+}
+
+.hidden {
+  color: transparent;
+  position: absolute;
+  max-width: 300px;
+  overflow: hidden;
+}
+
+@media (max-width: 567px) {
+  .hidden {
+    max-width: 150px;
+  }
+}
diff --git a/production/install b/production/install
index 18ee06233..4f1280a05 100755
--- a/production/install
+++ b/production/install
@@ -394,7 +394,7 @@ FREEBSD_PKG=()
 FREEBSD_PKG+=(zsh sudo git git-lfs screen curl wget calc neovim)
 FREEBSD_PKG+=(openssh-portable py39-pip rust llvm10 jq base64 libzmq4)
 FREEBSD_PKG+=(boost-libs autoconf automake gmake gcc libevent libtool pkgconf)
-FREEBSD_PKG+=(nginx rsync py39-certbot-nginx mariadb105-server keybase)
+FREEBSD_PKG+=(nginx rsync py39-certbot-nginx mariadb1011-server keybase)
 FREEBSD_PKG+=(geoipupdate)

 FREEBSD_UNFURL_PKG=()
diff --git a/production/mempool-kill-all b/production/mempool-kill-all
index 6abb1a935..b3f88c6da 100755
--- a/production/mempool-kill-all
+++ b/production/mempool-kill-all
@@ -1,16 +1,19 @@
 #!/usr/bin/env zsh

-# kill "while true" loops
-killall sh
-
 # kill actual node backends
-killall node
+killall node 2>/dev/null
+
+# kill "while true" loops
+killall sh 2>/dev/null

 # kill unfurler chrome instances
-killall chrome
+killall chrome 2>/dev/null

 # kill xorg
-killall xinit
+killall xinit 2>/dev/null
+
+# kill dbus
+killall dbus-daemon 2>/dev/null

 # kill nginx cache warmer scripts
 for pid in `ps uaxww|grep warmer|grep zsh|awk '{print $2}'`;do