Mirror of https://github.com/romanz/electrs.git (synced 2024-11-19 01:43:29 +01:00)

Merge branch 'p2p'

commit 2152ac9cca
@@ -1,8 +1,11 @@
target/
.git/
_*/
.*
_*
contrib
db*
dist
doc
Dockerfile
LICENSE
README.md
RELEASE-NOTES.md
TODO.md
examples
scripts
target
tests
.github/workflows/rust.yml | 29 changed lines (vendored)
@@ -1,10 +1,10 @@
name: electrs CI
name: electrs

on: [push, pull_request]

jobs:
  electrs:
    name: electrs
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - name: Checkout sources
@@ -13,7 +13,7 @@ jobs:
      - name: Install rust
        uses: actions-rs/toolchain@v1
        with:
          components: rustfmt
          components: rustfmt, clippy
          profile: minimal

      - name: Format
@@ -26,10 +26,27 @@ jobs:
        uses: actions-rs/cargo@v1
        with:
          command: build
          args: --locked --release --all
          args: --locked --all

      - name: Test
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --locked --release --all
          args: --locked --all

      - name: Clippy
        uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: -- -D warnings

  integration:
    name: Integration
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Build
        run: docker build . --rm -t electrs:tests
      - name: Test
        run: docker run -v $PWD/contrib/:/contrib -v $PWD/tests/:/tests --rm electrs:tests bash /tests/run.sh
.gitignore | 6 changed lines (vendored)
@@ -1,5 +1,5 @@
target
*db/
/db*/
_*/
*.log
*.sublime*
@@ -8,3 +8,7 @@ _*/
.env
*.dat
electrs.toml
data/
tests/bitcoin-*
tests/bin
.idea/
Cargo.lock | 1074 changed lines (generated). File diff suppressed because it is too large.
Cargo.toml | 64 changed lines
@@ -1,6 +1,6 @@
[package]
name = "electrs"
version = "0.8.12"
version = "0.9.0-rc1"
authors = ["Roman Zeyde <me@romanzey.de>"]
description = "An efficient re-implementation of Electrum Server in Rust"
license = "MIT"
@@ -12,42 +12,46 @@ readme = "README.md"
edition = "2018"
build = "build.rs"

[features]
default = ["metrics"]
metrics = ["prometheus", "tiny_http"]

[package.metadata.configure_me]
spec = "internal/config_specification.toml"

[profile.release]
lto = true

[features]
default = ["rocksdb/snappy"]

[dependencies]
base64 = "0.10"
bincode = "1.0"
bitcoin = { version = "0.26.2", features = ["use-serde"] }
configure_me = "0.4.0"
crossbeam-channel = "0.3"
dirs-next = "2.0.0"
error-chain = "0.12"
glob = "0.3"
hex = "0.3"
libc = "0.2"
anyhow = "1.0"
bitcoin = { version = "0.27", features = ["use-serde", "rand"] }
configure_me = "0.4"
crossbeam-channel = "0.5"
dirs-next = "2.0"
env_logger = "0.7"
log = "0.4"
lru = "0.6.1"
num_cpus = "1.0"
page_size = "0.4"
prometheus = "0.10"
protobuf = "= 2.14.0" # https://github.com/stepancheg/rust-protobuf/blob/master/CHANGELOG.md#2150---2020-06-21
rocksdb = { version = "0.12.2", default-features = false } # due to https://github.com/romanz/electrs/issues/193
parking_lot = "0.11"
prometheus = { version = "0.12", features = ["process"], optional = true }
rayon = "1.5"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
sha2 = "0.9.2"
signal-hook = "0.1"
stderrlog = "0.5.1"
sysconf = ">=0.3.4"
time = "0.1"
tiny_http = "0.6"
signal-hook = "0.3"
tiny_http = { version = "0.8", optional = true }

[dependencies.bitcoincore-rpc]
# use bitcoin 0.27 (until https://github.com/rust-bitcoin/rust-bitcoincore-rpc/pull/196 is merged)
git = "https://github.com/romanz/rust-bitcoincore-rpc"
rev = "06ac9fa3e834413f7afeaed322cf8098d876e4a0"

[dependencies.rocksdb]
# support building with Rust 1.41.1 and workaround https://github.com/romanz/electrs/issues/403
git = "https://github.com/romanz/rust-rocksdb"
rev = "2023b18a7b83fc47b5bc950b5322a2284b771162"
default-features = false
# ZSTD is used for data compression
# Snappy is only for checking old DB
features = ["zstd", "snappy"]

[build-dependencies]
configure_me_codegen = "0.4.0"
configure_me_codegen = "0.4"

[dev-dependencies]
tempfile = "3.2"
Dockerfile | 66 changed lines
@@ -1,43 +1,39 @@
FROM rust:1.44.1-slim-buster as builder

WORKDIR /build

RUN apt-get update \
    && apt-get install -y --no-install-recommends clang=1:7.* cmake=3.* \
       libsnappy-dev=1.* \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
### Electrum Rust Server ###
FROM rust:1.41.1-slim as electrs-build
RUN apt-get update
RUN apt-get install -qq -y clang cmake

# Install electrs
WORKDIR /build/electrs
COPY . .

RUN cargo install --locked --path .

# Create runtime image
FROM debian:buster-slim
FROM debian:buster-slim as updated
RUN apt-get update -qqy

WORKDIR /app
### Bitcoin Core ###
FROM updated as bitcoin-build
# Download
RUN apt-get install -qqy wget
WORKDIR /build/bitcoin
ARG BITCOIND_VERSION=22.0
RUN wget -q https://bitcoincore.org/bin/bitcoin-core-$BITCOIND_VERSION/bitcoin-$BITCOIND_VERSION-x86_64-linux-gnu.tar.gz
RUN tar xvf bitcoin-$BITCOIND_VERSION-x86_64-linux-gnu.tar.gz
RUN mv -v bitcoin-$BITCOIND_VERSION/bin/bitcoind .
RUN mv -v bitcoin-$BITCOIND_VERSION/bin/bitcoin-cli .

RUN apt-get update \
    && apt-get install -y --no-install-recommends curl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
FROM updated as result
# Copy the binaries
COPY --from=electrs-build /usr/local/cargo/bin/electrs /usr/bin/electrs
COPY --from=bitcoin-build /build/bitcoin/bitcoind /build/bitcoin/bitcoin-cli /usr/bin/
RUN bitcoind -version && bitcoin-cli -version

RUN groupadd -r user \
    && adduser --disabled-login --system --shell /bin/false --uid 1000 --ingroup user user
### Electrum ###
# Clone latest Electrum wallet and a few test tools
WORKDIR /build/
RUN apt-get install -qqy git libsecp256k1-0 python3-cryptography python3-setuptools python3-pip jq curl
RUN git clone --recurse-submodules https://github.com/spesmilo/electrum/ && cd electrum/ && git log -1
RUN python3 -m pip install -e electrum/

COPY --from=builder --chown=user:user \
    /build/target/release/electrs .

USER user

# Electrum RPC
EXPOSE 50001

# Prometheus monitoring
EXPOSE 4224

STOPSIGNAL SIGINT

HEALTHCHECK CMD curl -fSs http://localhost:4224/ || exit 1

ENTRYPOINT ["./electrs"]
RUN electrum version --offline
WORKDIR /
@@ -27,10 +27,10 @@ See [here](doc/usage.md) for installation, build and usage instructions.

## Features

* Supports Electrum protocol [v1.4](https://electrumx.readthedocs.io/en/latest/protocol.html)
* Supports Electrum protocol [v1.4](https://electrumx-spesmilo.readthedocs.io/en/latest/protocol.html)
* Maintains an index over transaction inputs and outputs, allowing fast balance queries
* Fast synchronization of the Bitcoin blockchain (~2 hours for ~187GB @ July 2018) on [modest hardware](https://gist.github.com/romanz/cd9324474de0c2f121198afe3d063548)
* Low index storage overhead (~20%), relying on a local full node for transaction retrieval
* Fast synchronization of the Bitcoin blockchain (~4 hours for ~336GB @ August 2021) using HDD storage.
* Low index storage overhead (~10%), relying on a local full node for transaction retrieval
* Efficient mempool tracker (allowing better fee [estimation](https://github.com/spesmilo/electrum/blob/59c1d03f018026ac301c4e74facfc64da8ae4708/RELEASE-NOTES#L34-L46))
* Low CPU & memory usage (after initial indexing)
* [`txindex`](https://github.com/bitcoinbook/bitcoinbook/blob/develop/ch03.asciidoc#txindex) is not required for the Bitcoin node
@@ -1,3 +1,26 @@
# 0.9.0 (TBD)

**IMPORTANT: This release contains major changes, please read carefully!**

The two main things to watch out for:

* Database schema changed - this will cause **reindex after upgrade**.
* We now use **bitcoin p2p protocol** to fetch blocks - some configurations may not work.

See [upgrading](doc/usage.md#upgrading) section of our docs to learn more.

Full list of changes:

* Fix incorrect ordering of same-block transactions (#297)
* Change DB index format and use Zstd compression (instead of Snappy)
* The database will be reindexed automatically when it encounters an old version (#477)
* Don't use bitcoind JSON RPC for fetching blocks (#373)
* Use p2p for block fetching only.
  This is safer than reading `blk*dat` files and faster than JSON RPC.
* Support Electrum JSON RPC batching and errors
* Use `rust-bitcoincore-rpc` crate
* Increase default `index_lookup_limit` to 200

# 0.8.12 (14 Sep 2021)

* Fail if `cookie` is specified (#478)
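The 0.9.0 notes above mention Electrum JSON RPC batching. As an illustration only (not part of this commit), here is a minimal Python sketch of sending a JSON-RPC 2.0 batch to a running electrs instance, assuming it listens on localhost:50001 over plain TCP:

```python
#!/usr/bin/env python3
"""Minimal sketch of an Electrum JSON-RPC 2.0 batch call (assumed local setup)."""
import json
import socket


def batch_call(host, port, requests):
    # Assign ids and the JSON-RPC version to each request, as a batching client would.
    for i, req in enumerate(requests):
        req["id"] = i
        req["jsonrpc"] = "2.0"
    with socket.create_connection((host, port)) as sock:
        sock.sendall((json.dumps(requests) + "\n").encode("ascii"))
        reply = sock.makefile("r").readline()
    # The server answers with a JSON array, one entry per request.
    return json.loads(reply)


if __name__ == "__main__":
    batch = [
        {"method": "server.version", "params": ["example", "1.4"]},
        {"method": "blockchain.headers.subscribe", "params": []},
    ]
    for item in batch_call("localhost", 50001, batch):
        print(item)
```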
@@ -1,39 +0,0 @@
#!/usr/bin/env python3
import hashlib
import sys
import argparse

import client

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--testnet', action='store_true')
    parser.add_argument('--regtest', action='store_true')
    parser.add_argument('--signet', action='store_true')
    parser.add_argument('address', nargs='+')
    args = parser.parse_args()

    if args.testnet:
        port = 60001
        from pycoin.symbols.xtn import network
    if args.regtest:
        port = 60401
        from pycoin.symbols.xrt import network
    if args.signet:
        port = 60601
        from pycoin.symbols.xtn import network
    else:
        port = 50001
        from pycoin.symbols.btc import network

    conn = client.Client(('localhost', port))
    for addr in args.address:
        script = network.parse.address(addr).script()
        script_hash = hashlib.sha256(script).digest()[::-1].hex()
        reply = conn.call('blockchain.scripthash.get_balance', script_hash)
        result = reply['result']
        print('{} has {} satoshis'.format(addr, result))


if __name__ == '__main__':
    main()
@@ -7,12 +7,21 @@ class Client:
        self.f = self.s.makefile('r')
        self.id = 0

    def call(self, method, *args):
        req = {
            'id': self.id,
            'method': method,
            'params': list(args),
        }
        msg = json.dumps(req) + '\n'
    def call(self, requests):
        requests = list(requests)
        for request in requests:
            request['id'] = self.id
            request['jsonrpc'] = '2.0'
            self.id += 1

        msg = json.dumps(requests) + '\n'
        self.s.sendall(msg.encode('ascii'))
        return json.loads(self.f.readline())
        response = json.loads(self.f.readline())
        try:
            return [r['result'] for r in response]
        except KeyError:
            raise ValueError(response)


def request(method, *args):
    return {'method': method, 'params': list(args)}
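As a usage note for the batched `contrib/client.py` API shown above, a minimal hypothetical sketch (assuming a local electrs on port 50001; `blockchain.headers.subscribe` and `server.banner` are standard Electrum protocol methods):

```python
#!/usr/bin/env python3
# Hypothetical usage sketch of the new batched API in contrib/client.py.
import client

conn = client.Client(("localhost", 50001))
# One round trip, two requests; results come back in the same order.
tip, banner = conn.call([
    client.request("blockchain.headers.subscribe"),
    client.request("server.banner"),
])
print(tip["height"], banner)
```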
contrib/get_tip.py | 16 lines (new executable file)
@@ -0,0 +1,16 @@
#!/usr/bin/env python3
import argparse
import client
import json

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("host")
    parser.add_argument("port", type=int)
    args = parser.parse_args()

    conn = client.Client((args.host, args.port))
    print(conn.call([client.request("blockchain.headers.subscribe")]))

if __name__ == '__main__':
    main()
contrib/get_tx.py | 16 lines (new executable file)
@@ -0,0 +1,16 @@
#!/usr/bin/env python3
import argparse
import client
import json

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("txid")
    args = parser.parse_args()

    conn = client.Client(("localhost", 50001))
    tx, = conn.call([client.request("blockchain.transaction.get", args.txid, True)])
    print(json.dumps(tx))

if __name__ == "__main__":
    main()
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import argparse
import client
import json

def main():
    parser = argparse.ArgumentParser()
@@ -9,7 +10,7 @@ def main():
    args = parser.parse_args()

    conn = client.Client((args.host, args.port))
    print(conn.call("server.version", "health_check", "1.4")["result"])
    print(json.dumps(conn.call([client.request("server.version", "health_check", "1.4")])))

if __name__ == '__main__':
    main()
contrib/history.py | 156 lines (new executable file)
@@ -0,0 +1,156 @@
#!/usr/bin/env python3
import argparse
import datetime
import hashlib
import io
import sys

import pycoin
from logbook import Logger, StreamHandler
import prettytable

import client

log = Logger('electrum')


def _script_hash(script):
    return hashlib.sha256(script).digest()[::-1].hex()


def show_rows(rows, field_names):
    t = prettytable.PrettyTable()
    t.field_names = field_names
    t.add_rows(rows)
    for f in t.field_names:
        if "mBTC" in f:
            t.align[f] = "r"
    print(t)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--network', default='mainnet')
    parser.add_argument('address', nargs='+')
    args = parser.parse_args()

    if args.network == 'regtest':
        port = 60401
        from pycoin.symbols.xrt import network
    elif args.network == 'testnet':
        port = 60001
        from pycoin.symbols.xtn import network
    elif args.network == 'mainnet':
        port = 50001
        from pycoin.symbols.btc import network
    else:
        raise ValueError(f"unknown network: {args.network}")

    hostport = ('localhost', port)
    log.info('connecting to {}:{}', *hostport)
    conn = client.Client(hostport)

    tip, = conn.call([client.request('blockchain.headers.subscribe')])

    script_hashes = [
        _script_hash(network.parse.address(addr).script())
        for addr in args.address
    ]

    conn.call(
        client.request('blockchain.scripthash.subscribe', script_hash)
        for script_hash in script_hashes
    )
    log.info('subscribed to {} scripthashes', len(script_hashes))

    balances = conn.call(
        client.request('blockchain.scripthash.get_balance', script_hash)
        for script_hash in script_hashes
    )

    unspents = conn.call(
        client.request('blockchain.scripthash.listunspent', script_hash)
        for script_hash in script_hashes
    )
    for addr, balance, unspent in sorted(zip(args.address, balances, unspents), key=lambda v: v[0]):
        if unspent:
            log.debug("{}: confirmed={:,.5f} mBTC, unconfirmed={:,.5f} mBTC",
                      addr, balance["confirmed"] / 1e5, balance["unconfirmed"] / 1e5)
            for u in unspent:
                log.debug("\t{}:{} = {:,.5f} mBTC {}",
                          u["tx_hash"], u["tx_pos"], u["value"] / 1e5,
                          f'@ {u["height"]}' if u["height"] else "")

    histories = conn.call(
        client.request('blockchain.scripthash.get_history', script_hash)
        for script_hash in script_hashes
    )
    txids_map = dict(
        (tx['tx_hash'], tx['height'] if tx['height'] > 0 else None)
        for history in histories
        for tx in history
    )
    log.info('got history of {} transactions', len(txids_map))

    txs = map(network.tx.from_hex, conn.call(
        client.request('blockchain.transaction.get', txid)
        for txid in txids_map.keys()
    ))
    txs_map = dict(zip(txids_map.keys(), txs))
    log.info('loaded {} transactions', len(txids_map))

    confirmed_txids = {txid: height for txid, height in txids_map.items() if height is not None}

    heights = set(confirmed_txids.values())
    def _parse_header(header):
        return network.block.parse_as_header(io.BytesIO(bytes.fromhex(header)))
    headers = map(_parse_header, conn.call(
        client.request('blockchain.block.header', height)
        for height in heights
    ))
    def _parse_timestamp(header):
        return datetime.datetime.utcfromtimestamp(header.timestamp).strftime('%Y-%m-%dT%H:%M:%SZ')
    timestamps = map(_parse_timestamp, headers)
    timestamps_map = dict(zip(heights, timestamps))
    log.info('loaded {} header timestamps', len(heights))

    proofs = conn.call(
        client.request('blockchain.transaction.get_merkle', txid, height)
        for txid, height in confirmed_txids.items()
    )
    log.info('loaded {} merkle proofs', len(proofs))  # TODO: verify proofs

    sorted_txdata = sorted(
        (proof['block_height'], proof['pos'], txid)
        for proof, txid in zip(proofs, confirmed_txids)
    )

    utxos = {}
    balance = 0

    rows = []
    script_hashes = set(script_hashes)
    for block_height, block_pos, txid in sorted_txdata:
        tx_obj = txs_map[txid]
        for txi in tx_obj.txs_in:
            utxos.pop((str(txi.previous_hash), txi.previous_index), None)

        for index, txo in enumerate(tx_obj.txs_out):
            if _script_hash(txo.puzzle_script()) in script_hashes:
                utxos[(txid, index)] = txo

        diff = sum(txo.coin_value for txo in utxos.values()) - balance
        balance += diff
        confirmations = tip['height'] - block_height + 1
        rows.append([txid, timestamps_map[block_height], block_height, confirmations, f'{diff/1e5:,.5f}', f'{balance/1e5:,.5f}'])
    show_rows(rows, ["txid", "block timestamp", "height", "confirmations", "delta (mBTC)", "total (mBTC)"])

    tip_header = _parse_header(tip['hex'])
    log.info('tip={}, height={} @ {}', tip_header.id(), tip['height'], _parse_timestamp(tip_header))

    unconfirmed = {txs_map[txid] for txid, height in txids_map.items() if height is None}
    # TODO: show unconfirmed balance

if __name__ == '__main__':
    StreamHandler(sys.stderr).push_application()
    main()
contrib/history.sh | 4 lines (new executable file)
@@ -0,0 +1,4 @@
#!/bin/bash
set -eu
cd `dirname $0`
.env/bin/python history.py $*
@@ -1,33 +1,27 @@
#!/usr/bin/env python3
import argparse
import daemon

import client

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('txid')
    parser.add_argument("txid")
    args = parser.parse_args()

    d = daemon.Daemon(port=8332, cookie_dir='~/.bitcoin')
    txid = args.txid
    conn = client.Client(("localhost", 50001))
    tx, = conn.call([client.request("blockchain.transaction.get", args.txid, True)])
    requests = []
    for vin in tx["vin"]:
        prev_txid = vin["txid"]
        requests.append(client.request("blockchain.transaction.get", prev_txid, True))

    txn, = d.request('getrawtransaction', [[txid, True]])
    vin = txn['vin']
    fee = 0
    for vin, prev_tx in zip(tx["vin"], conn.call(requests)):
        txo = prev_tx["vout"][vin["vout"]]
        fee += txo["value"]

    fee = 0.0
    for txi in txn['vin']:
        prev_txid = txi['txid']
        prev_tx, = d.request('getrawtransaction', [[prev_txid, True]])
        index = txi['vout']
        prev_txo = prev_tx['vout'][index]
        print(f"{prev_txid}:{index:<5} {prev_txo['value']:+20.8f}")
        fee += prev_txo['value']
    fee -= sum(vout["value"] for vout in tx["vout"])

    for i, txo in enumerate(txn['vout']):
        print(f"{txid}:{i:<5} {-txo['value']:+20.8f}")
        fee -= txo['value']
    print(f'vSize = {tx["vsize"]}, Fee = {1e3 * fee:.2f} mBTC = {1e8 * fee / tx["vsize"]:.2f} sat/vB')

    print(f"Fee = {1e6 * fee:.2f} uBTC = {1e8 * fee / txn['vsize']:.2f} sat/vB")

if __name__ == '__main__':
if __name__ == "__main__":
    main()
@@ -8,16 +8,9 @@
# This example contains only the most important settings.
# See docs or electrs man page for advanced settings.

# Set if you have a low-memory device (such as RPi) or you run electrs under a different user than bitcoind and do NOT have sysperms in bitcoind.
jsonrpc_import = true

# File where bitcoind stores the cookie, usually file .cookie in its datadir
cookie_file = "/var/run/bitcoin-mainnet/cookie"

# Prefer cookie file unless you have btc-rpc-proxy or a good reason to use username:password
# cookie and cookie_file can NOT be specified at the same time.
# cookie = "username:password"

# The listening address of bitcoind, port is usually 8332
daemon_rpc_addr = "127.0.0.1:8332"

@@ -32,5 +25,5 @@ network = "bitcoin"
electrum_rpc_addr = "127.0.0.1:50001"

# How much information about internal workings should electrs print. Increase before reporting a bug.
verbose = 3
verbose = 2
@@ -1,31 +1,53 @@
# Index Schema

The index is stored at a single RocksDB database using the following schema:
The index is stored at a single RocksDB database using the following column families.
Most of the data is stored in key-only DB rows (i.e. having empty values).

## Transaction outputs' index
## Transaction outputs' index (`funding`)

Allows efficiently finding all funding transactions for a specific address:

| Code   | Script Hash Prefix   | Funding TxID Prefix   |   |
| ------ | -------------------- | --------------------- | - |
| `b'O'` | `SHA256(script)[:8]` | `txid[:8]`            |   |
| Script Hash Prefix   | Confirmed Block Height |
| -------------------- | ---------------------- |
| `SHA256(script)[:8]` | `height as u32`        |

## Transaction inputs' index
## Transaction inputs' index (`spending`)

Allows efficiently finding the spending transaction of a specific output:

| Code   | Funding TxID Prefix | Funding Output Index | Spending TxID Prefix |   |
| ------ | ------------------- | -------------------- | -------------------- | - |
| `b'I'` | `txid[:8]`          | `uint16`             | `txid[:8]`           |   |
| Previous Outpoint Prefix | Confirmed Block Height |
| ------------------------ | ---------------------- |
| `txid[:8] as u64 + vout` | `height as u32`        |


## Full Transaction IDs
## Transaction ID index (`txid`)

In order to save storage space, we store the full transaction IDs once, and use their 8-byte prefixes for the indexes above.
In order to save storage space, we map the 8-byte transaction ID prefix to its confirmed block height:

| Code   | Transaction ID    |   | Confirmed height |
| ------ | ----------------- | - | ---------------- |
| `b'T'` | `txid` (32 bytes) |   | `uint32`         |
| Txid Prefix | Confirmed height |
| ----------- | ---------------- |
| `txid[:8]`  | `height as u32`  |

Note that this mapping allows us to use `getrawtransaction` RPC to retrieve actual transaction data from bitcoind without `-txindex` enabled
(by explicitly specifying the [blockhash](https://github.com/bitcoin/bitcoin/commit/497d0e014cc79d46531d570e74e4aeae72db602d)).
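To make the note above concrete, a minimal sketch (not electrs code) of the lookup it describes: resolve the confirmed height recorded in the index to a block hash and pass it to `getrawtransaction`. The RPC endpoint, cookie path and helper names below are assumptions for illustration only.

```python
"""Illustrative only: fetch a transaction from bitcoind without -txindex,
assuming RPC on 127.0.0.1:8332 with cookie auth in ~/.bitcoin/.cookie."""
import base64
import json
import pathlib
import urllib.request


def bitcoind_rpc(method, *params):
    cookie = pathlib.Path.home().joinpath(".bitcoin", ".cookie").read_text().strip()
    auth = base64.b64encode(cookie.encode()).decode()
    req = urllib.request.Request(
        "http://127.0.0.1:8332",
        data=json.dumps({"jsonrpc": "1.0", "id": "doc", "method": method,
                         "params": list(params)}).encode(),
        headers={"Authorization": "Basic " + auth, "Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())["result"]


def get_tx(txid, height):
    # Resolve the confirmed height (as stored in the txid column family) to a block
    # hash, then ask bitcoind for the transaction within that specific block.
    blockhash = bitcoind_rpc("getblockhash", height)
    return bitcoind_rpc("getrawtransaction", txid, True, blockhash)
```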
## Headers (`headers`)

For faster loading, we store all block headers in RocksDB:

| Serialized header       |
| ----------------------- |
| `header as BlockHeader` |

In addition, we also store the chain tip:

| Key || Value                    |
| --- || ------------------------ |
| `T` || `blockhash as BlockHash` |

## Configuration (`config`)

| Key || Value                       |
| --- || --------------------------- |
| `C` || `serialized config as JSON` |
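As an illustration of the row layouts in the tables above, a small sketch (not electrs code; the helper names and byte order are assumptions) of how such key-only rows could be composed:

```python
"""Illustrative sketch of the key-only row encodings described by the schema tables."""
import hashlib
import struct


def funding_key(script_pubkey: bytes, height: int) -> bytes:
    # Script hash prefix (first 8 bytes of SHA256(script)) followed by height as u32.
    return hashlib.sha256(script_pubkey).digest()[:8] + struct.pack("<I", height)


def spending_key(prev_txid: bytes, prev_vout: int, height: int) -> bytes:
    # Previous outpoint prefix (txid[:8] interpreted as u64, plus vout) followed by height as u32.
    outpoint_prefix = struct.unpack("<Q", prev_txid[:8])[0] + prev_vout
    return struct.pack("<Q", outpoint_prefix) + struct.pack("<I", height)


def txid_key(txid: bytes, height: int) -> bytes:
    # Txid prefix (8 bytes) followed by the confirmed height as u32.
    return txid[:8] + struct.pack("<I", height)
```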
doc/usage.md | 173 changed lines
@@ -8,8 +8,8 @@ Note for Raspberry Pi 4 owners: the old versions of OS/toolchains produce broken
Make sure to use latest OS! (see #226)

Install [recent Rust](https://rustup.rs/) (1.41.1+, `apt install cargo` is preferred for Debian 10),
[latest Bitcoin Core](https://bitcoincore.org/en/download/) (0.16+)
and [latest Electrum wallet](https://electrum.org/#download) (3.3+).
[latest Bitcoin Core](https://bitcoincore.org/en/download/) (0.21+)
and [latest Electrum wallet](https://electrum.org/#download) (4.0+).

Also, install the following packages (on Debian or Ubuntu):
```bash
@@ -34,11 +34,11 @@ The advantages of dynamic linking:
* Cross compilation is more reliable
* If another application is also using `rocksdb`, you don't store it on disk and in RAM twice

If you decided to use dynamic linking, you will also need to install the library.
On Debian:
If you decided to use dynamic linking, you will also need to install the library ([6.11.4 release](https://github.com/facebook/rocksdb/releases/tag/v6.11.4) is required).
On [Debian 11 (bullseye)](https://packages.debian.org/bullseye/librocksdb-dev) and [Ubuntu 21.04 (hirsute)](https://packages.ubuntu.com/hirsute/librocksdb-dev):

```bash
$ sudo apt install librocksdb-dev
$ sudo apt install librocksdb-dev=6.11.4-3
```

#### Preparing for cross compilation
@@ -139,7 +139,7 @@ $ mkdir db
$ docker run --network host \
--volume $HOME/.bitcoin:/home/user/.bitcoin:ro \
--volume $PWD/db:/home/user/db \
--env ELECTRS_VERBOSE=4 \
--env ELECTRS_VERBOSE=2 \
--env ELECTRS_TIMESTAMP=true \
--env ELECTRS_DB_DIR=/home/user/db \
--rm -i -t electrs-app
@@ -150,7 +150,7 @@ If not using the host-network, you probably want to expose the ports for electrs
```bash
$ docker run --volume $HOME/.bitcoin:/home/user/.bitcoin:ro \
--volume $PWD/db:/home/user/db \
--env ELECTRS_VERBOSE=4 \
--env ELECTRS_VERBOSE=2 \
--env ELECTRS_TIMESTAMP=true \
--env ELECTRS_DB_DIR=/home/user/db \
--env ELECTRS_ELECTRUM_RPC_ADDR=0.0.0.0:50001 \
@@ -180,7 +180,7 @@ And two disadvantages:
* It's currently not trivial to independently verify the built packages, so you may need to trust the author of the repository.
The build is now deterministic but nobody verified it independently yet.
* The repository is considered beta.
`electrs` seems to work well so far but was not tested heavily.
electrs seems to work well so far but was not tested heavily.
The author of the repository is also a contributor to `electrs` and appreciates [bug reports](https://github.com/Kixunil/cryptoanarchy-deb-repo-builder/issues),
[test reports](https://github.com/Kixunil/cryptoanarchy-deb-repo-builder/issues/61), and other contributions.

@@ -193,7 +193,9 @@ If you use automated systems, refer to their documentation first!

Pruning must be turned **off** for `electrs` to work.
`txindex` is allowed but unnecessary for `electrs`.
However, you might still need it if you run other services (e.g. `eclair`)
However, you might still need it if you run other services (e.g. `eclair`).
The option `maxconnections` (if used) should be set to 12 or more for bitcoind to accept inbound p2p connections.
Note that setting `maxuploadtarget` may cause p2p-based sync to fail - so consider using `-whitelist=download@127.0.0.1` to disable the limit for local p2p connections.

The highly recommended way of authenticating `electrs` is using cookie file.
It's the most [secure](https://github.com/Kixunil/security_writings/blob/master/cookie_files.md) and robust method.
@@ -230,10 +232,10 @@ It is a good practice to use these special arguments at the beginning of the command line

**Naming convention**

For each command line argument an **environment variable** of the same name with `ELECTRS_` prefix, upper case letters and underscores instead of hyphens exists
For each command line argument an **environment variable** of the same name with `ELECTRS_` prefix, upper case letters and underscores instead of hypens exists
(e.g. you can use `ELECTRS_ELECTRUM_RPC_ADDR` instead of `--electrum-rpc-addr`).

Similarly, for each such argument an option in config file exists with underscores instead of hyphens (e.g. `electrum_rpc_addr`).
Similarly, for each such argument an option in config file exists with underscores instead of hypens (e.g. `electrum_rpc_addr`).

You need to use a number in config file if you want to increase verbosity (e.g. `verbose = 3` is equivalent to `-vvv`) and `true` value in case of flags (e.g. `timestamp = true`)
@@ -252,47 +254,58 @@ Please read upgrade notes if you're upgrading to a newer version.

### Electrs usage

First index sync should take ~1.5 hours (on a dual core Intel CPU @ 3.3 GHz, 8 GB RAM, 1TB WD Blue HDD):
First index sync should take ~4 hours for ~336GB @ August 2021 (on a dual core Intel CPU @ 3.3 GHz, 8 GB RAM, 1TB WD Blue HDD):
```bash
$ ./target/release/electrs -vvv --timestamp --db-dir ./db --electrum-rpc-addr="127.0.0.1:50001"
2018-08-17T18:27:42 - INFO - NetworkInfo { version: 179900, subversion: "/Satoshi:0.17.99/" }
2018-08-17T18:27:42 - INFO - BlockchainInfo { chain: "main", blocks: 537204, headers: 537204, bestblockhash: "0000000000000000002956768ca9421a8ddf4e53b1d81e429bd0125a383e3636", pruned: false, initialblockdownload: false }
2018-08-17T18:27:42 - DEBUG - opening DB at "./db/mainnet"
2018-08-17T18:27:42 - DEBUG - full compaction marker: None
2018-08-17T18:27:42 - INFO - listing block files at "/home/user/.bitcoin/blocks/blk*.dat"
2018-08-17T18:27:42 - INFO - indexing 1348 blk*.dat files
2018-08-17T18:27:42 - DEBUG - found 0 indexed blocks
2018-08-17T18:27:55 - DEBUG - applying 537205 new headers from height 0
2018-08-17T19:31:01 - DEBUG - no more blocks to index
2018-08-17T19:31:03 - DEBUG - no more blocks to index
2018-08-17T19:31:03 - DEBUG - last indexed block: best=0000000000000000002956768ca9421a8ddf4e53b1d81e429bd0125a383e3636 height=537204 @ 2018-08-17T15:24:02Z
2018-08-17T19:31:05 - DEBUG - opening DB at "./db/mainnet"
2018-08-17T19:31:06 - INFO - starting full compaction
2018-08-17T19:58:19 - INFO - finished full compaction
2018-08-17T19:58:19 - INFO - enabling auto-compactions
2018-08-17T19:58:19 - DEBUG - opening DB at "./db/mainnet"
2018-08-17T19:58:26 - DEBUG - applying 537205 new headers from height 0
2018-08-17T19:58:27 - DEBUG - downloading new block headers (537205 already indexed) from 000000000000000000150d26fcc38b8c3b71ae074028d1d50949ef5aa429da00
2018-08-17T19:58:27 - INFO - best=000000000000000000150d26fcc38b8c3b71ae074028d1d50949ef5aa429da00 height=537218 @ 2018-08-17T16:57:50Z (14 left to index)
2018-08-17T19:58:28 - DEBUG - applying 14 new headers from height 537205
2018-08-17T19:58:29 - INFO - RPC server running on 127.0.0.1:50001
$ du -ch ~/.bitcoin/blocks/blk*.dat | tail -n1
336G total

$ ./target/release/electrs -vv --timestamp --db-dir ./db --electrum-rpc-addr="127.0.0.1:50001"
Config { network: Bitcoin, db_path: "./db/bitcoin", daemon_dir: "/home/user/.bitcoin", daemon_auth: CookieFile("/home/user/.bitcoin/.cookie"), daemon_rpc_addr: V4(127.0.0.1:8332), daemon_p2p_addr: V4(127.0.0.1:8333), electrum_rpc_addr: V4(127.0.0.1:50001), monitoring_addr: V4(127.0.0.1:4224), wait_duration: 10s, index_batch_size: 10, index_lookup_limit: 100, ignore_mempool: false, server_banner: "Welcome to electrs 0.9.0 (Electrum Rust Server)!", args: [] }
[2021-08-17T18:48:40.054Z INFO electrs::metrics::metrics_impl] serving Prometheus metrics on 127.0.0.1:4224
[2021-08-17T18:48:40.944Z INFO electrs::db] "./db/bitcoin": 0 SST files, 0 GB, 0 Grows
[2021-08-17T18:48:41.075Z INFO electrs::index] indexing 2000 blocks: [1..2000]
[2021-08-17T18:48:41.610Z INFO electrs::chain] chain updated: tip=00000000dfd5d65c9d8561b4b8f60a63018fe3933ecb131fb37f905f87da951a, height=2000
[2021-08-17T18:48:41.623Z INFO electrs::index] indexing 2000 blocks: [2001..4000]
[2021-08-17T18:48:42.178Z INFO electrs::chain] chain updated: tip=00000000922e2aa9e84a474350a3555f49f06061fd49df50a9352f156692a842, height=4000
[2021-08-17T18:48:42.188Z INFO electrs::index] indexing 2000 blocks: [4001..6000]
[2021-08-17T18:48:42.714Z INFO electrs::chain] chain updated: tip=00000000dbbb79792303bdd1c6c4d7ab9c21bba0667213c2eca955e11230c5a5, height=6000
[2021-08-17T18:48:42.723Z INFO electrs::index] indexing 2000 blocks: [6001..8000]
[2021-08-17T18:48:43.235Z INFO electrs::chain] chain updated: tip=0000000094fbacdffec05aea9847000522a258c269ae37a74a818afb96fc27d9, height=8000
[2021-08-17T18:48:43.246Z INFO electrs::index] indexing 2000 blocks: [8001..10000]
[2021-08-17T18:48:43.768Z INFO electrs::chain] chain updated: tip=0000000099c744455f58e6c6e98b671e1bf7f37346bfd4cf5d0274ad8ee660cb, height=10000
<...>
[2021-08-17T22:11:20.139Z INFO electrs::chain] chain updated: tip=00000000000000000002a23d6df20eecec15b21d32c75833cce28f113de888b7, height=690000
[2021-08-17T22:11:20.157Z INFO electrs::index] indexing 2000 blocks: [690001..692000]
[2021-08-17T22:12:16.944Z INFO electrs::chain] chain updated: tip=000000000000000000054dab4b85860fcee5808ab7357eb2bb45114a25b77380, height=692000
[2021-08-17T22:12:16.957Z INFO electrs::index] indexing 2000 blocks: [692001..694000]
[2021-08-17T22:13:11.764Z INFO electrs::chain] chain updated: tip=00000000000000000003f5acb5ec81df7c98c16bc8d89bdaadd4e8965729c018, height=694000
[2021-08-17T22:13:11.777Z INFO electrs::index] indexing 2000 blocks: [694001..696000]
[2021-08-17T22:14:05.852Z INFO electrs::chain] chain updated: tip=0000000000000000000dfc81671ac5a22d8751f9c1506689d3eaceaef26470b9, height=696000
[2021-08-17T22:14:05.855Z INFO electrs::index] indexing 295 blocks: [696001..696295]
[2021-08-17T22:14:15.557Z INFO electrs::chain] chain updated: tip=0000000000000000000eceb67a01c81c65b538a7b3729f879c6c1e248bb6577a, height=696295
[2021-08-17T22:14:21.578Z INFO electrs::db] starting config compaction
[2021-08-17T22:14:21.623Z INFO electrs::db] starting headers compaction
[2021-08-17T22:14:21.667Z INFO electrs::db] starting txid compaction
[2021-08-17T22:22:27.009Z INFO electrs::db] starting funding compaction
[2021-08-17T22:38:17.104Z INFO electrs::db] starting spending compaction
[2021-08-17T22:55:11.785Z INFO electrs::db] finished full compaction
[2021-08-17T22:55:15.835Z INFO electrs::server] serving Electrum RPC on 127.0.0.1:50001
[2021-08-17T22:55:25.837Z INFO electrs::index] indexing 7 blocks: [696296..696302]
[2021-08-17T22:55:26.120Z INFO electrs::chain] chain updated: tip=0000000000000000000059e97dea0b0b9ebf4ac1fd66726b339fe1c9683de656, height=696302
[2021-08-17T23:02:03.453Z INFO electrs::index] indexing 1 blocks: [696303..696303]
[2021-08-17T23:02:03.691Z INFO electrs::chain] chain updated: tip=000000000000000000088107c337bf315e2db1e406c50566bd765f04a7e459b6, height=696303
```
You can specify options via command-line parameters, environment variables or using config files.
See the documentation above.

Note that the final DB size should be ~20% of the `blk*.dat` files, but it may increase to ~35% at the end of the initial sync (just before the [full compaction is invoked](https://github.com/facebook/rocksdb/wiki/Manual-Compaction)).
Note that the final DB size should be ~10% of the `blk*.dat` files, but it may increase to ~20% at the end of the initial sync (just before the [full compaction is invoked](https://github.com/facebook/rocksdb/wiki/Manual-Compaction)).

If initial sync fails due to `memory allocation of xxxxxxxx bytes failedAborted` errors, as may happen on devices with limited RAM, try the following arguments when starting `electrs`.
It should take roughly 18 hours to sync and compact the index on an ODROID-HC1 with 8 CPU cores @ 2GHz, 2GB RAM, and an SSD using the following command:

```bash
$ ./target/release/electrs -vvvv --index-batch-size=10 --jsonrpc-import --db-dir ./db --electrum-rpc-addr="127.0.0.1:50001"
```
It should take roughly 18 hours to sync and compact the index on an ODROID-HC1 with 8 CPU cores @ 2GHz, 2GB RAM, and an SSD using the command above.

The index database is stored here:
```bash
$ du db/
38G db/mainnet/
30G db/mainnet/
```

See below for [extra configuration suggestions](https://github.com/romanz/electrs/blob/master/doc/usage.md#extra-configuration-suggestions) that you might want to consider.
@@ -305,7 +318,7 @@ Read below otherwise.
There's a prepared script for launching `electrum` in such a way to connect only to the local `electrs` instance to protect your privacy.

```bash
$ ./scripts/local-electrum.bash
$ ./contrib/local-electrum.bash
+ ADDR=127.0.0.1
+ PORT=50001
+ PROTOCOL=t
@@ -412,7 +425,7 @@ After=bitcoind.service

[Service]
WorkingDirectory=/home/bitcoin/electrs
ExecStart=/home/bitcoin/electrs/target/release/electrs --db-dir ./db --electrum-rpc-addr="127.0.0.1:50001"
ExecStart=/home/bitcoin/electrs/target/release/electrs -vv --db-dir ./db --electrum-rpc-addr="127.0.0.1:50001"
User=bitcoin
Group=bitcoin
Type=simple
@@ -447,40 +460,60 @@ You can invoke any supported RPC using `netcat`, for example:

```
$ echo '{"jsonrpc": "2.0", "method": "server.version", "params": ["", "1.4"], "id": 0}' | netcat 127.0.0.1 50001
{"id":0,"jsonrpc":"2.0","result":["electrs 0.8.10","1.4"]}
```
Corresponding example in `Python`:

```
import json
import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect(("127.0.0.1", 50001))
    f = s.makefile()
    message = json.dumps({"jsonrpc": "2.0", "method": "server.version", "params": ["", "1.4"], "id": "0"})
    s.sendall((message + '\n').encode())
    print(json.loads(f.readline()))
{"id":0,"jsonrpc":"2.0","result":["electrs 0.9.0","1.4"]}
```

For more complex tasks, you may need to convert addresses to
[script hashes](https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-basics.html#script-hashes) - see
[contrib/addr.py](https://github.com/romanz/electrs/blob/master/contrib/addr.py) for getting an address balance:
[contrib/addr.py](https://github.com/romanz/electrs/blob/master/contrib/addr.py) for getting an address balance and history:

```
$ ./contrib/addr.py 144STc7gcb9XCp6t4hvrcUEKg9KemivsCR # sample address from block #640699
144STc7gcb9XCp6t4hvrcUEKg9KemivsCR has {'confirmed': 12652436, 'unconfirmed': 0} satoshis
$ ./contrib/history.sh 144STc7gcb9XCp6t4hvrcUEKg9KemivsCR
[2021-08-18 13:56:40.254317] INFO: electrum: connecting to localhost:50001
[2021-08-18 13:56:40.574461] INFO: electrum: subscribed to 1 scripthashes
[2021-08-18 13:56:40.645072] DEBUG: electrum: 0.00000 mBTC (total)
[2021-08-18 13:56:40.710279] INFO: electrum: got history of 2 transactions
[2021-08-18 13:56:40.769064] INFO: electrum: loaded 2 transactions
[2021-08-18 13:56:40.835569] INFO: electrum: loaded 2 header timestamps
[2021-08-18 13:56:40.900560] INFO: electrum: loaded 2 merkle proofs
+------------------------------------------------------------------+----------------------+--------+---------------+--------------+--------------+
| txid                                                             | block timestamp      | height | confirmations | delta (mBTC) | total (mBTC) |
+------------------------------------------------------------------+----------------------+--------+---------------+--------------+--------------+
| 34b6411d004f279622d0a45a4558746e1fa74323c5c01e9c0bb0a3277781a0d0 | 2020-07-25T08:33:57Z | 640699 | 55689         | 126.52436    | 126.52436    |
| e58916ca945639c657de137b30bd29e213e4c9fc8e04652c1abc2922909fb8fd | 2020-07-25T21:20:35Z | 640775 | 55613         | -126.52436   | 0.00000      |
+------------------------------------------------------------------+----------------------+--------+---------------+--------------+--------------+
[2021-08-18 13:56:40.902677] INFO: electrum: tip=00000000000000000009d7590d32ca52ad0b8a4cdfee43e28e6dfcd11cafeaac, height=696387 @ 2021-08-18T13:47:19Z
```
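As a minimal sketch of the address-to-script-hash conversion referenced above, reusing `contrib/client.py` and `pycoin` the same way `contrib/history.py` does (assumes a mainnet electrs on localhost:50001):

```python
#!/usr/bin/env python3
# Sketch only: convert an address to an Electrum script hash and query its balance.
import hashlib

import client
from pycoin.symbols.btc import network


def script_hash(address):
    script = network.parse.address(address).script()
    # Electrum script hashes are SHA256(scriptPubKey), byte-reversed and hex-encoded.
    return hashlib.sha256(script).digest()[::-1].hex()


conn = client.Client(("localhost", 50001))
balance, = conn.call([client.request("blockchain.scripthash.get_balance",
                                     script_hash("144STc7gcb9XCp6t4hvrcUEKg9KemivsCR"))])
print(balance)
```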
## Upgrading

> **If you're upgrading from version 0.8.7 to a higher version and used `cookie` option you should change your configuration!**
> The `cookie` option was deprecated and **will be removed eventually**!
> If you had actual cookie (from `~/bitcoin/.cookie` file) specified in `cookie` option, this was wrong as it wouldn't get updated when needed.
> It's strongly recommended to use proper cookie authentication using `cookie_file`.
> If you really have to use fixed username and password, explicitly specified in `bitcoind` config, use `auth` option instead.
> Users of `btc-rpc-proxy` using `public:public` need to use `auth` too.
> You can read [a detailed explanation of cookie deprecation with motivation explained](cookie_deprecation.md).
### Important changes from versions older than 0.9.0

In 0.9.0 we have changed the RocksDB index format to optimize electrs performance.
We also use Bitcoin P2P protocol instead of reading blocks from disk or JSON RPC.

Upgrading checklist:

* Make sure you upgrade at a time when you don't need to use electrs for a while.
  Because of the reindex, electrs will be unable to serve your requests for a few hours.
  (The exact time depends on your hardware.)
  If you wish to check the database without reindexing run electrs with `--no-auto-reindex`.
* Make sure to allow access to bitcoind from the local address, ideally whitelist it using the `whitelist=download@127.0.0.1` bitcoind option.
  Either don't use the `maxconnections` bitcoind option or set it to 12 or more.
* If you use a non-default P2P port for bitcoind adjust the `electrs` configuration.
* If you still didn't migrate the `cookie` electrs option you have to now - see below.

### Important changes from version older than 0.8.8

**If you're upgrading from version 0.8.7 to a higher version and used `cookie` option you should change your configuration!**
The `cookie` option was deprecated and **will be removed eventually**!
If you had actual cookie (from `~/bitcoin/.cookie` file) specified in `cookie` option, this was wrong as it wouldn't get updated when needed.
It's strongly recommended to use proper cookie authentication using `cookie_file`.
If you really have to use fixed username and password, explicitly specified in `bitcoind` config, use `auth` option instead.
Users of `btc-rpc-proxy` using `public:public` need to use `auth` too.
You can read [a detailed explanation of cookie deprecation with motivation explained](cookie_deprecation.md).

### General upgrading guide

As with any other application, you need to remember how you installed `electrs` to upgrade it.
If you don't then here's a little help: run `which electrs` and compare the output
@@ -508,8 +541,8 @@ If a new version of `electrs` is not yet in the package system, try wait a few d
If you've deleted it, you need to `git clone` again.
2. `git checkout master`
3. `git pull`
4. Strongly recommended: `git verify-tag v0.8.10` (fix the version number if we've forgotten to update this docs ;)) should show "Good signature from 15C8 C357 4AE4 F1E2 5F3F 35C5 87CA E5FA 4691 7CBB"
5. `git checkout v0.8.10`
4. Strongly recommended: `git verify-tag v0.9.0` (fix the version number if we've forgotten to update the docs ;)) should show "Good signature from 15C8 C357 4AE4 F1E2 5F3F 35C5 87CA E5FA 4691 7CBB"
5. `git checkout v0.9.0`
6. If you used static linking: `cargo build --locked --release`.
If you used dynamic linking `ROCKSDB_INCLUDE_DIR=/usr/include ROCKSDB_LIB_DIR=/usr/lib cargo build --locked --no-default-features --release`.
If you don't remember which linking you used, you probably used static.
@@ -1,29 +0,0 @@
/// Benchmark full compaction.
extern crate electrs;

#[macro_use]
extern crate log;

extern crate error_chain;

use electrs::{config::Config, errors::*, store::DBStore};

use error_chain::ChainedError;

fn run(config: Config) -> Result<()> {
    if !config.db_path.exists() {
        panic!(
            "DB {:?} must exist when running this benchmark!",
            config.db_path
        );
    }
    let store = DBStore::open(&config.db_path, /*low_memory=*/ true);
    store.compact();
    Ok(())
}

fn main() {
    if let Err(e) = run(Config::from_args()) {
        error!("{}", e.display_chain());
    }
}
@@ -1,42 +0,0 @@
/// Benchmark regular indexing flow (using JSONRPC), don't persist the resulting index.
extern crate electrs;
extern crate error_chain;

#[macro_use]
extern crate log;

use electrs::{
    cache::BlockTxIDsCache, config::Config, daemon::Daemon, errors::*, fake::FakeStore,
    index::Index, metrics::Metrics, signal::Waiter,
};
use error_chain::ChainedError;
use std::sync::Arc;

fn run() -> Result<()> {
    let signal = Waiter::start();
    let config = Config::from_args();
    let metrics = Metrics::new(config.monitoring_addr);
    metrics.start();
    let cache = Arc::new(BlockTxIDsCache::new(0, &metrics));

    let daemon = Daemon::new(
        &config.daemon_dir,
        &config.blocks_dir,
        config.daemon_rpc_addr,
        config.cookie_getter(),
        config.network_type,
        signal.clone(),
        cache,
        &metrics,
    )?;
    let fake_store = FakeStore {};
    let index = Index::load(&fake_store, &daemon, &metrics, config.index_batch_size)?;
    index.update(&fake_store, &signal)?;
    Ok(())
}

fn main() {
    if let Err(e) = run() {
        error!("{}", e.display_chain());
    }
}
@@ -1,49 +0,0 @@
extern crate electrs;

extern crate hex;
extern crate log;

use electrs::{config::Config, store::DBStore};

fn max_collision(store: DBStore, prefix: &[u8]) {
    let prefix_len = prefix.len();
    let mut prev: Option<Vec<u8>> = None;
    let mut collision_max = 0;

    for row in store.iter_scan(prefix) {
        assert!(row.key.starts_with(prefix));
        if let Some(prev) = prev {
            let collision_len = prev
                .iter()
                .zip(row.key.iter())
                .take_while(|(a, b)| a == b)
                .count();
            if collision_len > collision_max {
                eprintln!(
                    "{} bytes collision found:\n{:?}\n{:?}\n",
                    collision_len - prefix_len,
                    revhex(&prev[prefix_len..]),
                    revhex(&row.key[prefix_len..]),
                );
                collision_max = collision_len;
            }
        }
        prev = Some(row.key.to_vec());
    }
}

fn revhex(value: &[u8]) -> String {
    hex::encode(&value.iter().cloned().rev().collect::<Vec<u8>>())
}

fn run(config: Config) {
    if !config.db_path.exists() {
        panic!("DB {:?} must exist when running this tool!", config.db_path);
    }
    let store = DBStore::open(&config.db_path, /*low_memory=*/ false);
    max_collision(store, b"T");
}

fn main() {
    run(Config::from_args());
}
@@ -17,6 +17,11 @@ count = true
name = "timestamp"
doc = "Prepend log lines with a timestamp"

[[switch]]
name = "auto_reindex"
doc = "Automatically reindex the database if it's inconsistent or in old format"
default = true

[[param]]
name = "db_dir"
type = "std::path::PathBuf"
@@ -29,19 +34,6 @@ type = "std::path::PathBuf"
doc = "Data directory of Bitcoind (default: ~/.bitcoin/)"
default = "crate::config::default_daemon_dir()"

[[param]]
name = "blocks_dir"
type = "std::path::PathBuf"
doc = "Analogous to bitcoind's -blocksdir option, this specifies the directory containing the raw blocks files (blk*.dat)"

[[param]]
name = "cookie"
type = "String"
doc = "DEPRECATED: use cookie_file or auth instead!"
# Force the user to use config file in order to avoid password leaks
argument = false
env_var = false

[[param]]
name = "auth"
type = "String"
@@ -72,16 +64,16 @@ doc = "Electrum server JSONRPC 'addr:port' to listen on (default: '127.0.0.1:500
name = "daemon_rpc_addr"
type = "crate::config::ResolvAddr"
doc = "Bitcoin daemon JSONRPC 'addr:port' to connect (default: 127.0.0.1:8332 for mainnet, 127.0.0.1:18332 for testnet, 127.0.0.1:18443 for regtest and 127.0.0.1:18554 for signet)"
[[param]]
name = "daemon_p2p_addr"
type = "crate::config::ResolvAddr"
doc = "Bitcoin daemon p2p 'addr:port' to connect (default: 127.0.0.1:8333 for mainnet, 127.0.0.1:18333 for testnet, 127.0.0.1:18444 for regtest and 127.0.0.1:38333 for signet)"

[[param]]
name = "monitoring_addr"
type = "crate::config::ResolvAddr"
doc = "Prometheus monitoring 'addr:port' to listen on (default: 127.0.0.1:4224 for mainnet, 127.0.0.1:14224 for testnet, 127.0.0.1:24224 for regtest and 127.0.0.1:34224 for regtest)"

[[switch]]
name = "jsonrpc_import"
doc = "Use JSONRPC instead of directly importing blk*.dat files. Useful for remote full node or low memory system"

[[param]]
name = "wait_duration_secs"
type = "u64"
@@ -94,29 +86,19 @@ type = "usize"
doc = "Number of blocks to get in one JSONRPC request from bitcoind"
default = "10"

[[param]]
name = "bulk_index_threads"
type = "usize"
doc = "Number of threads used for bulk indexing (default: use the # of CPUs)"
default = "0"
[[switch]]
name = "ignore_mempool"
doc = "Don't sync mempool - queries will show only confirmed transactions."

[[switch]]
name = "sync_once"
doc = "Exit after the initial sync is over (don't start Electrum server)."

[[param]]
name = "tx_cache_size_mb"
type = "f32"
doc = "Total size of transactions to cache (MB)"
default = "10.0"

[[param]]
name = "blocktxids_cache_size_mb"
type = "f32"
doc = "Total size of block transactions IDs to cache (in MB)"
default = "10.0"

[[param]]
name = "txid_limit"
name = "index_lookup_limit"
type = "usize"
doc = "Number of transactions to lookup before returning an error, to prevent 'too popular' addresses from causing the RPC server to get stuck (0 - disable the limit)"
default = "100"
default = "200"

[[param]]
name = "server_banner"
@@ -1,19 +0,0 @@
#!/bin/bash
set -eu
trap 'kill $(jobs -p)' EXIT

DELAY=5
LOG=/tmp/electrs.log
CARGO="cargo +stable"

tail -v -n0 -F "$LOG" &

export RUST_BACKTRACE=1
while :
do
    $CARGO fmt
    $CARGO check --release
    $CARGO run --release -- $* 2>> "$LOG"
    echo "Restarting in $DELAY seconds..."
    sleep $DELAY
done
server.sh | 15 lines (new executable file)
@@ -0,0 +1,15 @@
#!/bin/bash
set -eux
cd `dirname $0`

cargo fmt --all
cargo build --all --release

NETWORK=$1
shift

DB=./db
export RUST_LOG=${RUST_LOG-electrs=INFO}
target/release/electrs --network $NETWORK --db-dir $DB --daemon-dir $HOME/.bitcoin $*

# use SIGINT to quit
src/app.rs | 60 changed lines
@@ -1,60 +0,0 @@
use bitcoin::hash_types::BlockHash;
use std::sync::{Arc, Mutex};

use crate::{config::Config, daemon, errors::*, index, signal::Waiter, store};

pub struct App {
    store: store::DBStore,
    index: index::Index,
    daemon: daemon::Daemon,
    banner: String,
    tip: Mutex<BlockHash>,
}

impl App {
    pub fn new(
        store: store::DBStore,
        index: index::Index,
        daemon: daemon::Daemon,
        config: &Config,
    ) -> Result<Arc<App>> {
        Ok(Arc::new(App {
            store,
            index,
            daemon: daemon.reconnect()?,
            banner: config.server_banner.clone(),
            tip: Mutex::new(BlockHash::default()),
        }))
    }

    fn write_store(&self) -> &impl store::WriteStore {
        &self.store
    }
    // TODO: use index for queries.
    pub fn read_store(&self) -> &dyn store::ReadStore {
        &self.store
    }
    pub fn index(&self) -> &index::Index {
        &self.index
    }
    pub fn daemon(&self) -> &daemon::Daemon {
        &self.daemon
    }

    pub fn update(&self, signal: &Waiter) -> Result<bool> {
        let mut tip = self.tip.lock().expect("failed to lock tip");
        let new_block = *tip != self.daemon().getbestblockhash()?;
        if new_block {
            *tip = self.index().update(self.write_store(), signal)?;
        }
        Ok(new_block)
    }

    pub fn get_banner(&self) -> Result<String> {
        Ok(format!(
            "{}\n{}",
            self.banner,
            self.daemon.get_subversion()?
        ))
    }
}
@ -1,89 +1,17 @@
extern crate electrs;
#![recursion_limit = "256"]

extern crate error_chain;
#[macro_use]
extern crate log;
use anyhow::{Context, Result};
use electrs::{server, Config, Daemon, Rpc, Tracker};

use error_chain::ChainedError;
use std::process;
use std::sync::Arc;

use electrs::{
    app::App,
    bulk,
    cache::{BlockTxIDsCache, TransactionCache},
    config::Config,
    daemon::Daemon,
    errors::*,
    index::Index,
    metrics::Metrics,
    query::Query,
    rpc::RPC,
    signal::Waiter,
    store::{full_compaction, is_fully_compacted, DBStore},
};

fn run_server(config: &Config) -> Result<()> {
    let signal = Waiter::start();
    let metrics = Metrics::new(config.monitoring_addr);
    metrics.start();
    let blocktxids_cache = Arc::new(BlockTxIDsCache::new(config.blocktxids_cache_size, &metrics));

    let daemon = Daemon::new(
        &config.daemon_dir,
        &config.blocks_dir,
        config.daemon_rpc_addr,
        config.cookie_getter(),
        config.network_type,
        signal.clone(),
        blocktxids_cache,
        &metrics,
    )?;
    // Perform initial indexing from local blk*.dat block files.
    let store = DBStore::open(&config.db_path, /*low_memory=*/ config.jsonrpc_import);
    let index = Index::load(&store, &daemon, &metrics, config.index_batch_size)?;
    let store = if is_fully_compacted(&store) {
        store // initial import and full compaction are over
    } else if config.jsonrpc_import {
        index.update(&store, &signal)?; // slower: uses JSONRPC for fetching blocks
        full_compaction(store)
    } else {
        // faster, but uses more memory
        let store =
            bulk::index_blk_files(&daemon, config.bulk_index_threads, &metrics, &signal, store)?;
        let store = full_compaction(store);
        index.reload(&store); // make sure the block header index is up-to-date
        store
    }
    .enable_compaction(); // enable auto compactions before starting incremental index updates.

    let app = App::new(store, index, daemon, &config)?;
    let tx_cache = TransactionCache::new(config.tx_cache_size, &metrics);
    let query = Query::new(app.clone(), &metrics, tx_cache, config.txid_limit);
    let relayfee = query.get_relayfee()?;
    debug!("relayfee: {} BTC", relayfee);

    let mut server = None; // Electrum RPC server
    loop {
        app.update(&signal)?;
        query.update_mempool()?;
        server
            .get_or_insert_with(|| {
                RPC::start(config.electrum_rpc_addr, query.clone(), &metrics, relayfee)
            })
            .notify(); // update subscribed clients
        if let Err(err) = signal.wait(config.wait_duration) {
            info!("stopping server: {}", err);
            break;
        }
    }
    Ok(())
}

fn main() {
fn main() -> Result<()> {
    let config = Config::from_args();
    if let Err(e) = run_server(&config) {
        error!("server failed: {}", e.display_chain());
        process::exit(1);
    let mut tracker = Tracker::new(&config)?;
    tracker
        .sync(&Daemon::connect(&config)?)
        .context("initial sync failed")?;
    if config.sync_once {
        return Ok(());
    }
    // re-connect after initial sync (due to possible timeout during compaction)
    server::run(&config, Rpc::new(&config, tracker)?).context("server failed")
}

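The new main() above switches error reporting from error_chain's display_chain() to anyhow's Context, and returns Result<()> from main so the whole error chain is printed on exit. A minimal self-contained sketch of that pattern, separate from electrs (the file name and messages below are hypothetical):

use anyhow::{Context, Result};

fn read_config() -> Result<String> {
    // io::Error is converted into anyhow::Error with an attached message
    std::fs::read_to_string("electrs.toml").context("failed to read electrs.toml")
}

fn main() -> Result<()> {
    // returning Err here prints the full context chain and exits non-zero
    let _cfg = read_config().context("startup failed")?;
    Ok(())
}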
289
src/bulk.rs
@ -1,289 +0,0 @@
use bitcoin::blockdata::block::Block;
use bitcoin::consensus::encode::{deserialize, Decodable};
use bitcoin::hash_types::BlockHash;
use std::collections::HashSet;
use std::fs;
use std::io::Cursor;
use std::path::{Path, PathBuf};
use std::sync::{
    mpsc::{Receiver, SyncSender},
    Arc, Mutex,
};
use std::thread;

use crate::daemon::Daemon;
use crate::errors::*;
use crate::index::{index_block, last_indexed_block, read_indexed_blockhashes};
use crate::metrics::{CounterVec, Histogram, HistogramOpts, HistogramVec, MetricOpts, Metrics};
use crate::signal::Waiter;
use crate::store::{DBStore, Row, WriteStore};
use crate::util::{spawn_thread, HeaderList, SyncChannel};

struct Parser {
    magic: u32,
    current_headers: HeaderList,
    indexed_blockhashes: Mutex<HashSet<BlockHash>>,
    // metrics
    duration: HistogramVec,
    block_count: CounterVec,
    bytes_read: Histogram,
}

impl Parser {
    fn new(
        daemon: &Daemon,
        metrics: &Metrics,
        indexed_blockhashes: HashSet<BlockHash>,
    ) -> Result<Arc<Parser>> {
        Ok(Arc::new(Parser {
            magic: daemon.magic(),
            current_headers: load_headers(daemon)?,
            indexed_blockhashes: Mutex::new(indexed_blockhashes),
            duration: metrics.histogram_vec(
                HistogramOpts::new(
                    "electrs_parse_duration",
                    "blk*.dat parsing duration (in seconds)",
                ),
                &["step"],
            ),
            block_count: metrics.counter_vec(
                MetricOpts::new("electrs_parse_blocks", "# of block parsed (from blk*.dat)"),
                &["type"],
            ),

            bytes_read: metrics.histogram(HistogramOpts::new(
                "electrs_parse_bytes_read",
                "# of bytes read (from blk*.dat)",
            )),
        }))
    }

    fn last_indexed_row(&self) -> Row {
        // TODO: use JSONRPC for missing blocks, and don't use 'L' row at all.
        let indexed_blockhashes = self.indexed_blockhashes.lock().unwrap();
        let last_header = self
            .current_headers
            .iter()
            .take_while(|h| indexed_blockhashes.contains(h.hash()))
            .last()
            .expect("no indexed header found");
        debug!("last indexed block: {:?}", last_header);
        last_indexed_block(last_header.hash())
    }

    fn read_blkfile(&self, path: &Path) -> Result<Vec<u8>> {
        let timer = self.duration.with_label_values(&["read"]).start_timer();
        let blob = fs::read(&path).chain_err(|| format!("failed to read {:?}", path))?;
        timer.observe_duration();
        self.bytes_read.observe(blob.len() as f64);
        Ok(blob)
    }

    fn index_blkfile(&self, blob: Vec<u8>) -> Result<Vec<Row>> {
        let timer = self.duration.with_label_values(&["parse"]).start_timer();
        let blocks = parse_blocks(blob, self.magic)?;
        timer.observe_duration();

        let mut rows = Vec::<Row>::new();
        let timer = self.duration.with_label_values(&["index"]).start_timer();
        for block in blocks {
            let blockhash = block.block_hash();
            if let Some(header) = self.current_headers.header_by_blockhash(&blockhash) {
                if self
                    .indexed_blockhashes
                    .lock()
                    .expect("indexed_blockhashes")
                    .insert(blockhash)
                {
                    rows.extend(index_block(&block, header.height()));
                    self.block_count.with_label_values(&["indexed"]).inc();
                } else {
                    self.block_count.with_label_values(&["duplicate"]).inc();
                }
            } else {
                // will be indexed later (after bulk load is over) if not an orphan block
                self.block_count.with_label_values(&["skipped"]).inc();
            }
        }
        timer.observe_duration();

        let timer = self.duration.with_label_values(&["sort"]).start_timer();
        rows.sort_unstable_by(|a, b| a.key.cmp(&b.key));
        timer.observe_duration();
        Ok(rows)
    }
}

fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<Block>> {
    let mut cursor = Cursor::new(&blob);
    let mut blocks = vec![];
    let max_pos = blob.len() as u64;
    while cursor.position() < max_pos {
        let offset = cursor.position();
        match u32::consensus_decode(&mut cursor) {
            Ok(value) => {
                if magic != value {
                    cursor.set_position(offset + 1);
                    continue;
                }
            }
            Err(_) => break, // EOF
        };
        let block_size = u32::consensus_decode(&mut cursor).chain_err(|| "no block size")?;
        let start = cursor.position();
        let end = start + block_size as u64;

        // If Core's WriteBlockToDisk ftell fails, only the magic bytes and size will be written
        // and the block body won't be written to the blk*.dat file.
        // Since the first 4 bytes should contain the block's version, we can skip such blocks
        // by peeking the cursor (and skipping previous `magic` and `block_size`).
        match u32::consensus_decode(&mut cursor) {
            Ok(value) => {
                if magic == value {
                    cursor.set_position(start);
                    continue;
                }
            }
            Err(_) => break, // EOF
        }
        let block: Block = deserialize(&blob[start as usize..end as usize])
            .chain_err(|| format!("failed to parse block at {}..{}", start, end))?;
        blocks.push(block);
        cursor.set_position(end as u64);
    }
    Ok(blocks)
}

fn load_headers(daemon: &Daemon) -> Result<HeaderList> {
    let tip = daemon.getbestblockhash()?;
    let mut headers = HeaderList::empty();
    let new_headers = headers.order(daemon.get_new_headers(&headers, &tip)?);
    headers.apply(new_headers, tip);
    Ok(headers)
}

fn set_open_files_limit(limit: libc::rlim_t) {
    let resource = libc::RLIMIT_NOFILE;
    let mut rlim = libc::rlimit {
        rlim_cur: 0,
        rlim_max: 0,
    };
    let result = unsafe { libc::getrlimit(resource, &mut rlim) };
    if result < 0 {
        panic!("getrlimit() failed: {}", result);
    }
    rlim.rlim_cur = limit; // set softs limit only.
    let result = unsafe { libc::setrlimit(resource, &rlim) };
    if result < 0 {
        panic!("setrlimit() failed: {}", result);
    }
}

type JoinHandle = thread::JoinHandle<Result<()>>;
type BlobReceiver = Arc<Mutex<Receiver<(Vec<u8>, PathBuf)>>>;

fn start_reader(blk_files: Vec<PathBuf>, parser: Arc<Parser>) -> (BlobReceiver, JoinHandle) {
    let chan = SyncChannel::new(0);
    let blobs = chan.sender();
    let handle = spawn_thread("bulk_read", move || -> Result<()> {
        for path in blk_files {
            blobs
                .send((parser.read_blkfile(&path)?, path))
                .expect("failed to send blk*.dat contents");
        }
        Ok(())
    });
    (Arc::new(Mutex::new(chan.into_receiver())), handle)
}

fn start_indexer(
    blobs: BlobReceiver,
    parser: Arc<Parser>,
    writer: SyncSender<(Vec<Row>, PathBuf)>,
) -> JoinHandle {
    spawn_thread("bulk_index", move || -> Result<()> {
        loop {
            let msg = blobs.lock().unwrap().recv();
            if let Ok((blob, path)) = msg {
                let rows = parser
                    .index_blkfile(blob)
                    .chain_err(|| format!("failed to index {:?}", path))?;
                writer
                    .send((rows, path))
                    .expect("failed to send indexed rows")
            } else {
                debug!("no more blocks to index");
                break;
            }
        }
        Ok(())
    })
}

pub fn index_blk_files(
    daemon: &Daemon,
    index_threads: usize,
    metrics: &Metrics,
    signal: &Waiter,
    store: DBStore,
) -> Result<DBStore> {
    set_open_files_limit(2048); // twice the default `ulimit -n` value
    let blk_files = daemon.list_blk_files()?;
    info!("indexing {} blk*.dat files", blk_files.len());
    let indexed_blockhashes = read_indexed_blockhashes(&store);
    debug!("found {} indexed blocks", indexed_blockhashes.len());
    let parser = Parser::new(daemon, metrics, indexed_blockhashes)?;
    let (blobs, reader) = start_reader(blk_files, parser.clone());
    let rows_chan = SyncChannel::new(0);
    let indexers: Vec<JoinHandle> = (0..index_threads)
        .map(|_| start_indexer(blobs.clone(), parser.clone(), rows_chan.sender()))
        .collect();

    for (rows, path) in rows_chan.into_receiver() {
        trace!("indexed {:?}: {} rows", path, rows.len());
        store.write(rows);
        signal
            .poll()
            .chain_err(|| "stopping bulk indexing due to signal")?;
    }
    reader
        .join()
        .expect("reader panicked")
        .expect("reader failed");

    indexers.into_iter().for_each(|i| {
        i.join()
            .expect("indexer panicked")
            .expect("indexing failed")
    });
    store.write(vec![parser.last_indexed_row()]);
    Ok(store)
}

#[cfg(test)]
mod tests {

    use super::*;
    use bitcoin::hashes::Hash;
    use hex::decode as hex_decode;

    #[test]
    fn test_incomplete_block_parsing() {
        let magic = 0x0709110b;
        let raw_blocks = hex_decode(fixture("incomplete_block.hex")).unwrap();
        let blocks = parse_blocks(raw_blocks, magic).unwrap();
        assert_eq!(blocks.len(), 2);
        assert_eq!(
            blocks[1].block_hash().into_inner().to_vec(),
            hex_decode("d55acd552414cc44a761e8d6b64a4d555975e208397281d115336fc500000000").unwrap()
        );
    }

    pub fn fixture(filename: &str) -> String {
        let path = Path::new("src")
            .join("tests")
            .join("fixtures")
            .join(filename);
        fs::read_to_string(path).unwrap()
    }
}
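The removed parse_blocks() above scans raw blk*.dat files: each record is a 4-byte network magic, a 4-byte little-endian length, then the serialized block, and truncated records at the end of a file are skipped. A minimal self-contained sketch of that framing, independent of the electrs and bitcoin crate types (the byte layout is taken from the code above; the helper name is hypothetical and the "peek for the next magic" refinement is omitted):

use std::convert::TryInto;

/// Split a blk*.dat-style buffer into (start, end) byte ranges of serialized blocks.
/// Framing per record: [4-byte magic][4-byte LE length][length bytes of block data].
fn block_ranges(blob: &[u8], magic: u32) -> Vec<(usize, usize)> {
    let mut ranges = Vec::new();
    let mut pos = 0usize;
    while pos + 8 <= blob.len() {
        let m = u32::from_le_bytes(blob[pos..pos + 4].try_into().unwrap());
        if m != magic {
            pos += 1; // resync byte-by-byte, as parse_blocks() did
            continue;
        }
        let len = u32::from_le_bytes(blob[pos + 4..pos + 8].try_into().unwrap()) as usize;
        let (start, end) = (pos + 8, pos + 8 + len);
        if end > blob.len() {
            break; // truncated record at EOF
        }
        ranges.push((start, end));
        pos = end;
    }
    ranges
}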
288
src/cache.rs
@ -1,273 +1,43 @@
|
||||
use crate::errors::*;
|
||||
use crate::metrics::{CounterVec, MetricOpts, Metrics};
|
||||
use bitcoin::{BlockHash, Transaction, Txid};
|
||||
use parking_lot::RwLock;
|
||||
|
||||
use bitcoin::blockdata::transaction::Transaction;
|
||||
use bitcoin::consensus::encode::deserialize;
|
||||
use bitcoin::hash_types::{BlockHash, Txid};
|
||||
use lru::LruCache;
|
||||
use prometheus::IntGauge;
|
||||
use std::hash::Hash;
|
||||
use std::sync::Mutex;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
struct SizedLruCache<K, V> {
|
||||
map: LruCache<K, (V, usize)>,
|
||||
bytes_usage: usize,
|
||||
bytes_capacity: usize,
|
||||
lookups: CounterVec,
|
||||
usage: IntGauge,
|
||||
use crate::merkle::Proof;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Cache {
|
||||
txs: Arc<RwLock<HashMap<Txid, Transaction>>>,
|
||||
proofs: Arc<RwLock<HashMap<(BlockHash, Txid), Proof>>>,
|
||||
}
|
||||
|
||||
impl<K: Hash + Eq, V> SizedLruCache<K, V> {
|
||||
fn new(bytes_capacity: usize, lookups: CounterVec, usage: IntGauge) -> SizedLruCache<K, V> {
|
||||
SizedLruCache {
|
||||
map: LruCache::unbounded(),
|
||||
bytes_usage: 0,
|
||||
bytes_capacity,
|
||||
lookups,
|
||||
usage,
|
||||
}
|
||||
impl Cache {
|
||||
pub(crate) fn add_tx(&self, txid: Txid, f: impl FnOnce() -> Transaction) {
|
||||
self.txs.write().entry(txid).or_insert_with(f);
|
||||
}
|
||||
|
||||
fn get(&mut self, key: &K) -> Option<&V> {
|
||||
match self.map.get(key) {
|
||||
None => {
|
||||
self.lookups.with_label_values(&["miss"]).inc();
|
||||
None
|
||||
}
|
||||
Some((value, _)) => {
|
||||
self.lookups.with_label_values(&["hit"]).inc();
|
||||
Some(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn put(&mut self, key: K, value: V, byte_size: usize) {
|
||||
if byte_size > self.bytes_capacity {
|
||||
return;
|
||||
}
|
||||
if let Some((_, popped_size)) = self.map.put(key, (value, byte_size)) {
|
||||
self.bytes_usage -= popped_size
|
||||
}
|
||||
self.bytes_usage += byte_size;
|
||||
|
||||
while self.bytes_usage > self.bytes_capacity {
|
||||
match self.map.pop_lru() {
|
||||
Some((_, (_, popped_size))) => self.bytes_usage -= popped_size,
|
||||
None => break,
|
||||
}
|
||||
}
|
||||
|
||||
self.usage.set(self.bytes_usage as i64);
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BlockTxIDsCache {
|
||||
map: Mutex<SizedLruCache<BlockHash, Vec<Txid>>>,
|
||||
}
|
||||
|
||||
impl BlockTxIDsCache {
|
||||
pub fn new(bytes_capacity: usize, metrics: &Metrics) -> BlockTxIDsCache {
|
||||
let lookups = metrics.counter_vec(
|
||||
MetricOpts::new(
|
||||
"electrs_blocktxids_cache",
|
||||
"# of cache lookups for list of transactions in a block",
|
||||
),
|
||||
&["type"],
|
||||
);
|
||||
let usage = metrics.gauge_int(MetricOpts::new(
|
||||
"electrs_blocktxids_cache_size",
|
||||
"Cache usage for list of transactions in a block (bytes)",
|
||||
));
|
||||
BlockTxIDsCache {
|
||||
map: Mutex::new(SizedLruCache::new(bytes_capacity, lookups, usage)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_or_else<F>(&self, blockhash: &BlockHash, load_txids_func: F) -> Result<Vec<Txid>>
|
||||
pub(crate) fn get_tx<F, T>(&self, txid: &Txid, f: F) -> Option<T>
|
||||
where
|
||||
F: FnOnce() -> Result<Vec<Txid>>,
|
||||
F: FnOnce(&Transaction) -> T,
|
||||
{
|
||||
if let Some(txids) = self.map.lock().unwrap().get(blockhash) {
|
||||
return Ok(txids.clone());
|
||||
}
|
||||
|
||||
let txids = load_txids_func()?;
|
||||
let byte_size = 32 /* hash size */ * (1 /* key */ + txids.len() /* values */);
|
||||
self.map
|
||||
.lock()
|
||||
.unwrap()
|
||||
.put(*blockhash, txids.clone(), byte_size);
|
||||
Ok(txids)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TransactionCache {
|
||||
// Store serialized transaction (should use less RAM).
|
||||
map: Mutex<SizedLruCache<Txid, Vec<u8>>>,
|
||||
}
|
||||
|
||||
impl TransactionCache {
|
||||
pub fn new(bytes_capacity: usize, metrics: &Metrics) -> TransactionCache {
|
||||
let lookups = metrics.counter_vec(
|
||||
MetricOpts::new(
|
||||
"electrs_transactions_cache",
|
||||
"# of cache lookups for transactions",
|
||||
),
|
||||
&["type"],
|
||||
);
|
||||
let usage = metrics.gauge_int(MetricOpts::new(
|
||||
"electrs_transactions_cache_size",
|
||||
"Cache usage for list of transactions (bytes)",
|
||||
));
|
||||
TransactionCache {
|
||||
map: Mutex::new(SizedLruCache::new(bytes_capacity, lookups, usage)),
|
||||
}
|
||||
self.txs.read().get(txid).map(f)
|
||||
}
|
||||
|
||||
pub fn get_or_else<F>(&self, txid: &Txid, load_txn_func: F) -> Result<Transaction>
|
||||
pub(crate) fn add_proof<F>(&self, blockhash: BlockHash, txid: Txid, f: F)
|
||||
where
|
||||
F: FnOnce() -> Result<Vec<u8>>,
|
||||
F: FnOnce() -> Proof,
|
||||
{
|
||||
if let Some(serialized_txn) = self.map.lock().unwrap().get(txid) {
|
||||
return deserialize(serialized_txn).chain_err(|| "failed to parse cached tx");
|
||||
}
|
||||
let serialized_txn = load_txn_func()?;
|
||||
let txn = deserialize(&serialized_txn).chain_err(|| "failed to parse serialized tx")?;
|
||||
let byte_size = 32 /* key (hash size) */ + serialized_txn.len();
|
||||
self.map
|
||||
.lock()
|
||||
.unwrap()
|
||||
.put(*txid, serialized_txn, byte_size);
|
||||
Ok(txn)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use bitcoin::hashes::Hash;
|
||||
|
||||
#[test]
|
||||
fn test_sized_lru_cache_hit_and_miss() {
|
||||
let counter = CounterVec::new(prometheus::Opts::new("name", "help"), &["type"]).unwrap();
|
||||
let usage = IntGauge::new("usage", "help").unwrap();
|
||||
let mut cache = SizedLruCache::<i8, i32>::new(100, counter.clone(), usage.clone());
|
||||
assert_eq!(counter.with_label_values(&["miss"]).get(), 0);
|
||||
assert_eq!(counter.with_label_values(&["hit"]).get(), 0);
|
||||
assert_eq!(usage.get(), 0);
|
||||
|
||||
assert_eq!(cache.get(&1), None); // no such key
|
||||
assert_eq!(counter.with_label_values(&["miss"]).get(), 1);
|
||||
assert_eq!(counter.with_label_values(&["hit"]).get(), 0);
|
||||
assert_eq!(usage.get(), 0);
|
||||
|
||||
cache.put(1, 10, 50); // add new key-value
|
||||
assert_eq!(cache.get(&1), Some(&10));
|
||||
assert_eq!(counter.with_label_values(&["miss"]).get(), 1);
|
||||
assert_eq!(counter.with_label_values(&["hit"]).get(), 1);
|
||||
assert_eq!(usage.get(), 50);
|
||||
|
||||
cache.put(3, 30, 50); // drop oldest key (1)
|
||||
cache.put(2, 20, 50);
|
||||
assert_eq!(cache.get(&1), None);
|
||||
assert_eq!(cache.get(&2), Some(&20));
|
||||
assert_eq!(cache.get(&3), Some(&30));
|
||||
assert_eq!(counter.with_label_values(&["miss"]).get(), 2);
|
||||
assert_eq!(counter.with_label_values(&["hit"]).get(), 3);
|
||||
assert_eq!(usage.get(), 100);
|
||||
|
||||
cache.put(3, 33, 50); // replace existing value
|
||||
assert_eq!(cache.get(&1), None);
|
||||
assert_eq!(cache.get(&2), Some(&20));
|
||||
assert_eq!(cache.get(&3), Some(&33));
|
||||
assert_eq!(counter.with_label_values(&["miss"]).get(), 3);
|
||||
assert_eq!(counter.with_label_values(&["hit"]).get(), 5);
|
||||
assert_eq!(usage.get(), 100);
|
||||
|
||||
cache.put(9, 90, 9999); // larger than cache capacity, don't drop the cache
|
||||
assert_eq!(cache.get(&1), None);
|
||||
assert_eq!(cache.get(&2), Some(&20));
|
||||
assert_eq!(cache.get(&3), Some(&33));
|
||||
assert_eq!(cache.get(&9), None);
|
||||
assert_eq!(counter.with_label_values(&["miss"]).get(), 5);
|
||||
assert_eq!(counter.with_label_values(&["hit"]).get(), 7);
|
||||
assert_eq!(usage.get(), 100);
|
||||
}
|
||||
|
||||
fn gen_hash<T: Hash>(seed: u8) -> T {
|
||||
let bytes: Vec<u8> = (seed..seed + 32).collect();
|
||||
<T as Hash>::hash(&bytes[..])
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_blocktxids_cache_hit_and_miss() {
|
||||
let block1: BlockHash = gen_hash(1);
|
||||
let block2: BlockHash = gen_hash(2);
|
||||
let block3: BlockHash = gen_hash(3);
|
||||
let txids: Vec<Txid> = vec![gen_hash(4), gen_hash(5)];
|
||||
|
||||
let misses: Mutex<usize> = Mutex::new(0);
|
||||
let miss_func = || {
|
||||
*misses.lock().unwrap() += 1;
|
||||
Ok(txids.clone())
|
||||
};
|
||||
|
||||
let dummy_metrics = Metrics::new("127.0.0.1:60000".parse().unwrap());
|
||||
// 200 bytes ~ 32 (bytes/hash) * (1 key hash + 2 value hashes) * 2 txns
|
||||
let cache = BlockTxIDsCache::new(200, &dummy_metrics);
|
||||
|
||||
// cache miss
|
||||
let result = cache.get_or_else(&block1, &miss_func).unwrap();
|
||||
assert_eq!(1, *misses.lock().unwrap());
|
||||
assert_eq!(txids, result);
|
||||
|
||||
// cache hit
|
||||
let result = cache.get_or_else(&block1, &miss_func).unwrap();
|
||||
assert_eq!(1, *misses.lock().unwrap());
|
||||
assert_eq!(txids, result);
|
||||
|
||||
// cache size is 200, test that blockhash1 falls out of cache
|
||||
cache.get_or_else(&block2, &miss_func).unwrap();
|
||||
assert_eq!(2, *misses.lock().unwrap());
|
||||
cache.get_or_else(&block3, &miss_func).unwrap();
|
||||
assert_eq!(3, *misses.lock().unwrap());
|
||||
cache.get_or_else(&block1, &miss_func).unwrap();
|
||||
assert_eq!(4, *misses.lock().unwrap());
|
||||
|
||||
// cache hits
|
||||
cache.get_or_else(&block3, &miss_func).unwrap();
|
||||
cache.get_or_else(&block1, &miss_func).unwrap();
|
||||
assert_eq!(4, *misses.lock().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_txn_cache() {
|
||||
use hex;
|
||||
|
||||
let dummy_metrics = Metrics::new("127.0.0.1:60000".parse().unwrap());
|
||||
let cache = TransactionCache::new(1024, &dummy_metrics);
|
||||
let tx_bytes = hex::decode("0100000001a15d57094aa7a21a28cb20b59aab8fc7d1149a3bdbcddba9c622e4f5f6a99ece010000006c493046022100f93bb0e7d8db7bd46e40132d1f8242026e045f03a0efe71bbb8e3f475e970d790221009337cd7f1f929f00cc6ff01f03729b069a7c21b59b1736ddfee5db5946c5da8c0121033b9b137ee87d5a812d6f506efdd37f0affa7ffc310711c06c7f3e097c9447c52ffffffff0100e1f505000000001976a9140389035a9225b3839e2bbf32d826a1e222031fd888ac00000000").unwrap();
|
||||
|
||||
let tx: Transaction = deserialize(&tx_bytes).unwrap();
|
||||
let txid = tx.txid();
|
||||
|
||||
let mut misses = 0;
|
||||
assert_eq!(
|
||||
cache
|
||||
.get_or_else(&txid, || {
|
||||
misses += 1;
|
||||
Ok(tx_bytes.clone())
|
||||
})
|
||||
.unwrap(),
|
||||
tx
|
||||
);
|
||||
assert_eq!(misses, 1);
|
||||
assert_eq!(
|
||||
cache
|
||||
.get_or_else(&txid, || panic!("should not be called"))
|
||||
.unwrap(),
|
||||
tx
|
||||
);
|
||||
assert_eq!(misses, 1);
|
||||
self.proofs
|
||||
.write()
|
||||
.entry((blockhash, txid))
|
||||
.or_insert_with(f);
|
||||
}
|
||||
|
||||
pub(crate) fn get_proof<F, T>(&self, blockhash: BlockHash, txid: Txid, f: F) -> Option<T>
|
||||
where
|
||||
F: FnOnce(&Proof) -> T,
|
||||
{
|
||||
self.proofs.read().get(&(blockhash, txid)).map(f)
|
||||
}
|
||||
}
|
||||
|
240
src/chain.rs
Normal file
@ -0,0 +1,240 @@
use std::collections::HashMap;

use bitcoin::consensus::deserialize;
use bitcoin::hashes::hex::FromHex;
use bitcoin::network::constants;
use bitcoin::{BlockHash, BlockHeader};

/// A new header found, to be added to the chain at specific height
pub(crate) struct NewHeader {
    header: BlockHeader,
    hash: BlockHash,
    height: usize,
}

impl NewHeader {
    pub(crate) fn from((header, height): (BlockHeader, usize)) -> Self {
        Self {
            header,
            hash: header.block_hash(),
            height,
        }
    }

    pub(crate) fn height(&self) -> usize {
        self.height
    }

    pub(crate) fn hash(&self) -> BlockHash {
        self.hash
    }
}

/// Current blockchain headers' list
pub struct Chain {
    headers: Vec<(BlockHash, BlockHeader)>,
    heights: HashMap<BlockHash, usize>,
}

impl Chain {
    // create an empty chain
    pub fn new(network: constants::Network) -> Self {
        let genesis_header_hex = match network {
            constants::Network::Bitcoin => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c",
            constants::Network::Testnet => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff001d1aa4ae18",
            constants::Network::Regtest => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff7f2002000000",
            constants::Network::Signet => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a008f4d5fae77031e8ad22203",
        };
        let genesis_header_bytes = Vec::from_hex(genesis_header_hex).unwrap();
        let genesis: BlockHeader = deserialize(&genesis_header_bytes).unwrap();
        assert_eq!(genesis.prev_blockhash, BlockHash::default());
        Self {
            headers: vec![(genesis.block_hash(), genesis)],
            heights: std::iter::once((genesis.block_hash(), 0)).collect(), // genesis header @ zero height
        }
    }

    /// Load the chain from a collecion of headers, up to the given tip
    pub(crate) fn load(&mut self, headers: Vec<BlockHeader>, tip: BlockHash) {
        let genesis_hash = self.headers[0].0;

        let mut header_map: HashMap<BlockHash, BlockHeader> =
            headers.into_iter().map(|h| (h.block_hash(), h)).collect();
        let mut blockhash = tip;
        let mut new_headers = vec![];
        while blockhash != genesis_hash {
            let header = match header_map.remove(&blockhash) {
                Some(header) => header,
                None => panic!("missing header {} while loading from DB", blockhash),
            };
            blockhash = header.prev_blockhash;
            new_headers.push(header);
        }
        info!("loading {} headers, tip={}", new_headers.len(), tip);
        let new_headers = new_headers.into_iter().rev(); // order by height
        self.update(new_headers.zip(1..).map(NewHeader::from).collect())
    }

    /// Get the block hash at specified height (if exists)
    pub(crate) fn get_block_hash(&self, height: usize) -> Option<BlockHash> {
        self.headers.get(height).map(|(hash, _header)| *hash)
    }

    /// Get the block header at specified height (if exists)
    pub(crate) fn get_block_header(&self, height: usize) -> Option<&BlockHeader> {
        self.headers.get(height).map(|(_hash, header)| header)
    }

    /// Get the block height given the specified hash (if exists)
    pub(crate) fn get_block_height(&self, blockhash: &BlockHash) -> Option<usize> {
        self.heights.get(blockhash).copied()
    }

    /// Update the chain with a list of new headers (possibly a reorg)
    /// Note that we cannot shorten a chain (e.g. by dropping )
    pub(crate) fn update(&mut self, headers: Vec<NewHeader>) {
        if let Some(first_height) = headers.first().map(|h| h.height) {
            for (hash, _header) in self.headers.drain(first_height..) {
                assert!(self.heights.remove(&hash).is_some());
            }
            for (h, height) in headers.into_iter().zip(first_height..) {
                assert_eq!(h.height, height);
                assert_eq!(h.hash, h.header.block_hash());
                assert!(self.heights.insert(h.hash, h.height).is_none());
                self.headers.push((h.hash, h.header));
            }
            info!(
                "chain updated: tip={}, height={}",
                self.headers.last().unwrap().0,
                self.headers.len() - 1
            );
        }
    }

    /// Best block hash
    pub(crate) fn tip(&self) -> BlockHash {
        self.headers.last().expect("empty chain").0
    }

    /// Number of blocks (excluding genesis block)
    pub(crate) fn height(&self) -> usize {
        self.headers.len() - 1
    }

    /// List of block hashes for efficient fork detection and block/header sync
    /// see https://en.bitcoin.it/wiki/Protocol_documentation#getblocks
    pub(crate) fn locator(&self) -> Vec<BlockHash> {
        let mut result = vec![];
        let mut index = self.headers.len() - 1;
        let mut step = 1;
        loop {
            if result.len() >= 10 {
                step *= 2;
            }
            result.push(self.headers[index].0);
            if index == 0 {
                break;
            }
            index = index.saturating_sub(step);
        }
        result
    }
}

#[cfg(test)]
mod tests {
    use super::{Chain, NewHeader};
    use bitcoin::consensus::deserialize;
    use bitcoin::hashes::hex::{FromHex, ToHex};
    use bitcoin::network::constants::Network::Regtest;
    use bitcoin::BlockHeader;

    #[test]
    fn test_genesis() {
        let regtest = Chain::new(Regtest);
        assert_eq!(regtest.height(), 0);
        assert_eq!(
            regtest.tip().to_hex(),
            "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"
        );
    }

    #[test]
    fn test_updates() {
        let hex_headers = vec![
            "0000002006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f1d14d3c7ff12d6adf494ebbcfba69baa915a066358b68a2b8c37126f74de396b1d61cc60ffff7f2000000000",
            "00000020d700ae5d3c705702e0a5d9ababd22ded079f8a63b880b1866321d6bfcb028c3fc816efcf0e84ccafa1dda26be337f58d41b438170c357cda33a68af5550590bc1e61cc60ffff7f2004000000",
            "00000020d13731bc59bc0989e06a5e7cab9843a4e17ad65c7ca47cd77f50dfd24f1f55793f7f342526aca9adb6ce8f33d8a07662c97d29d83b9e18117fb3eceecb2ab99b1e61cc60ffff7f2001000000",
            "00000020a603def3e1255cadfb6df072946327c58b344f9bfb133e8e3e280d1c2d55b31c731a68f70219472864a7cb010cd53dc7e0f67e57f7d08b97e5e092b0c3942ad51f61cc60ffff7f2001000000",
            "0000002041dd202b3b2edcdd3c8582117376347d48ff79ff97c95e5ac814820462012e785142dc360975b982ca43eecd14b4ba6f019041819d4fc5936255d7a2c45a96651f61cc60ffff7f2000000000",
            "0000002072e297a2d6b633c44f3c9b1a340d06f3ce4e6bcd79ebd4c4ff1c249a77e1e37c59c7be1ca0964452e1735c0d2740f0d98a11445a6140c36b55770b5c0bcf801f1f61cc60ffff7f2000000000",
            "000000200c9eb5889a8e924d1c4e8e79a716514579e41114ef37d72295df8869d6718e4ac5840f28de43ff25c7b9200aaf7873b20587c92827eaa61943484ca828bdd2e11f61cc60ffff7f2000000000",
            "000000205873f322b333933e656b07881bb399dae61a6c0fa74188b5fb0e3dd71c9e2442f9e2f433f54466900407cf6a9f676913dd54aad977f7b05afcd6dcd81e98ee752061cc60ffff7f2004000000",
            "00000020fd1120713506267f1dba2e1856ca1d4490077d261cde8d3e182677880df0d856bf94cfa5e189c85462813751ab4059643759ed319a81e0617113758f8adf67bc2061cc60ffff7f2000000000",
            "000000200030d7f9c11ef35b89a0eefb9a5e449909339b5e7854d99804ea8d6a49bf900a0304d2e55fe0b6415949cff9bca0f88c0717884a5e5797509f89f856af93624a2061cc60ffff7f2002000000",
        ];
        let headers: Vec<BlockHeader> = hex_headers
            .iter()
            .map(|hex_header| deserialize(&Vec::from_hex(hex_header).unwrap()).unwrap())
            .collect();

        for chunk_size in 1..hex_headers.len() {
            let mut regtest = Chain::new(Regtest);
            let mut height = 0;
            let mut tip = regtest.tip();
            for chunk in headers.chunks(chunk_size) {
                let mut update = vec![];
                for header in chunk {
                    height += 1;
                    tip = header.block_hash();
                    update.push(NewHeader::from((*header, height)))
                }
                regtest.update(update);
                assert_eq!(regtest.tip(), tip);
                assert_eq!(regtest.height(), height);
            }
            assert_eq!(regtest.tip(), headers.last().unwrap().block_hash());
            assert_eq!(regtest.height(), headers.len());
        }

        // test loading from a list of headers and tip
        let mut regtest = Chain::new(Regtest);
        regtest.load(headers.clone(), headers.last().unwrap().block_hash());
        assert_eq!(regtest.height(), headers.len());

        // test getters
        for (header, height) in headers.iter().zip(1usize..) {
            assert_eq!(regtest.get_block_header(height), Some(header));
            assert_eq!(regtest.get_block_hash(height), Some(header.block_hash()));
            assert_eq!(regtest.get_block_height(&header.block_hash()), Some(height));
        }

        // test chain shortening
        for i in (0..=headers.len()).rev() {
            let header = *regtest.get_block_header(i).unwrap();
            let hash = regtest.get_block_hash(i).unwrap();
            assert_eq!(regtest.get_block_height(&hash), Some(i));
            regtest.update(vec![NewHeader::from((header, i))]);
            assert_eq!(regtest.height(), i);
            assert_eq!(regtest.tip(), hash);
        }
        assert_eq!(regtest.height(), 0);
        assert_eq!(
            regtest.tip().to_hex(),
            "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"
        );

        // test reorg
        let mut regtest = Chain::new(Regtest);
        regtest.load(headers.clone(), headers.last().unwrap().block_hash());
        let height = regtest.height();

        let new_header: BlockHeader = deserialize(&Vec::from_hex("000000200030d7f9c11ef35b89a0eefb9a5e449909339b5e7854d99804ea8d6a49bf900a0304d2e55fe0b6415949cff9bca0f88c0717884a5e5797509f89f856af93624a7a6bcc60ffff7f2000000000").unwrap()).unwrap();
        regtest.update(vec![NewHeader::from((new_header, height))]);
        assert_eq!(regtest.height(), height);
        assert_eq!(
            regtest.tip().to_hex(),
            "0e16637fe0700a7c52e9a6eaa58bd6ac7202652103be8f778680c66f51ad2e9b"
        );
    }
}
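A brief usage sketch of the new Chain API above, mirroring its unit tests (a crate-internal sketch: load(), update() and locator() are pub(crate), and the headers are assumed to come from the P2P sync code elsewhere in this diff):

use bitcoin::network::constants::Network::Regtest;
use bitcoin::BlockHeader;

fn track(headers: Vec<BlockHeader>) {
    let mut chain = Chain::new(Regtest); // starts with only the genesis header at height 0
    let tip = headers.last().expect("no headers").block_hash();
    chain.load(headers, tip); // index every header by height and by hash
    assert_eq!(chain.tip(), tip);
    // locator(): roughly the 10 most recent hashes, then exponentially sparser back to
    // genesis, used for getheaders-style sync and fork detection.
    let _locator = chain.locator();
}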
294
src/config.rs
@ -1,23 +1,21 @@
|
||||
use bitcoin::network::constants::Network;
|
||||
use bitcoincore_rpc::Auth;
|
||||
use dirs_next::home_dir;
|
||||
use std::convert::TryInto;
|
||||
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::fmt;
|
||||
use std::fs;
|
||||
use std::net::SocketAddr;
|
||||
use std::net::ToSocketAddrs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::daemon::CookieGetter;
|
||||
use crate::errors::*;
|
||||
use std::time::Duration;
|
||||
|
||||
const DEFAULT_SERVER_ADDRESS: [u8; 4] = [127, 0, 0, 1]; // by default, serve on IPv4 localhost
|
||||
|
||||
mod internal {
|
||||
#![allow(unused)]
|
||||
#![allow(clippy::identity_conversion)]
|
||||
|
||||
include!(concat!(env!("OUT_DIR"), "/configure_me_config.rs"));
|
||||
}
|
||||
@ -108,7 +106,7 @@ impl FromStr for BitcoinNetwork {
|
||||
}
|
||||
|
||||
impl ::configure_me::parse_arg::ParseArgFromStr for BitcoinNetwork {
|
||||
fn describe_type<W: fmt::Write>(mut writer: W) -> std::fmt::Result {
|
||||
fn describe_type<W: fmt::Write>(mut writer: W) -> fmt::Result {
|
||||
write!(writer, "either 'bitcoin', 'testnet', 'regtest' or 'signet'")
|
||||
}
|
||||
}
|
||||
@ -120,25 +118,46 @@ impl From<BitcoinNetwork> for Network {
|
||||
}
|
||||
|
||||
/// Parsed and post-processed configuration
|
||||
#[derive(Debug)]
|
||||
pub struct Config {
|
||||
// See below for the documentation of each field:
|
||||
pub log: stderrlog::StdErrLog,
|
||||
pub network_type: Network,
|
||||
pub network: Network,
|
||||
pub db_path: PathBuf,
|
||||
pub daemon_dir: PathBuf,
|
||||
pub blocks_dir: PathBuf,
|
||||
pub daemon_auth: SensitiveAuth,
|
||||
pub daemon_rpc_addr: SocketAddr,
|
||||
pub daemon_p2p_addr: SocketAddr,
|
||||
pub electrum_rpc_addr: SocketAddr,
|
||||
pub monitoring_addr: SocketAddr,
|
||||
pub jsonrpc_import: bool,
|
||||
pub wait_duration: Duration,
|
||||
pub index_batch_size: usize,
|
||||
pub bulk_index_threads: usize,
|
||||
pub tx_cache_size: usize,
|
||||
pub txid_limit: usize,
|
||||
pub index_lookup_limit: Option<usize>,
|
||||
pub auto_reindex: bool,
|
||||
pub ignore_mempool: bool,
|
||||
pub sync_once: bool,
|
||||
pub server_banner: String,
|
||||
pub blocktxids_cache_size: usize,
|
||||
pub cookie_getter: Arc<dyn CookieGetter>,
|
||||
pub args: Vec<String>,
|
||||
}
|
||||
|
||||
pub struct SensitiveAuth(pub Auth);
|
||||
|
||||
impl SensitiveAuth {
|
||||
pub(crate) fn get_auth(&self) -> Auth {
|
||||
self.0.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for SensitiveAuth {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.0 {
|
||||
Auth::UserPass(ref user, _) => f
|
||||
.debug_tuple("UserPass")
|
||||
.field(&user)
|
||||
.field(&"<sensitive>")
|
||||
.finish(),
|
||||
_ => write!(f, "{:?}", self.0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns default daemon directory
|
||||
@ -151,40 +170,6 @@ fn default_daemon_dir() -> PathBuf {
|
||||
home
|
||||
}
|
||||
|
||||
fn default_blocks_dir(daemon_dir: &Path) -> PathBuf {
|
||||
daemon_dir.join("blocks")
|
||||
}
|
||||
|
||||
fn create_cookie_getter(
|
||||
cookie: Option<String>,
|
||||
cookie_file: Option<PathBuf>,
|
||||
daemon_dir: &Path,
|
||||
) -> Arc<dyn CookieGetter> {
|
||||
match (cookie, cookie_file) {
|
||||
(None, None) => Arc::new(CookieFile::from_daemon_dir(daemon_dir)),
|
||||
(None, Some(file)) => Arc::new(CookieFile::from_file(file)),
|
||||
(Some(cookie), None) => Arc::new(StaticCookie::from_string(cookie)),
|
||||
(Some(_), Some(_)) => {
|
||||
eprintln!("Error: ambigous configuration - cookie and cookie_file can't be specified at the same time");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Processes deprecation of cookie in favor of auth
|
||||
fn select_auth(auth: Option<String>, cookie: Option<String>) -> Option<String> {
|
||||
if cookie.is_some() {
|
||||
eprintln!("ERROR: cookie option is not supported!");
|
||||
eprintln!();
|
||||
eprintln!("You most likely want to use cookie_file instead.");
|
||||
eprintln!("If you really don't want to use cookie_file for a good reason and knowing the consequences use the auth option");
|
||||
eprintln!("See authentication section in electrs usage documentation for more details.");
|
||||
eprintln!("https://github.com/romanz/electrs/blob/master/doc/usage.md#configuration-files-and-priorities");
|
||||
std::process::exit(1);
|
||||
}
|
||||
auth
|
||||
}
|
||||
|
||||
impl Config {
|
||||
/// Parses args, env vars, config files and post-processes them
|
||||
pub fn from_args() -> Config {
|
||||
@ -200,12 +185,11 @@ impl Config {
|
||||
.chain(home_config.as_ref().map(AsRef::as_ref))
|
||||
.chain(std::iter::once(system_config));
|
||||
|
||||
let (mut config, _) =
|
||||
let (mut config, args) =
|
||||
internal::Config::including_optional_config_files(configs).unwrap_or_exit();
|
||||
|
||||
let db_subdir = match config.network {
|
||||
// We must keep the name "mainnet" due to backwards compatibility
|
||||
Network::Bitcoin => "mainnet",
|
||||
Network::Bitcoin => "bitcoin",
|
||||
Network::Testnet => "testnet",
|
||||
Network::Regtest => "regtest",
|
||||
Network::Signet => "signet",
|
||||
@ -213,12 +197,18 @@ impl Config {
|
||||
|
||||
config.db_dir.push(db_subdir);
|
||||
|
||||
let default_daemon_port = match config.network {
|
||||
let default_daemon_rpc_port = match config.network {
|
||||
Network::Bitcoin => 8332,
|
||||
Network::Testnet => 18332,
|
||||
Network::Regtest => 18443,
|
||||
Network::Signet => 38332,
|
||||
};
|
||||
let default_daemon_p2p_port = match config.network {
|
||||
Network::Bitcoin => 8333,
|
||||
Network::Testnet => 18333,
|
||||
Network::Regtest => 18444,
|
||||
Network::Signet => 38333,
|
||||
};
|
||||
let default_electrum_port = match config.network {
|
||||
Network::Bitcoin => 50001,
|
||||
Network::Testnet => 60001,
|
||||
@ -233,13 +223,24 @@ impl Config {
|
||||
};
|
||||
|
||||
let daemon_rpc_addr: SocketAddr = config.daemon_rpc_addr.map_or(
|
||||
(DEFAULT_SERVER_ADDRESS, default_daemon_port).into(),
|
||||
(DEFAULT_SERVER_ADDRESS, default_daemon_rpc_port).into(),
|
||||
ResolvAddr::resolve_or_exit,
|
||||
);
|
||||
let daemon_p2p_addr: SocketAddr = config.daemon_p2p_addr.map_or(
|
||||
(DEFAULT_SERVER_ADDRESS, default_daemon_p2p_port).into(),
|
||||
ResolvAddr::resolve_or_exit,
|
||||
);
|
||||
let electrum_rpc_addr: SocketAddr = config.electrum_rpc_addr.map_or(
|
||||
(DEFAULT_SERVER_ADDRESS, default_electrum_port).into(),
|
||||
ResolvAddr::resolve_or_exit,
|
||||
);
|
||||
#[cfg(not(feature = "metrics"))]
|
||||
{
|
||||
if config.monitoring_addr.is_some() {
|
||||
eprintln!("Error: enable \"metrics\" feature to specify monitoring_addr");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
let monitoring_addr: SocketAddr = config.monitoring_addr.map_or(
|
||||
(DEFAULT_SERVER_ADDRESS, default_monitoring_port).into(),
|
||||
ResolvAddr::resolve_or_exit,
|
||||
@ -253,138 +254,83 @@ impl Config {
|
||||
}
|
||||
|
||||
let daemon_dir = &config.daemon_dir;
|
||||
let blocks_dir = config
|
||||
.blocks_dir
|
||||
.unwrap_or_else(|| default_blocks_dir(daemon_dir));
|
||||
|
||||
let auth = select_auth(config.auth, config.cookie);
|
||||
let cookie_getter = create_cookie_getter(auth, config.cookie_file, daemon_dir);
|
||||
|
||||
let mut log = stderrlog::new();
|
||||
log.verbosity(
|
||||
config
|
||||
.verbose
|
||||
.try_into()
|
||||
.expect("Overflow: Running electrs on less than 32 bit devices is unsupported"),
|
||||
);
|
||||
log.timestamp(if config.timestamp {
|
||||
stderrlog::Timestamp::Millisecond
|
||||
} else {
|
||||
stderrlog::Timestamp::Off
|
||||
let daemon_auth = SensitiveAuth(match (config.auth, config.cookie_file) {
|
||||
(None, None) => Auth::CookieFile(daemon_dir.join(".cookie")),
|
||||
(None, Some(cookie_file)) => Auth::CookieFile(cookie_file),
|
||||
(Some(auth), None) => {
|
||||
let parts: Vec<&str> = auth.splitn(2, ':').collect();
|
||||
if parts.len() != 2 {
|
||||
eprintln!("Error: auth cookie doesn't contain colon");
|
||||
std::process::exit(1);
|
||||
}
|
||||
Auth::UserPass(parts[0].to_owned(), parts[1].to_owned())
|
||||
}
|
||||
(Some(_), Some(_)) => {
|
||||
eprintln!("Error: ambigous configuration - auth and cookie_file can't be specified at the same time");
|
||||
std::process::exit(1);
|
||||
}
|
||||
});
|
||||
log.init().unwrap_or_else(|err| {
|
||||
eprintln!("Error: logging initialization failed: {}", err);
|
||||
std::process::exit(1)
|
||||
});
|
||||
// Could have been default, but it's useful to allow the user to specify 0 when overriding
|
||||
// configs.
|
||||
if config.bulk_index_threads == 0 {
|
||||
config.bulk_index_threads = num_cpus::get();
|
||||
}
|
||||
const MB: f32 = (1 << 20) as f32;
|
||||
|
||||
let level = match config.verbose {
|
||||
0 => log::LevelFilter::Error,
|
||||
1 => log::LevelFilter::Warn,
|
||||
2 => log::LevelFilter::Info,
|
||||
3 => log::LevelFilter::Debug,
|
||||
_ => log::LevelFilter::Trace,
|
||||
};
|
||||
|
||||
let index_lookup_limit = match config.index_lookup_limit {
|
||||
0 => None,
|
||||
_ => Some(config.index_lookup_limit),
|
||||
};
|
||||
let config = Config {
|
||||
log,
|
||||
network_type: config.network,
|
||||
network: config.network,
|
||||
db_path: config.db_dir,
|
||||
daemon_dir: config.daemon_dir,
|
||||
blocks_dir,
|
||||
daemon_auth,
|
||||
daemon_rpc_addr,
|
||||
daemon_p2p_addr,
|
||||
electrum_rpc_addr,
|
||||
monitoring_addr,
|
||||
jsonrpc_import: config.jsonrpc_import,
|
||||
wait_duration: Duration::from_secs(config.wait_duration_secs),
|
||||
index_batch_size: config.index_batch_size,
|
||||
bulk_index_threads: config.bulk_index_threads,
|
||||
tx_cache_size: (config.tx_cache_size_mb * MB) as usize,
|
||||
blocktxids_cache_size: (config.blocktxids_cache_size_mb * MB) as usize,
|
||||
txid_limit: config.txid_limit,
|
||||
index_lookup_limit,
|
||||
auto_reindex: config.auto_reindex,
|
||||
ignore_mempool: config.ignore_mempool,
|
||||
sync_once: config.sync_once,
|
||||
server_banner: config.server_banner,
|
||||
cookie_getter,
|
||||
args: args.map(|a| a.into_string().unwrap()).collect(),
|
||||
};
|
||||
eprintln!("{:?}", config);
|
||||
env_logger::Builder::from_default_env()
|
||||
.default_format()
|
||||
.format_timestamp_millis()
|
||||
.filter_level(level)
|
||||
.init();
|
||||
config
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cookie_getter(&self) -> Arc<dyn CookieGetter> {
|
||||
Arc::clone(&self.cookie_getter)
|
||||
}
|
||||
}
|
||||
|
||||
// CookieGetter + Debug isn't implemented in Rust, so we have to skip cookie_getter
|
||||
macro_rules! debug_struct {
|
||||
($name:ty, $($field:ident,)*) => {
|
||||
impl fmt::Debug for $name {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct(stringify!($name))
|
||||
$(
|
||||
.field(stringify!($field), &self.$field)
|
||||
)*
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
debug_struct! { Config,
|
||||
log,
|
||||
network_type,
|
||||
db_path,
|
||||
daemon_dir,
|
||||
blocks_dir,
|
||||
daemon_rpc_addr,
|
||||
electrum_rpc_addr,
|
||||
monitoring_addr,
|
||||
jsonrpc_import,
|
||||
index_batch_size,
|
||||
bulk_index_threads,
|
||||
tx_cache_size,
|
||||
txid_limit,
|
||||
server_banner,
|
||||
blocktxids_cache_size,
|
||||
}
|
||||
|
||||
struct StaticCookie {
|
||||
value: Vec<u8>,
|
||||
}
|
||||
|
||||
impl StaticCookie {
|
||||
fn from_string(value: String) -> Self {
|
||||
StaticCookie {
|
||||
value: value.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CookieGetter for StaticCookie {
|
||||
fn get(&self) -> Result<Vec<u8>> {
|
||||
Ok(self.value.clone())
|
||||
}
|
||||
}
|
||||
|
||||
struct CookieFile {
|
||||
cookie_file: PathBuf,
|
||||
}
|
||||
|
||||
impl CookieFile {
|
||||
fn from_daemon_dir(daemon_dir: &Path) -> Self {
|
||||
CookieFile {
|
||||
cookie_file: daemon_dir.join(".cookie"),
|
||||
}
|
||||
}
|
||||
|
||||
fn from_file(cookie_file: PathBuf) -> Self {
|
||||
CookieFile { cookie_file }
|
||||
}
|
||||
}
|
||||
|
||||
impl CookieGetter for CookieFile {
|
||||
fn get(&self) -> Result<Vec<u8>> {
|
||||
let contents = fs::read(&self.cookie_file).chain_err(|| {
|
||||
ErrorKind::Connection(format!(
|
||||
"failed to read cookie from {}",
|
||||
self.cookie_file.display()
|
||||
))
|
||||
})?;
|
||||
Ok(contents)
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{Auth, SensitiveAuth};
|
||||
use std::path::Path;
|
||||
|
||||
#[test]
|
||||
fn test_auth_debug() {
|
||||
let auth = Auth::None;
|
||||
assert_eq!(format!("{:?}", SensitiveAuth(auth)), "None");
|
||||
|
||||
let auth = Auth::CookieFile(Path::new("/foo/bar/.cookie").to_path_buf());
|
||||
assert_eq!(
|
||||
format!("{:?}", SensitiveAuth(auth)),
|
||||
"CookieFile(\"/foo/bar/.cookie\")"
|
||||
);
|
||||
|
||||
let auth = Auth::UserPass("user".to_owned(), "pass".to_owned());
|
||||
assert_eq!(
|
||||
format!("{:?}", SensitiveAuth(auth)),
|
||||
"UserPass(\"user\", \"<sensitive>\")"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
755
src/daemon.rs
@ -1,634 +1,207 @@
|
||||
use bitcoin::blockdata::block::{Block, BlockHeader};
|
||||
use bitcoin::blockdata::transaction::Transaction;
|
||||
use bitcoin::consensus::encode::{deserialize, serialize};
|
||||
use bitcoin::hash_types::{BlockHash, Txid};
|
||||
use bitcoin::hashes::hex::{FromHex, ToHex};
|
||||
use bitcoin::hashes::Hash;
|
||||
use bitcoin::network::constants::Network;
|
||||
use serde_json::{from_str, from_value, Map, Value};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::io::{BufRead, BufReader, Lines, Write};
|
||||
use std::net::{SocketAddr, TcpStream};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
use anyhow::{Context, Result};
|
||||
|
||||
use crate::cache::BlockTxIDsCache;
|
||||
use crate::errors::*;
|
||||
use crate::metrics::{HistogramOpts, HistogramVec, Metrics};
|
||||
use crate::signal::Waiter;
|
||||
use crate::util::HeaderList;
|
||||
use bitcoin::{
|
||||
consensus::serialize, hashes::hex::ToHex, Amount, Block, BlockHash, Transaction, Txid,
|
||||
};
|
||||
use bitcoincore_rpc::{self, json, RpcApi};
|
||||
use parking_lot::Mutex;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
fn parse_hash<T: Hash>(value: &Value) -> Result<T> {
|
||||
T::from_hex(
|
||||
value
|
||||
.as_str()
|
||||
.chain_err(|| format!("non-string value: {}", value))?,
|
||||
)
|
||||
.chain_err(|| format!("non-hex value: {}", value))
|
||||
use crate::{
|
||||
chain::{Chain, NewHeader},
|
||||
config::Config,
|
||||
p2p::Connection,
|
||||
};
|
||||
|
||||
enum PollResult {
|
||||
Done(Result<()>),
|
||||
Retry,
|
||||
}
|
||||
|
||||
fn header_from_value(value: Value) -> Result<BlockHeader> {
|
||||
let header_hex = value
|
||||
.as_str()
|
||||
.chain_err(|| format!("non-string header: {}", value))?;
|
||||
let header_bytes = hex::decode(header_hex).chain_err(|| "non-hex header")?;
|
||||
|
||||
deserialize(&header_bytes).chain_err(|| format!("failed to parse header {}", header_hex))
|
||||
}
|
||||
|
||||
fn block_from_value(value: Value) -> Result<Block> {
|
||||
let block_hex = value.as_str().chain_err(|| "non-string block")?;
|
||||
let block_bytes = hex::decode(block_hex).chain_err(|| "non-hex block")?;
|
||||
deserialize(&block_bytes).chain_err(|| format!("failed to parse block {}", block_hex))
|
||||
}
|
||||
|
||||
fn tx_from_value(value: Value) -> Result<Transaction> {
|
||||
let tx_hex = value.as_str().chain_err(|| "non-string tx")?;
|
||||
let tx_bytes = hex::decode(tx_hex).chain_err(|| "non-hex tx")?;
|
||||
deserialize(&tx_bytes).chain_err(|| format!("failed to parse tx {}", tx_hex))
|
||||
}
|
||||
|
||||
/// Parse JSONRPC error code, if exists.
|
||||
fn parse_error_code(err: &Value) -> Option<i64> {
|
||||
if err.is_null() {
|
||||
return None;
|
||||
}
|
||||
err.as_object()?.get("code")?.as_i64()
|
||||
}
|
||||
|
||||
fn check_error_code(reply_obj: &mut Map<String, Value>, method: &str) -> Result<()> {
|
||||
if let Some(err) = reply_obj.remove("error") {
|
||||
if let Some(code) = parse_error_code(&err) {
|
||||
match code {
|
||||
// RPC_IN_WARMUP -> retry by later reconnection
|
||||
-28 => bail!(ErrorKind::Connection(err.to_string())),
|
||||
_ => bail!(ErrorKind::Daemon(method.to_owned(), err)),
|
||||
fn rpc_poll(client: &mut bitcoincore_rpc::Client) -> PollResult {
|
||||
match client.call::<BlockchainInfo>("getblockchaininfo", &[]) {
|
||||
Ok(info) => {
|
||||
let left_blocks = info.headers - info.blocks;
|
||||
if info.initial_block_download || left_blocks > 0 {
|
||||
info!(
|
||||
"waiting for {} blocks to download{}",
|
||||
left_blocks,
|
||||
if info.initial_block_download {
|
||||
" (IBD)"
|
||||
} else {
|
||||
""
|
||||
}
|
||||
);
|
||||
return PollResult::Retry;
|
||||
}
|
||||
PollResult::Done(Ok(()))
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn parse_jsonrpc_reply(mut reply: Value, method: &str, expected_id: u64) -> Result<Value> {
|
||||
if let Some(reply_obj) = reply.as_object_mut() {
|
||||
let id = reply_obj
|
||||
.get("id")
|
||||
.chain_err(|| format!("no id in reply: {:?}", reply_obj))?
|
||||
.clone();
|
||||
if id != expected_id {
|
||||
bail!(
|
||||
"wrong {} response id {}, expected {}",
|
||||
method,
|
||||
id,
|
||||
expected_id
|
||||
);
|
||||
}
|
||||
check_error_code(reply_obj, method)?;
|
||||
if let Some(result) = reply_obj.get_mut("result") {
|
||||
return Ok(result.take());
|
||||
}
|
||||
bail!("no result in reply: {:?}", reply_obj);
|
||||
}
|
||||
bail!("non-object reply: {:?}", reply);
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
struct BlockchainInfo {
|
||||
chain: String,
|
||||
blocks: u32,
|
||||
headers: u32,
|
||||
verificationprogress: f64,
|
||||
bestblockhash: String,
|
||||
pruned: bool,
|
||||
initialblockdownload: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
struct NetworkInfo {
|
||||
version: u64,
|
||||
subversion: String,
|
||||
relayfee: f64, // in BTC
|
||||
}
|
||||
|
||||
pub struct MempoolEntry {
|
||||
fee: u64, // in satoshis
|
||||
vsize: u32, // in virtual bytes (= weight/4)
|
||||
fee_per_vbyte: f32,
|
||||
}
|
||||
|
||||
impl MempoolEntry {
|
||||
pub(crate) fn new(fee: u64, vsize: u32) -> MempoolEntry {
|
||||
MempoolEntry {
|
||||
fee,
|
||||
vsize,
|
||||
fee_per_vbyte: fee as f32 / vsize as f32,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn fee_per_vbyte(&self) -> f32 {
|
||||
self.fee_per_vbyte
|
||||
}
|
||||
|
||||
pub fn fee(&self) -> u64 {
|
||||
self.fee
|
||||
}
|
||||
|
||||
pub fn vsize(&self) -> u32 {
|
||||
self.vsize
|
||||
}
|
||||
}
|
||||
|
||||
pub trait CookieGetter: Send + Sync {
|
||||
fn get(&self) -> Result<Vec<u8>>;
|
||||
}
|
||||
|
||||
struct Connection {
|
||||
tx: TcpStream,
|
||||
rx: Lines<BufReader<TcpStream>>,
|
||||
cookie_getter: Arc<dyn CookieGetter>,
|
||||
addr: SocketAddr,
|
||||
signal: Waiter,
|
||||
}
|
||||
|
||||
fn tcp_connect(addr: SocketAddr, signal: &Waiter) -> Result<TcpStream> {
|
||||
loop {
|
||||
match TcpStream::connect(addr) {
|
||||
Ok(conn) => return Ok(conn),
|
||||
Err(err) => {
|
||||
warn!("failed to connect daemon at {}: {}", addr, err);
|
||||
signal.wait(Duration::from_secs(3))?;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Connection {
|
||||
fn new(
|
||||
addr: SocketAddr,
|
||||
cookie_getter: Arc<dyn CookieGetter>,
|
||||
signal: Waiter,
|
||||
) -> Result<Connection> {
|
||||
let conn = tcp_connect(addr, &signal)?;
|
||||
let reader = BufReader::new(
|
||||
conn.try_clone()
|
||||
.chain_err(|| format!("failed to clone {:?}", conn))?,
|
||||
);
|
||||
Ok(Connection {
|
||||
tx: conn,
|
||||
rx: reader.lines(),
|
||||
cookie_getter,
|
||||
addr,
|
||||
signal,
|
||||
})
|
||||
}
|
||||
|
||||
fn reconnect(&self) -> Result<Connection> {
|
||||
Connection::new(self.addr, self.cookie_getter.clone(), self.signal.clone())
|
||||
}
|
||||
|
||||
fn send(&mut self, request: &str) -> Result<()> {
|
||||
let cookie = &self.cookie_getter.get()?;
|
||||
let msg = format!(
|
||||
"POST / HTTP/1.1\nAuthorization: Basic {}\nContent-Length: {}\n\n{}",
|
||||
base64::encode(cookie),
|
||||
request.len(),
|
||||
request,
|
||||
);
|
||||
self.tx.write_all(msg.as_bytes()).chain_err(|| {
|
||||
ErrorKind::Connection("disconnected from daemon while sending".to_owned())
|
||||
})
|
||||
}
|
||||
|
||||
fn recv(&mut self) -> Result<String> {
|
||||
// TODO: use proper HTTP parser.
|
||||
let mut in_header = true;
|
||||
let mut contents: Option<String> = None;
|
||||
let iter = self.rx.by_ref();
|
||||
let status = iter
|
||||
.next()
|
||||
.chain_err(|| {
|
||||
ErrorKind::Connection("disconnected from daemon while receiving".to_owned())
|
||||
})?
|
||||
.chain_err(|| "failed to read status")?;
|
||||
let mut headers = HashMap::new();
|
||||
for line in iter {
|
||||
let line = line.chain_err(|| ErrorKind::Connection("failed to read".to_owned()))?;
|
||||
if line.is_empty() {
|
||||
in_header = false; // next line should contain the actual response.
|
||||
} else if in_header {
|
||||
let parts: Vec<&str> = line.splitn(2, ": ").collect();
|
||||
if parts.len() == 2 {
|
||||
headers.insert(parts[0].to_owned(), parts[1].to_owned());
|
||||
} else {
|
||||
warn!("invalid header: {:?}", line);
|
||||
Err(err) => {
|
||||
if let Some(e) = extract_bitcoind_error(&err) {
|
||||
if e.code == -28 {
|
||||
info!("waiting for RPC warmup: {}", e.message);
|
||||
return PollResult::Retry;
|
||||
}
|
||||
} else {
|
||||
contents = Some(line);
|
||||
break;
|
||||
}
|
||||
PollResult::Done(Err(err).context("daemon not available"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn rpc_connect(config: &Config) -> Result<bitcoincore_rpc::Client> {
|
||||
let rpc_url = format!("http://{}", config.daemon_rpc_addr);
|
||||
let auth = config.daemon_auth.get_auth();
|
||||
if let bitcoincore_rpc::Auth::CookieFile(ref path) = auth {
|
||||
if !path.exists() {
|
||||
bail!("{:?} is missing - is bitcoind running?", path);
|
||||
}
|
||||
}
|
||||
let mut client = bitcoincore_rpc::Client::new(&rpc_url, auth)
|
||||
.with_context(|| format!("failed to connect to RPC: {}", config.daemon_rpc_addr))?;
|
||||
|
||||
loop {
|
||||
match rpc_poll(&mut client) {
|
||||
PollResult::Done(result) => return result.map(|()| client),
|
||||
PollResult::Retry => {
|
||||
std::thread::sleep(std::time::Duration::from_secs(1)); // wait a bit before polling
|
||||
}
|
||||
}
|
||||
|
||||
let contents =
|
||||
contents.chain_err(|| ErrorKind::Connection("no reply from daemon".to_owned()))?;
|
||||
let contents_length: &str = headers
|
||||
.get("Content-Length")
|
||||
.chain_err(|| format!("Content-Length is missing: {:?}", headers))?;
|
||||
let contents_length: usize = contents_length
|
||||
.parse()
|
||||
.chain_err(|| format!("invalid Content-Length: {:?}", contents_length))?;
|
||||
|
||||
let expected_length = contents_length - 1; // trailing EOL is skipped
|
||||
if expected_length != contents.len() {
|
||||
bail!(ErrorKind::Connection(format!(
|
||||
"expected {} bytes, got {}",
|
||||
expected_length,
|
||||
contents.len()
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(if status == "HTTP/1.1 200 OK" {
|
||||
contents
|
||||
} else if status == "HTTP/1.1 500 Internal Server Error" {
|
||||
warn!("HTTP status: {}", status);
|
||||
contents // the contents should have a JSONRPC error field
|
||||
} else {
|
||||
bail!(
|
||||
"request failed {:?}: {:?} = {:?}",
|
||||
status,
|
||||
headers,
|
||||
contents
|
||||
);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct Counter {
|
||||
value: AtomicU64,
|
||||
}
|
||||
|
||||
impl Counter {
|
||||
fn new() -> Self {
|
||||
Counter { value: 0.into() }
|
||||
}
|
||||
|
||||
fn next(&self) -> u64 {
|
||||
// fetch_add() returns previous value, we want current one
|
||||
self.value.fetch_add(1, Ordering::Relaxed) + 1
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Daemon {
|
||||
daemon_dir: PathBuf,
|
||||
blocks_dir: PathBuf,
|
||||
network: Network,
|
||||
conn: Mutex<Connection>,
|
||||
message_id: Counter, // for monotonic JSONRPC 'id'
|
||||
signal: Waiter,
|
||||
blocktxids_cache: Arc<BlockTxIDsCache>,
|
||||
p2p: Mutex<Connection>,
|
||||
rpc: bitcoincore_rpc::Client,
|
||||
}
|
||||
|
||||
// monitoring
|
||||
latency: HistogramVec,
|
||||
size: HistogramVec,
|
||||
// A workaround for https://github.com/rust-bitcoin/rust-bitcoincore-rpc/pull/190.
|
||||
#[derive(Deserialize)]
|
||||
struct BlockchainInfo {
|
||||
/// The current number of blocks processed in the server
|
||||
pub blocks: u64,
|
||||
/// The current number of headers we have validated
|
||||
pub headers: u64,
|
||||
/// Estimate of whether this node is in Initial Block Download mode
|
||||
#[serde(rename = "initialblockdownload")]
|
||||
pub initial_block_download: bool,
|
||||
/// If the blocks are subject to pruning
|
||||
pub pruned: bool,
|
||||
}
|
||||
|
||||
impl Daemon {
|
||||
pub fn new(
|
||||
daemon_dir: &PathBuf,
|
||||
blocks_dir: &PathBuf,
|
||||
daemon_rpc_addr: SocketAddr,
|
||||
cookie_getter: Arc<dyn CookieGetter>,
|
||||
network: Network,
|
||||
signal: Waiter,
|
||||
blocktxids_cache: Arc<BlockTxIDsCache>,
|
||||
metrics: &Metrics,
|
||||
) -> Result<Daemon> {
|
||||
let daemon = Daemon {
|
||||
daemon_dir: daemon_dir.clone(),
|
||||
blocks_dir: blocks_dir.clone(),
|
||||
network,
|
||||
conn: Mutex::new(Connection::new(
|
||||
daemon_rpc_addr,
|
||||
cookie_getter,
|
||||
signal.clone(),
|
||||
)?),
|
||||
message_id: Counter::new(),
|
||||
blocktxids_cache,
|
||||
signal: signal.clone(),
|
||||
latency: metrics.histogram_vec(
|
||||
HistogramOpts::new("electrs_daemon_rpc", "Bitcoind RPC latency (in seconds)"),
|
||||
&["method"],
|
||||
),
|
||||
// TODO: use better buckets (e.g. 1 byte to 10MB).
|
||||
size: metrics.histogram_vec(
|
||||
HistogramOpts::new("electrs_daemon_bytes", "Bitcoind RPC size (in bytes)"),
|
||||
&["method", "dir"],
|
||||
),
|
||||
};
|
||||
let network_info = daemon.getnetworkinfo()?;
|
||||
info!("{:?}", network_info);
|
||||
if network_info.version < 16_00_00 {
|
||||
bail!(
|
||||
"{} is not supported - please use bitcoind 0.16+",
|
||||
network_info.subversion,
|
||||
)
|
||||
pub fn connect(config: &Config) -> Result<Self> {
|
||||
let rpc = rpc_connect(config)?;
|
||||
let network_info = rpc.get_network_info()?;
|
||||
if network_info.version < 21_00_00 {
|
||||
bail!("electrs requires bitcoind 0.21+");
|
||||
}
|
||||
let blockchain_info = daemon.getblockchaininfo()?;
|
||||
info!("{:?}", blockchain_info);
|
||||
if !network_info.network_active {
|
||||
bail!("electrs requires active bitcoind p2p network");
|
||||
}
|
||||
let blockchain_info: BlockchainInfo = rpc.call("getblockchaininfo", &[])?;
|
||||
if blockchain_info.pruned {
|
||||
bail!("pruned node is not supported (use '-prune=0' bitcoind flag)".to_owned())
|
||||
bail!("electrs requires non-pruned bitcoind node");
|
||||
}
|
||||
loop {
|
||||
let info = daemon.getblockchaininfo()?;
|
||||
if !info.initialblockdownload {
|
||||
break;
|
||||
}
|
||||
if network == Network::Regtest && info.headers == info.blocks {
|
||||
break;
|
||||
}
|
||||
warn!(
|
||||
"wait until IBD is over: headers={} blocks={} progress={}",
|
||||
info.headers, info.blocks, info.verificationprogress
|
||||
);
|
||||
signal.wait(Duration::from_secs(3))?;
|
||||
}
|
||||
Ok(daemon)
|
||||
let p2p = Mutex::new(Connection::connect(config.network, config.daemon_p2p_addr)?);
|
||||
Ok(Self { p2p, rpc })
|
||||
}
|
||||
|
||||
pub fn reconnect(&self) -> Result<Daemon> {
|
||||
Ok(Daemon {
|
||||
daemon_dir: self.daemon_dir.clone(),
|
||||
blocks_dir: self.blocks_dir.clone(),
|
||||
network: self.network,
|
||||
conn: Mutex::new(self.conn.lock().unwrap().reconnect()?),
|
||||
message_id: Counter::new(),
|
||||
signal: self.signal.clone(),
|
||||
blocktxids_cache: Arc::clone(&self.blocktxids_cache),
|
||||
latency: self.latency.clone(),
|
||||
size: self.size.clone(),
|
||||
})
|
||||
pub(crate) fn estimate_fee(&self, nblocks: u16) -> Result<Option<Amount>> {
|
||||
Ok(self
|
||||
.rpc
|
||||
.estimate_smart_fee(nblocks, None)
|
||||
.context("failed to estimate fee")?
|
||||
.fee_rate)
|
||||
}
|
||||
|
||||
pub fn list_blk_files(&self) -> Result<Vec<PathBuf>> {
|
||||
let path = self.blocks_dir.join("blk*.dat");
|
||||
info!("listing block files at {:?}", path);
|
||||
let mut paths: Vec<PathBuf> = glob::glob(path.to_str().unwrap())
|
||||
.chain_err(|| "failed to list blk*.dat files")?
|
||||
.map(std::result::Result::unwrap)
|
||||
.collect();
|
||||
paths.sort();
|
||||
Ok(paths)
|
||||
pub(crate) fn get_relay_fee(&self) -> Result<Amount> {
|
||||
Ok(self
|
||||
.rpc
|
||||
.get_network_info()
|
||||
.context("failed to get relay fee")?
|
||||
.relay_fee)
|
||||
}
|
||||
|
||||
pub fn magic(&self) -> u32 {
|
||||
self.network.magic()
|
||||
pub(crate) fn broadcast(&self, tx: &Transaction) -> Result<Txid> {
|
||||
self.rpc
|
||||
.send_raw_transaction(tx)
|
||||
.context("failed to broadcast transaction")
|
||||
}
|
||||
|
||||
fn call_jsonrpc(&self, method: &str, request: &Value) -> Result<Value> {
|
||||
let mut conn = self.conn.lock().unwrap();
|
||||
let timer = self.latency.with_label_values(&[method]).start_timer();
|
||||
let request = request.to_string();
|
||||
conn.send(&request)?;
|
||||
self.size
|
||||
.with_label_values(&[method, "send"])
|
||||
.observe(request.len() as f64);
|
||||
let response = conn.recv()?;
|
||||
let result: Value = from_str(&response).chain_err(|| "invalid JSON")?;
|
||||
timer.observe_duration();
|
||||
self.size
|
||||
.with_label_values(&[method, "recv"])
|
||||
.observe(response.len() as f64);
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn handle_request_batch(&self, method: &str, params_list: &[Value]) -> Result<Vec<Value>> {
|
||||
let id = self.message_id.next();
|
||||
let reqs = params_list
|
||||
.iter()
|
||||
.map(|params| json!({"method": method, "params": params, "id": id}))
|
||||
.collect();
|
||||
let mut results = vec![];
|
||||
let mut replies = self.call_jsonrpc(method, &reqs)?;
|
||||
if let Some(replies_vec) = replies.as_array_mut() {
|
||||
for reply in replies_vec {
|
||||
results.push(parse_jsonrpc_reply(reply.take(), method, id)?)
|
||||
}
|
||||
return Ok(results);
|
||||
}
|
||||
bail!("non-array replies: {:?}", replies);
|
||||
}
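As a hedged illustration of the payload shape built by handle_request_batch() above (the helper below is hypothetical and not part of the diff):

// Build the same batched JSON-RPC body: one object per params entry,
// all sharing the method name and the monotonic message id.
fn batch_payload(method: &str, params_list: &[serde_json::Value], id: u64) -> serde_json::Value {
    params_list
        .iter()
        .map(|params| serde_json::json!({"method": method, "params": params, "id": id}))
        .collect()
}
// e.g. batch_payload("getblockhash", &[serde_json::json!([0]), serde_json::json!([1])], 7)
// yields a two-element JSON array of request objects.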
|
||||
|
||||
fn retry_request_batch(&self, method: &str, params_list: &[Value]) -> Result<Vec<Value>> {
|
||||
loop {
|
||||
match self.handle_request_batch(method, params_list) {
|
||||
Err(Error(ErrorKind::Connection(msg), _)) => {
|
||||
warn!("reconnecting to bitcoind: {}", msg);
|
||||
self.signal.wait(Duration::from_secs(3))?;
|
||||
let mut conn = self.conn.lock().unwrap();
|
||||
*conn = conn.reconnect()?;
|
||||
continue;
|
||||
}
|
||||
result => return result,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn request(&self, method: &str, params: Value) -> Result<Value> {
|
||||
let mut values = self.retry_request_batch(method, &[params])?;
|
||||
assert_eq!(values.len(), 1);
|
||||
Ok(values.remove(0))
|
||||
}
|
||||
|
||||
fn requests(&self, method: &str, params_list: &[Value]) -> Result<Vec<Value>> {
|
||||
self.retry_request_batch(method, params_list)
|
||||
}
|
||||
|
||||
// bitcoind JSONRPC API:
|
||||
|
||||
fn getblockchaininfo(&self) -> Result<BlockchainInfo> {
|
||||
let info: Value = self.request("getblockchaininfo", json!([]))?;
|
||||
from_value(info).chain_err(|| "invalid blockchain info")
|
||||
}
|
||||
|
||||
fn getnetworkinfo(&self) -> Result<NetworkInfo> {
|
||||
let info: Value = self.request("getnetworkinfo", json!([]))?;
|
||||
from_value(info).chain_err(|| "invalid network info")
|
||||
}
|
||||
|
||||
pub fn get_subversion(&self) -> Result<String> {
|
||||
Ok(self.getnetworkinfo()?.subversion)
|
||||
}
|
||||
|
||||
pub fn get_relayfee(&self) -> Result<f64> {
|
||||
Ok(self.getnetworkinfo()?.relayfee)
|
||||
}
|
||||
|
||||
pub fn getbestblockhash(&self) -> Result<BlockHash> {
|
||||
parse_hash(&self.request("getbestblockhash", json!([]))?).chain_err(|| "invalid blockhash")
|
||||
}
|
||||
|
||||
pub fn getblockheader(&self, blockhash: &BlockHash) -> Result<BlockHeader> {
|
||||
header_from_value(self.request(
|
||||
"getblockheader",
|
||||
json!([blockhash.to_hex(), /*verbose=*/ false]),
|
||||
)?)
|
||||
}
|
||||
|
||||
pub fn getblockheaders(&self, heights: &[usize]) -> Result<Vec<BlockHeader>> {
|
||||
let heights: Vec<Value> = heights.iter().map(|height| json!([height])).collect();
|
||||
let params_list: Vec<Value> = self
|
||||
.requests("getblockhash", &heights)?
|
||||
.into_iter()
|
||||
.map(|hash| json!([hash, /*verbose=*/ false]))
|
||||
.collect();
|
||||
let mut result = vec![];
|
||||
for h in self.requests("getblockheader", &params_list)? {
|
||||
result.push(header_from_value(h)?);
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn getblock(&self, blockhash: &BlockHash) -> Result<Block> {
|
||||
let block = block_from_value(
|
||||
self.request("getblock", json!([blockhash.to_hex(), /*verbose=*/ false]))?,
|
||||
)?;
|
||||
assert_eq!(block.block_hash(), *blockhash);
|
||||
Ok(block)
|
||||
}
|
||||
|
||||
fn load_blocktxids(&self, blockhash: &BlockHash) -> Result<Vec<Txid>> {
|
||||
self.request("getblock", json!([blockhash.to_hex(), /*verbose=*/ 1]))?
|
||||
.get("tx")
|
||||
.chain_err(|| "block missing txids")?
|
||||
.as_array()
|
||||
.chain_err(|| "invalid block txids")?
|
||||
.iter()
|
||||
.map(parse_hash)
|
||||
.collect::<Result<Vec<Txid>>>()
|
||||
}
|
||||
|
||||
pub fn getblocktxids(&self, blockhash: &BlockHash) -> Result<Vec<Txid>> {
|
||||
self.blocktxids_cache
|
||||
.get_or_else(blockhash, || self.load_blocktxids(blockhash))
|
||||
}
|
||||
|
||||
pub fn gettransaction(
|
||||
pub(crate) fn get_transaction_info(
|
||||
&self,
|
||||
txhash: &Txid,
|
||||
txid: &Txid,
|
||||
blockhash: Option<BlockHash>,
|
||||
) -> Result<Value> {
|
||||
// No need to parse the resulting JSON, just return it as-is to the client.
|
||||
self.rpc
|
||||
.call(
|
||||
"getrawtransaction",
|
||||
&[json!(txid), json!(true), json!(blockhash)],
|
||||
)
|
||||
.context("failed to get transaction info")
|
||||
}
|
||||
|
||||
pub(crate) fn get_transaction_hex(
|
||||
&self,
|
||||
txid: &Txid,
|
||||
blockhash: Option<BlockHash>,
|
||||
) -> Result<Value> {
|
||||
let tx = self.get_transaction(txid, blockhash)?;
|
||||
Ok(json!(serialize(&tx).to_hex()))
|
||||
}
|
||||
|
||||
pub(crate) fn get_transaction(
|
||||
&self,
|
||||
txid: &Txid,
|
||||
blockhash: Option<BlockHash>,
|
||||
) -> Result<Transaction> {
|
||||
let mut args = json!([txhash.to_hex(), /*verbose=*/ false]);
|
||||
if let Some(blockhash) = blockhash {
|
||||
args.as_array_mut().unwrap().push(json!(blockhash.to_hex()));
|
||||
}
|
||||
tx_from_value(self.request("getrawtransaction", args)?)
|
||||
self.rpc
|
||||
.get_raw_transaction(txid, blockhash.as_ref())
|
||||
.context("failed to get transaction")
|
||||
}
|
||||
|
||||
pub fn gettransaction_raw(
|
||||
&self,
|
||||
txhash: &Txid,
|
||||
blockhash: Option<BlockHash>,
|
||||
verbose: bool,
|
||||
) -> Result<Value> {
|
||||
let mut args = json!([txhash.to_hex(), verbose]);
|
||||
if let Some(blockhash) = blockhash {
|
||||
args.as_array_mut().unwrap().push(json!(blockhash.to_hex()));
|
||||
}
|
||||
self.request("getrawtransaction", args)
|
||||
pub(crate) fn get_block_txids(&self, blockhash: BlockHash) -> Result<Vec<Txid>> {
|
||||
Ok(self
|
||||
.rpc
|
||||
.get_block_info(&blockhash)
|
||||
.context("failed to get block txids")?
|
||||
.tx)
|
||||
}
|
||||
|
||||
pub fn getmempooltxids(&self) -> Result<HashSet<Txid>> {
|
||||
let txids: Value = self.request("getrawmempool", json!([/*verbose=*/ false]))?;
|
||||
let mut result = HashSet::new();
|
||||
for value in txids.as_array().chain_err(|| "non-array result")? {
|
||||
result.insert(parse_hash(value).chain_err(|| "invalid txid")?);
|
||||
}
|
||||
Ok(result)
|
||||
pub(crate) fn get_mempool_txids(&self) -> Result<Vec<Txid>> {
|
||||
self.rpc
|
||||
.get_raw_mempool()
|
||||
.context("failed to get mempool txids")
|
||||
}
|
||||
|
||||
pub fn getmempoolentry(&self, txid: &Txid) -> Result<MempoolEntry> {
|
||||
let entry = self.request("getmempoolentry", json!([txid.to_hex()]))?;
|
||||
let fee = (entry
|
||||
.get("fee")
|
||||
.chain_err(|| "missing fee")?
|
||||
.as_f64()
|
||||
.chain_err(|| "non-float fee")?
|
||||
* 100_000_000f64) as u64;
|
||||
let vsize = entry
|
||||
.get("size")
|
||||
.or_else(|| entry.get("vsize")) // (https://github.com/bitcoin/bitcoin/pull/15637)
|
||||
.chain_err(|| "missing vsize")?
|
||||
.as_u64()
|
||||
.chain_err(|| "non-integer vsize")? as u32;
|
||||
Ok(MempoolEntry::new(fee, vsize))
|
||||
pub(crate) fn get_mempool_entry(&self, txid: &Txid) -> Result<json::GetMempoolEntryResult> {
|
||||
self.rpc
|
||||
.get_mempool_entry(txid)
|
||||
.context("failed to get mempool entry")
|
||||
}
|
||||
|
||||
pub fn broadcast(&self, tx: &Transaction) -> Result<Txid> {
|
||||
let tx = hex::encode(serialize(tx));
|
||||
let txid = self.request("sendrawtransaction", json!([tx]))?;
|
||||
Txid::from_hex(txid.as_str().chain_err(|| "non-string txid")?)
|
||||
.chain_err(|| "failed to parse txid")
|
||||
pub(crate) fn get_new_headers(&self, chain: &Chain) -> Result<Vec<NewHeader>> {
|
||||
self.p2p.lock().get_new_headers(chain)
|
||||
}
|
||||
|
||||
fn get_all_headers(&self, tip: &BlockHash) -> Result<Vec<BlockHeader>> {
|
||||
let info: Value = self.request("getblockheader", json!([tip.to_hex()]))?;
|
||||
let tip_height = info
|
||||
.get("height")
|
||||
.expect("missing height")
|
||||
.as_u64()
|
||||
.expect("non-numeric height") as usize;
|
||||
let all_heights: Vec<usize> = (0..=tip_height).collect();
|
||||
let chunk_size = 100_000;
|
||||
let mut result = vec![];
|
||||
let null_hash = BlockHash::default();
|
||||
for heights in all_heights.chunks(chunk_size) {
|
||||
trace!("downloading {} block headers", heights.len());
|
||||
let mut headers = self.getblockheaders(heights)?;
|
||||
assert!(headers.len() == heights.len());
|
||||
result.append(&mut headers);
|
||||
}
|
||||
|
||||
let mut blockhash = null_hash;
|
||||
for header in &result {
|
||||
assert_eq!(header.prev_blockhash, blockhash);
|
||||
blockhash = header.block_hash();
|
||||
}
|
||||
assert_eq!(blockhash, *tip);
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
// Returns a list of BlockHeaders in ascending height (i.e. the tip is last).
|
||||
pub fn get_new_headers(
|
||||
&self,
|
||||
indexed_headers: &HeaderList,
|
||||
bestblockhash: &BlockHash,
|
||||
) -> Result<Vec<BlockHeader>> {
|
||||
// Iterate back over headers until a known blockhash is found:
|
||||
if indexed_headers.is_empty() {
|
||||
return self.get_all_headers(bestblockhash);
|
||||
}
|
||||
debug!(
|
||||
"downloading new block headers ({} already indexed) from {}",
|
||||
indexed_headers.len(),
|
||||
bestblockhash,
|
||||
);
|
||||
let mut new_headers = vec![];
|
||||
let null_hash = BlockHash::default();
|
||||
let mut blockhash = *bestblockhash;
|
||||
while blockhash != null_hash {
|
||||
if indexed_headers.header_by_blockhash(&blockhash).is_some() {
|
||||
break;
|
||||
}
|
||||
let header = self
|
||||
.getblockheader(&blockhash)
|
||||
.chain_err(|| format!("failed to get {} header", blockhash))?;
|
||||
new_headers.push(header);
|
||||
blockhash = header.prev_blockhash;
|
||||
}
|
||||
trace!("downloaded {} block headers", new_headers.len());
|
||||
new_headers.reverse(); // so the tip is the last vector entry
|
||||
Ok(new_headers)
|
||||
pub(crate) fn for_blocks<B, F>(&self, blockhashes: B, func: F) -> Result<()>
|
||||
where
|
||||
B: IntoIterator<Item = BlockHash>,
|
||||
F: FnMut(BlockHash, Block) + Send,
|
||||
{
|
||||
self.p2p.lock().for_blocks(blockhashes, func)
|
||||
}
|
||||
}
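A minimal usage sketch for the for_blocks() API above; the helper name is hypothetical and `daemon` is assumed to be an already-connected Daemon from this module.

// Count transactions across a set of blocks fetched over the P2P connection.
// The closure is FnMut, so it may accumulate state such as this counter.
fn count_block_txs(daemon: &Daemon, blockhashes: Vec<bitcoin::BlockHash>) -> anyhow::Result<usize> {
    let mut total = 0;
    daemon.for_blocks(blockhashes, |_blockhash, block| total += block.txdata.len())?;
    Ok(total)
}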
|
||||
|
||||
pub(crate) type RpcError = bitcoincore_rpc::jsonrpc::error::RpcError;
|
||||
|
||||
pub(crate) fn extract_bitcoind_error(err: &bitcoincore_rpc::Error) -> Option<&RpcError> {
|
||||
use bitcoincore_rpc::{
|
||||
jsonrpc::error::Error::Rpc as ServerError, Error::JsonRpc as JsonRpcError,
|
||||
};
|
||||
match err {
|
||||
JsonRpcError(ServerError(e)) => Some(e),
|
||||
_ => None,
|
||||
}
|
||||
}
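A small hedged sketch of how callers (such as rpc_poll() above) can use extract_bitcoind_error() to detect bitcoind's warmup state; the helper name is made up.

// bitcoind signals "RPC in warmup" with error code -28; anything else maps to false.
fn is_warming_up(err: &bitcoincore_rpc::Error) -> bool {
    extract_bitcoind_error(err).map_or(false, |e| e.code == -28)
}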
|
||||
|
378
src/db.rs
Normal file
@ -0,0 +1,378 @@
|
||||
use anyhow::{Context, Result};
|
||||
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
pub(crate) type Row = Box<[u8]>;
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct WriteBatch {
|
||||
pub(crate) tip_row: Row,
|
||||
pub(crate) header_rows: Vec<Row>,
|
||||
pub(crate) funding_rows: Vec<Row>,
|
||||
pub(crate) spending_rows: Vec<Row>,
|
||||
pub(crate) txid_rows: Vec<Row>,
|
||||
}
|
||||
|
||||
impl WriteBatch {
|
||||
pub(crate) fn sort(&mut self) {
|
||||
self.header_rows.sort_unstable();
|
||||
self.funding_rows.sort_unstable();
|
||||
self.spending_rows.sort_unstable();
|
||||
self.txid_rows.sort_unstable();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Options {
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
/// RocksDB wrapper for index storage
|
||||
pub struct DBStore {
|
||||
db: rocksdb::DB,
|
||||
bulk_import: AtomicBool,
|
||||
}
|
||||
|
||||
const CONFIG_CF: &str = "config";
|
||||
const HEADERS_CF: &str = "headers";
|
||||
const TXID_CF: &str = "txid";
|
||||
const FUNDING_CF: &str = "funding";
|
||||
const SPENDING_CF: &str = "spending";
|
||||
|
||||
const COLUMN_FAMILIES: &[&str] = &[CONFIG_CF, HEADERS_CF, TXID_CF, FUNDING_CF, SPENDING_CF];
|
||||
|
||||
const CONFIG_KEY: &str = "C";
|
||||
const TIP_KEY: &[u8] = b"T";
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
struct Config {
|
||||
compacted: bool,
|
||||
format: u64,
|
||||
}
|
||||
|
||||
const CURRENT_FORMAT: u64 = 0;
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Config {
|
||||
compacted: false,
|
||||
format: CURRENT_FORMAT,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_opts() -> rocksdb::Options {
|
||||
let mut opts = rocksdb::Options::default();
|
||||
opts.set_keep_log_file_num(10);
|
||||
opts.set_max_open_files(16);
|
||||
opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
|
||||
opts.set_compression_type(rocksdb::DBCompressionType::Zstd);
|
||||
opts.set_target_file_size_base(256 << 20);
|
||||
opts.set_write_buffer_size(256 << 20);
|
||||
opts.set_disable_auto_compactions(true); // for initial bulk load
|
||||
opts.set_advise_random_on_open(false); // bulk load uses sequential I/O
|
||||
opts.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix(8));
|
||||
opts
|
||||
}
|
||||
|
||||
impl DBStore {
|
||||
fn create_cf_descriptors() -> Vec<rocksdb::ColumnFamilyDescriptor> {
|
||||
COLUMN_FAMILIES
|
||||
.iter()
|
||||
.map(|&name| rocksdb::ColumnFamilyDescriptor::new(name, default_opts()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn open_internal(path: &Path) -> Result<Self> {
|
||||
let mut db_opts = default_opts();
|
||||
db_opts.create_if_missing(true);
|
||||
db_opts.create_missing_column_families(true);
|
||||
|
||||
let db = rocksdb::DB::open_cf_descriptors(&db_opts, path, Self::create_cf_descriptors())
|
||||
.with_context(|| format!("failed to open DB: {}", path.display()))?;
|
||||
let live_files = db.live_files()?;
|
||||
info!(
|
||||
"{:?}: {} SST files, {} GB, {} Grows",
|
||||
path,
|
||||
live_files.len(),
|
||||
live_files.iter().map(|f| f.size).sum::<usize>() as f64 / 1e9,
|
||||
live_files.iter().map(|f| f.num_entries).sum::<u64>() as f64 / 1e9
|
||||
);
|
||||
let store = DBStore {
|
||||
db,
|
||||
bulk_import: AtomicBool::new(true),
|
||||
};
|
||||
Ok(store)
|
||||
}
|
||||
|
||||
fn is_legacy_format(&self) -> bool {
|
||||
// In legacy DB format, all data was stored in a single (default) column family.
|
||||
self.db
|
||||
.iterator(rocksdb::IteratorMode::Start)
|
||||
.next()
|
||||
.is_some()
|
||||
}
|
||||
|
||||
/// Opens a new RocksDB at the specified location.
|
||||
pub fn open(path: &Path, auto_reindex: bool) -> Result<Self> {
|
||||
let mut store = Self::open_internal(path)?;
|
||||
let config = store.get_config();
|
||||
debug!("DB {:?}", config);
|
||||
let mut config = config.unwrap_or_default(); // use default config when DB is empty
|
||||
|
||||
let reindex_cause = if store.is_legacy_format() {
|
||||
Some("legacy format".to_owned())
|
||||
} else if config.format != CURRENT_FORMAT {
|
||||
Some(format!(
|
||||
"unsupported format {} != {}",
|
||||
config.format, CURRENT_FORMAT
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
if let Some(cause) = reindex_cause {
|
||||
if !auto_reindex {
|
||||
bail!("re-index required due to {}", cause);
|
||||
}
|
||||
warn!(
|
||||
"Database needs to be re-indexed due to {}, going to delete {}",
|
||||
cause,
|
||||
path.display()
|
||||
);
|
||||
// close DB before deletion
|
||||
drop(store);
|
||||
rocksdb::DB::destroy(&default_opts(), &path).with_context(|| {
|
||||
format!(
|
||||
"re-index required but the old database ({}) can not be deleted",
|
||||
path.display()
|
||||
)
|
||||
})?;
|
||||
store = Self::open_internal(path)?;
|
||||
config = Config::default(); // re-init config after dropping DB
|
||||
}
|
||||
if config.compacted {
|
||||
store.start_compactions();
|
||||
}
|
||||
store.set_config(config);
|
||||
Ok(store)
|
||||
}
|
||||
|
||||
fn config_cf(&self) -> &rocksdb::ColumnFamily {
|
||||
self.db.cf_handle(CONFIG_CF).expect("missing CONFIG_CF")
|
||||
}
|
||||
|
||||
fn funding_cf(&self) -> &rocksdb::ColumnFamily {
|
||||
self.db.cf_handle(FUNDING_CF).expect("missing FUNDING_CF")
|
||||
}
|
||||
|
||||
fn spending_cf(&self) -> &rocksdb::ColumnFamily {
|
||||
self.db.cf_handle(SPENDING_CF).expect("missing SPENDING_CF")
|
||||
}
|
||||
|
||||
fn txid_cf(&self) -> &rocksdb::ColumnFamily {
|
||||
self.db.cf_handle(TXID_CF).expect("missing TXID_CF")
|
||||
}
|
||||
|
||||
fn headers_cf(&self) -> &rocksdb::ColumnFamily {
|
||||
self.db.cf_handle(HEADERS_CF).expect("missing HEADERS_CF")
|
||||
}
|
||||
|
||||
pub(crate) fn iter_funding(&self, prefix: Row) -> ScanIterator {
|
||||
self.iter_prefix_cf(self.funding_cf(), prefix)
|
||||
}
|
||||
|
||||
pub(crate) fn iter_spending(&self, prefix: Row) -> ScanIterator {
|
||||
self.iter_prefix_cf(self.spending_cf(), prefix)
|
||||
}
|
||||
|
||||
pub(crate) fn iter_txid(&self, prefix: Row) -> ScanIterator {
|
||||
self.iter_prefix_cf(self.txid_cf(), prefix)
|
||||
}
|
||||
|
||||
fn iter_prefix_cf(&self, cf: &rocksdb::ColumnFamily, prefix: Row) -> ScanIterator {
|
||||
let mode = rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward);
|
||||
let iter = self.db.iterator_cf(cf, mode);
|
||||
ScanIterator {
|
||||
prefix,
|
||||
iter,
|
||||
done: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn read_headers(&self) -> Vec<Row> {
|
||||
let mut opts = rocksdb::ReadOptions::default();
|
||||
opts.fill_cache(false);
|
||||
self.db
|
||||
.iterator_cf_opt(self.headers_cf(), opts, rocksdb::IteratorMode::Start)
|
||||
.map(|(key, _)| key)
|
||||
.filter(|key| &key[..] != TIP_KEY) // headers' rows are longer than TIP_KEY
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(crate) fn get_tip(&self) -> Option<Vec<u8>> {
|
||||
self.db
|
||||
.get_cf(self.headers_cf(), TIP_KEY)
|
||||
.expect("get_tip failed")
|
||||
}
|
||||
|
||||
pub(crate) fn write(&self, batch: WriteBatch) {
|
||||
let mut db_batch = rocksdb::WriteBatch::default();
|
||||
for key in batch.funding_rows {
|
||||
db_batch.put_cf(self.funding_cf(), key, b"");
|
||||
}
|
||||
for key in batch.spending_rows {
|
||||
db_batch.put_cf(self.spending_cf(), key, b"");
|
||||
}
|
||||
for key in batch.txid_rows {
|
||||
db_batch.put_cf(self.txid_cf(), key, b"");
|
||||
}
|
||||
for key in batch.header_rows {
|
||||
db_batch.put_cf(self.headers_cf(), key, b"");
|
||||
}
|
||||
db_batch.put_cf(self.headers_cf(), TIP_KEY, batch.tip_row);
|
||||
|
||||
let mut opts = rocksdb::WriteOptions::new();
|
||||
let bulk_import = self.bulk_import.load(Ordering::Relaxed);
|
||||
opts.set_sync(!bulk_import);
|
||||
opts.disable_wal(bulk_import);
|
||||
self.db.write_opt(db_batch, &opts).unwrap();
|
||||
}
|
||||
|
||||
pub(crate) fn flush(&self) {
|
||||
let mut config = self.get_config().unwrap_or_default();
|
||||
for name in COLUMN_FAMILIES {
|
||||
let cf = self.db.cf_handle(name).expect("missing CF");
|
||||
self.db.flush_cf(cf).expect("CF flush failed");
|
||||
}
|
||||
if !config.compacted {
|
||||
for name in COLUMN_FAMILIES {
|
||||
info!("starting {} compaction", name);
|
||||
let cf = self.db.cf_handle(name).expect("missing CF");
|
||||
self.db.compact_range_cf(cf, None::<&[u8]>, None::<&[u8]>);
|
||||
}
|
||||
config.compacted = true;
|
||||
self.set_config(config);
|
||||
info!("finished full compaction");
|
||||
self.start_compactions();
|
||||
}
|
||||
if log_enabled!(log::Level::Trace) {
|
||||
for property in &["rocksdb.dbstats"] {
|
||||
let stats = self
|
||||
.db
|
||||
.property_value(property)
|
||||
.expect("failed to get property")
|
||||
.expect("missing property");
|
||||
trace!("{}: {}", property, stats);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn start_compactions(&self) {
|
||||
self.bulk_import.store(false, Ordering::Relaxed);
|
||||
for name in COLUMN_FAMILIES {
|
||||
let cf = self.db.cf_handle(name).expect("missing CF");
|
||||
self.db
|
||||
.set_options_cf(cf, &[("disable_auto_compactions", "false")])
|
||||
.expect("failed to start auto-compactions");
|
||||
}
|
||||
debug!("auto-compactions enabled");
|
||||
}
|
||||
|
||||
fn set_config(&self, config: Config) {
|
||||
let mut opts = rocksdb::WriteOptions::default();
|
||||
opts.set_sync(true);
|
||||
opts.disable_wal(false);
|
||||
let value = serde_json::to_vec(&config).expect("failed to serialize config");
|
||||
self.db
|
||||
.put_cf_opt(self.config_cf(), CONFIG_KEY, value, &opts)
|
||||
.expect("DB::put failed");
|
||||
}
|
||||
|
||||
fn get_config(&self) -> Option<Config> {
|
||||
self.db
|
||||
.get_cf(self.config_cf(), CONFIG_KEY)
|
||||
.expect("DB::get failed")
|
||||
.map(|value| serde_json::from_slice(&value).expect("failed to deserialize Config"))
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct ScanIterator<'a> {
|
||||
prefix: Row,
|
||||
iter: rocksdb::DBIterator<'a>,
|
||||
done: bool,
|
||||
}
|
||||
|
||||
impl<'a> Iterator for ScanIterator<'a> {
|
||||
type Item = Row;
|
||||
|
||||
fn next(&mut self) -> Option<Row> {
|
||||
if self.done {
|
||||
return None;
|
||||
}
|
||||
let (key, _) = self.iter.next()?;
|
||||
if !key.starts_with(&self.prefix) {
|
||||
self.done = true;
|
||||
return None;
|
||||
}
|
||||
Some(key)
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for DBStore {
|
||||
fn drop(&mut self) {
|
||||
info!("closing DB at {}", self.db.path().display());
|
||||
}
|
||||
}
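An in-crate usage sketch for DBStore, assuming the pub(crate) API above; the key bytes are made up for illustration (real rows are produced by the index code).

// Open (or auto-reindex) a store, persist one batch, then scan a key prefix.
fn dbstore_roundtrip(dir: &std::path::Path) -> anyhow::Result<()> {
    let store = DBStore::open(dir, /*auto_reindex=*/ true)?;
    let mut batch = WriteBatch::default();
    batch.tip_row = b"made-up-tip".to_vec().into_boxed_slice();
    batch.funding_rows.push(b"prefix01-rest".to_vec().into_boxed_slice());
    batch.sort();
    store.write(batch);
    store.flush();
    // Prefix scans rely on the 8-byte fixed-prefix extractor set in default_opts().
    let prefix: Row = b"prefix01".to_vec().into_boxed_slice();
    let hits: Vec<Row> = store.iter_funding(prefix).collect();
    assert_eq!(hits.len(), 1);
    Ok(())
}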
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{DBStore, CURRENT_FORMAT};
|
||||
|
||||
#[test]
|
||||
fn test_reindex_new_format() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
{
|
||||
let store = DBStore::open(dir.path(), false).unwrap();
|
||||
let mut config = store.get_config().unwrap();
|
||||
config.format += 1;
|
||||
store.set_config(config);
|
||||
};
|
||||
assert_eq!(
|
||||
DBStore::open(dir.path(), false).err().unwrap().to_string(),
|
||||
format!(
|
||||
"re-index required due to unsupported format {} != {}",
|
||||
CURRENT_FORMAT + 1,
|
||||
CURRENT_FORMAT
|
||||
)
|
||||
);
|
||||
{
|
||||
let store = DBStore::open(dir.path(), true).unwrap();
|
||||
store.flush();
|
||||
let config = store.get_config().unwrap();
|
||||
assert_eq!(config.format, CURRENT_FORMAT);
|
||||
assert_eq!(store.is_legacy_format(), false);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reindex_legacy_format() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
{
|
||||
let mut db_opts = rocksdb::Options::default();
|
||||
db_opts.create_if_missing(true);
|
||||
let db = rocksdb::DB::open(&db_opts, dir.path()).unwrap();
|
||||
db.put(b"F", b"").unwrap(); // insert legacy DB compaction marker (in 'default' column family)
|
||||
};
|
||||
assert_eq!(
|
||||
DBStore::open(dir.path(), false).err().unwrap().to_string(),
|
||||
format!("re-index required due to legacy format",)
|
||||
);
|
||||
{
|
||||
let store = DBStore::open(dir.path(), true).unwrap();
|
||||
store.flush();
|
||||
let config = store.get_config().unwrap();
|
||||
assert_eq!(config.format, CURRENT_FORMAT);
|
||||
}
|
||||
}
|
||||
}
|
525
src/electrum.rs
Normal file
@ -0,0 +1,525 @@
|
||||
use anyhow::{bail, Context, Result};
|
||||
use bitcoin::{
|
||||
consensus::{deserialize, serialize},
|
||||
hashes::hex::{FromHex, ToHex},
|
||||
BlockHash, Txid,
|
||||
};
|
||||
use rayon::prelude::*;
|
||||
use serde_derive::Deserialize;
|
||||
use serde_json::{self, json, Value};
|
||||
|
||||
use std::collections::{hash_map::Entry, HashMap};
|
||||
use std::iter::FromIterator;
|
||||
|
||||
use crate::{
|
||||
cache::Cache,
|
||||
config::Config,
|
||||
daemon::{self, extract_bitcoind_error, Daemon},
|
||||
merkle::Proof,
|
||||
metrics::Histogram,
|
||||
status::ScriptHashStatus,
|
||||
tracker::Tracker,
|
||||
types::ScriptHash,
|
||||
};
|
||||
|
||||
const ELECTRS_VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
const PROTOCOL_VERSION: &str = "1.4";
|
||||
|
||||
const UNKNOWN_FEE: isize = -1; // (allowed by Electrum protocol)
|
||||
|
||||
/// Per-client Electrum protocol state
|
||||
#[derive(Default)]
|
||||
pub struct Client {
|
||||
tip: Option<BlockHash>,
|
||||
scripthashes: HashMap<ScriptHash, ScriptHashStatus>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct Request {
|
||||
id: Value,
|
||||
method: String,
|
||||
|
||||
#[serde(default)]
|
||||
params: Value,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(untagged)]
|
||||
enum Requests {
|
||||
Single(Request),
|
||||
Batch(Vec<Request>),
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, PartialEq, Eq)]
|
||||
#[serde(untagged)]
|
||||
enum Version {
|
||||
Single(String),
|
||||
Range(String, String),
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(untagged)]
|
||||
enum TxGetArgs {
|
||||
Txid((Txid,)),
|
||||
TxidVerbose(Txid, bool),
|
||||
}
|
||||
|
||||
impl From<TxGetArgs> for (Txid, bool) {
|
||||
fn from(args: TxGetArgs) -> Self {
|
||||
match args {
|
||||
TxGetArgs::Txid((txid,)) => (txid, false),
|
||||
TxGetArgs::TxidVerbose(txid, verbose) => (txid, verbose),
|
||||
}
|
||||
}
|
||||
}
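The untagged enum above accepts both parameter shapes Electrum clients use for blockchain.transaction.get; a hedged in-module sketch (the txid literal is just the genesis coinbase, used as an example):

// ["<txid>"] deserializes to TxGetArgs::Txid (verbose = false),
// ["<txid>", true] to TxGetArgs::TxidVerbose; both normalize via From into (Txid, bool).
fn parse_tx_get_args_example() -> (Txid, bool) {
    let txid = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b";
    let args: TxGetArgs =
        serde_json::from_value(serde_json::json!([txid, true])).expect("valid params");
    args.into()
}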
|
||||
|
||||
enum StandardError {
|
||||
ParseError,
|
||||
InvalidRequest,
|
||||
MethodNotFound,
|
||||
InvalidParams,
|
||||
}
|
||||
|
||||
enum RpcError {
|
||||
// JSON-RPC spec errors
|
||||
Standard(StandardError),
|
||||
// Electrum-specific errors
|
||||
BadRequest(anyhow::Error),
|
||||
DaemonError(daemon::RpcError),
|
||||
}
|
||||
|
||||
impl RpcError {
|
||||
fn to_value(&self) -> Value {
|
||||
match self {
|
||||
RpcError::Standard(err) => match err {
|
||||
StandardError::ParseError => json!({"code": -32700, "message": "parse error"}),
|
||||
StandardError::InvalidRequest => {
|
||||
json!({"code": -32600, "message": "invalid request"})
|
||||
}
|
||||
StandardError::MethodNotFound => {
|
||||
json!({"code": -32601, "message": "method not found"})
|
||||
}
|
||||
StandardError::InvalidParams => {
|
||||
json!({"code": -32602, "message": "invalid params"})
|
||||
}
|
||||
},
|
||||
RpcError::BadRequest(err) => json!({"code": 1, "message": err.to_string()}),
|
||||
RpcError::DaemonError(err) => json!({"code": 2, "message": err.message}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Electrum RPC handler
|
||||
pub struct Rpc {
|
||||
tracker: Tracker,
|
||||
cache: Cache,
|
||||
rpc_duration: Histogram,
|
||||
daemon: Daemon,
|
||||
banner: String,
|
||||
port: u16,
|
||||
}
|
||||
|
||||
impl Rpc {
|
||||
pub fn new(config: &Config, tracker: Tracker) -> Result<Self> {
|
||||
let rpc_duration =
|
||||
tracker
|
||||
.metrics()
|
||||
.histogram_vec("rpc_duration", "RPC duration (in seconds)", "method");
|
||||
Ok(Self {
|
||||
tracker,
|
||||
cache: Cache::default(),
|
||||
rpc_duration,
|
||||
daemon: Daemon::connect(config)?,
|
||||
banner: config.server_banner.clone(),
|
||||
port: config.electrum_rpc_addr.port(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn sync(&mut self) -> Result<()> {
|
||||
self.tracker.sync(&self.daemon)
|
||||
}
|
||||
|
||||
pub fn update_client(&self, client: &mut Client) -> Result<Vec<String>> {
|
||||
let chain = self.tracker.chain();
|
||||
let mut notifications = client
|
||||
.scripthashes
|
||||
.par_iter_mut()
|
||||
.filter_map(|(scripthash, status)| -> Option<Result<Value>> {
|
||||
match self
|
||||
.tracker
|
||||
.update_scripthash_status(status, &self.daemon, &self.cache)
|
||||
{
|
||||
Ok(true) => Some(Ok(notification(
|
||||
"blockchain.scripthash.subscribe",
|
||||
&[json!(scripthash), json!(status.statushash())],
|
||||
))),
|
||||
Ok(false) => None, // statushash is the same
|
||||
Err(e) => Some(Err(e)),
|
||||
}
|
||||
})
|
||||
.collect::<Result<Vec<Value>>>()
|
||||
.context("failed to update status")?;
|
||||
|
||||
if let Some(old_tip) = client.tip {
|
||||
let new_tip = self.tracker.chain().tip();
|
||||
if old_tip != new_tip {
|
||||
client.tip = Some(new_tip);
|
||||
let height = chain.height();
|
||||
let header = chain.get_block_header(height).unwrap();
|
||||
notifications.push(notification(
|
||||
"blockchain.headers.subscribe",
|
||||
&[json!({"hex": serialize(&header).to_hex(), "height": height})],
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(notifications.into_iter().map(|v| v.to_string()).collect())
|
||||
}
|
||||
|
||||
fn headers_subscribe(&self, client: &mut Client) -> Result<Value> {
|
||||
let chain = self.tracker.chain();
|
||||
client.tip = Some(chain.tip());
|
||||
let height = chain.height();
|
||||
let header = chain.get_block_header(height).unwrap();
|
||||
Ok(json!({"hex": serialize(header).to_hex(), "height": height}))
|
||||
}
|
||||
|
||||
fn block_header(&self, (height,): (usize,)) -> Result<Value> {
|
||||
let chain = self.tracker.chain();
|
||||
let header = match chain.get_block_header(height) {
|
||||
None => bail!("no header at {}", height),
|
||||
Some(header) => header,
|
||||
};
|
||||
Ok(json!(serialize(header).to_hex()))
|
||||
}
|
||||
|
||||
fn block_headers(&self, (start_height, count): (usize, usize)) -> Result<Value> {
|
||||
let chain = self.tracker.chain();
|
||||
let max_count = 2016usize;
|
||||
|
||||
let count = std::cmp::min(
|
||||
std::cmp::min(count, max_count),
|
||||
chain.height() - start_height + 1,
|
||||
);
|
||||
let heights = start_height..(start_height + count);
|
||||
let hex_headers = String::from_iter(
|
||||
heights.map(|height| serialize(chain.get_block_header(height).unwrap()).to_hex()),
|
||||
);
|
||||
|
||||
Ok(json!({"count": count, "hex": hex_headers, "max": max_count}))
|
||||
}
|
||||
|
||||
fn estimate_fee(&self, (nblocks,): (u16,)) -> Result<Value> {
|
||||
Ok(self
|
||||
.daemon
|
||||
.estimate_fee(nblocks)?
|
||||
.map(|fee_rate| json!(fee_rate.as_btc()))
|
||||
.unwrap_or_else(|| json!(UNKNOWN_FEE)))
|
||||
}
|
||||
|
||||
fn relayfee(&self) -> Result<Value> {
|
||||
Ok(json!(self.daemon.get_relay_fee()?.as_btc())) // [BTC/kB]
|
||||
}
|
||||
|
||||
fn scripthash_get_balance(
|
||||
&self,
|
||||
client: &Client,
|
||||
(scripthash,): (ScriptHash,),
|
||||
) -> Result<Value> {
|
||||
let balance = match client.scripthashes.get(&scripthash) {
|
||||
Some(status) => self.tracker.get_balance(&status),
|
||||
None => {
|
||||
warn!(
|
||||
"blockchain.scripthash.get_balance called for unsubscribed scripthash: {}",
|
||||
scripthash
|
||||
);
|
||||
self.tracker.get_balance(&self.new_status(scripthash)?)
|
||||
}
|
||||
};
|
||||
Ok(json!(balance))
|
||||
}
|
||||
|
||||
fn scripthash_get_history(
|
||||
&self,
|
||||
client: &Client,
|
||||
(scripthash,): (ScriptHash,),
|
||||
) -> Result<Value> {
|
||||
let history_entries = match client.scripthashes.get(&scripthash) {
|
||||
Some(status) => self.tracker.get_history(status),
|
||||
None => {
|
||||
warn!(
|
||||
"blockchain.scripthash.get_history called for unsubscribed scripthash: {}",
|
||||
scripthash
|
||||
);
|
||||
self.tracker.get_history(&self.new_status(scripthash)?)
|
||||
}
|
||||
};
|
||||
Ok(json!(history_entries))
|
||||
}
|
||||
|
||||
fn scripthash_list_unspent(
|
||||
&self,
|
||||
client: &Client,
|
||||
(scripthash,): (ScriptHash,),
|
||||
) -> Result<Value> {
|
||||
let unspent_entries = match client.scripthashes.get(&scripthash) {
|
||||
Some(status) => self.tracker.get_unspent(status),
|
||||
None => {
|
||||
warn!(
|
||||
"blockchain.scripthash.listunspent called for unsubscribed scripthash: {}",
|
||||
scripthash
|
||||
);
|
||||
self.tracker.get_unspent(&self.new_status(scripthash)?)
|
||||
}
|
||||
};
|
||||
Ok(json!(unspent_entries))
|
||||
}
|
||||
|
||||
fn scripthash_subscribe(
|
||||
&self,
|
||||
client: &mut Client,
|
||||
(scripthash,): (ScriptHash,),
|
||||
) -> Result<Value> {
|
||||
let result = match client.scripthashes.entry(scripthash) {
|
||||
Entry::Occupied(e) => e.get().statushash(),
|
||||
Entry::Vacant(e) => e.insert(self.new_status(scripthash)?).statushash(),
|
||||
};
|
||||
Ok(json!(result))
|
||||
}
|
||||
|
||||
fn new_status(&self, scripthash: ScriptHash) -> Result<ScriptHashStatus> {
|
||||
let mut status = ScriptHashStatus::new(scripthash);
|
||||
self.tracker
|
||||
.update_scripthash_status(&mut status, &self.daemon, &self.cache)?;
|
||||
Ok(status)
|
||||
}
|
||||
|
||||
fn transaction_broadcast(&self, (tx_hex,): (String,)) -> Result<Value> {
|
||||
let tx_bytes = Vec::from_hex(&tx_hex).context("non-hex transaction")?;
|
||||
let tx = deserialize(&tx_bytes).context("invalid transaction")?;
|
||||
let txid = self.daemon.broadcast(&tx)?;
|
||||
Ok(json!(txid))
|
||||
}
|
||||
|
||||
fn transaction_get(&self, args: TxGetArgs) -> Result<Value> {
|
||||
let (txid, verbose) = args.into();
|
||||
if verbose {
|
||||
let blockhash = self.tracker.get_blockhash_by_txid(txid);
|
||||
return self.daemon.get_transaction_info(&txid, blockhash);
|
||||
}
|
||||
let cached = self.cache.get_tx(&txid, |tx| serialize(tx).to_hex());
|
||||
Ok(match cached {
|
||||
Some(tx_hex) => json!(tx_hex),
|
||||
None => {
|
||||
debug!("tx cache miss: {}", txid);
|
||||
let blockhash = self.tracker.get_blockhash_by_txid(txid);
|
||||
json!(self.daemon.get_transaction_hex(&txid, blockhash)?)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn transaction_get_merkle(&self, (txid, height): (Txid, usize)) -> Result<Value> {
|
||||
let chain = self.tracker.chain();
|
||||
let blockhash = match chain.get_block_hash(height) {
|
||||
None => bail!("missing block at {}", height),
|
||||
Some(blockhash) => blockhash,
|
||||
};
|
||||
let proof_to_value = |proof: &Proof| {
|
||||
json!({
|
||||
"block_height": height,
|
||||
"pos": proof.position(),
|
||||
"merkle": proof.to_hex(),
|
||||
})
|
||||
};
|
||||
if let Some(result) = self.cache.get_proof(blockhash, txid, proof_to_value) {
|
||||
return Ok(result);
|
||||
}
|
||||
debug!("txids cache miss: {}", blockhash);
|
||||
let txids = self.daemon.get_block_txids(blockhash)?;
|
||||
match txids.iter().position(|current_txid| *current_txid == txid) {
|
||||
None => bail!("missing txid {} in block {}", txid, blockhash),
|
||||
Some(position) => Ok(proof_to_value(&Proof::create(&txids, position))),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_fee_histogram(&self) -> Result<Value> {
|
||||
Ok(json!(self.tracker.fees_histogram()))
|
||||
}
|
||||
|
||||
fn server_id(&self) -> String {
|
||||
format!("electrs/{}", ELECTRS_VERSION)
|
||||
}
|
||||
|
||||
fn version(&self, (client_id, client_version): (String, Version)) -> Result<Value> {
|
||||
match client_version {
|
||||
Version::Single(v) if v == PROTOCOL_VERSION => {
|
||||
Ok(json!([self.server_id(), PROTOCOL_VERSION]))
|
||||
}
|
||||
_ => {
|
||||
bail!(
|
||||
"{} requested {:?}, server supports {}",
|
||||
client_id,
|
||||
client_version,
|
||||
PROTOCOL_VERSION
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn features(&self) -> Result<Value> {
|
||||
Ok(json!({
|
||||
"genesis_hash": self.tracker.chain().get_block_hash(0),
|
||||
"hosts": { "tcp_port": self.port },
|
||||
"protocol_max": PROTOCOL_VERSION,
|
||||
"protocol_min": PROTOCOL_VERSION,
|
||||
"pruning": null,
|
||||
"server_version": self.server_id(),
|
||||
"hash_function": "sha256"
|
||||
}))
|
||||
}
|
||||
|
||||
pub fn handle_request(&self, client: &mut Client, line: &str) -> String {
|
||||
let error_msg_no_id = |err| error_msg(Value::Null, RpcError::Standard(err));
|
||||
let response: Value = match serde_json::from_str(line) {
|
||||
// parse JSON from str
|
||||
Ok(value) => match serde_json::from_value(value) {
|
||||
// parse RPC from JSON
|
||||
Ok(requests) => match requests {
|
||||
Requests::Single(request) => self.call(client, request),
|
||||
Requests::Batch(requests) => json!(requests
|
||||
.into_iter()
|
||||
.map(|request| self.call(client, request))
|
||||
.collect::<Vec<Value>>()),
|
||||
},
|
||||
Err(err) => {
|
||||
warn!("invalid RPC request ({:?}): {}", line, err);
|
||||
error_msg_no_id(StandardError::InvalidRequest)
|
||||
}
|
||||
},
|
||||
Err(err) => {
|
||||
warn!("invalid JSON ({:?}): {}", line, err);
|
||||
error_msg_no_id(StandardError::ParseError)
|
||||
}
|
||||
};
|
||||
response.to_string()
|
||||
}
|
||||
|
||||
fn call(&self, client: &mut Client, request: Request) -> Value {
|
||||
let Request { id, method, params } = request;
|
||||
let call = match Call::parse(&method, params) {
|
||||
Ok(call) => call,
|
||||
Err(err) => return error_msg(id, RpcError::Standard(err)),
|
||||
};
|
||||
self.rpc_duration.observe_duration(&method, || {
|
||||
let result = match call {
|
||||
Call::Banner => Ok(json!(self.banner)),
|
||||
Call::BlockHeader(args) => self.block_header(args),
|
||||
Call::BlockHeaders(args) => self.block_headers(args),
|
||||
Call::Donation => Ok(Value::Null),
|
||||
Call::EstimateFee(args) => self.estimate_fee(args),
|
||||
Call::Features => self.features(),
|
||||
Call::HeadersSubscribe => self.headers_subscribe(client),
|
||||
Call::MempoolFeeHistogram => self.get_fee_histogram(),
|
||||
Call::PeersSubscribe => Ok(json!([])),
|
||||
Call::Ping => Ok(Value::Null),
|
||||
Call::RelayFee => self.relayfee(),
|
||||
Call::ScriptHashGetBalance(args) => self.scripthash_get_balance(client, args),
|
||||
Call::ScriptHashGetHistory(args) => self.scripthash_get_history(client, args),
|
||||
Call::ScriptHashListUnspent(args) => self.scripthash_list_unspent(client, args),
|
||||
Call::ScriptHashSubscribe(args) => self.scripthash_subscribe(client, args),
|
||||
Call::TransactionBroadcast(args) => self.transaction_broadcast(args),
|
||||
Call::TransactionGet(args) => self.transaction_get(args),
|
||||
Call::TransactionGetMerkle(args) => self.transaction_get_merkle(args),
|
||||
Call::Version(args) => self.version(args),
|
||||
};
|
||||
match result {
|
||||
Ok(value) => result_msg(id, value),
|
||||
Err(err) => {
|
||||
warn!("RPC {} failed: {:#}", method, err);
|
||||
match err
|
||||
.downcast_ref::<bitcoincore_rpc::Error>()
|
||||
.and_then(extract_bitcoind_error)
|
||||
{
|
||||
Some(e) => error_msg(id, RpcError::DaemonError(e.clone())),
|
||||
None => error_msg(id, RpcError::BadRequest(err)),
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
enum Call {
|
||||
Banner,
|
||||
BlockHeader((usize,)),
|
||||
BlockHeaders((usize, usize)),
|
||||
TransactionBroadcast((String,)),
|
||||
Donation,
|
||||
EstimateFee((u16,)),
|
||||
Features,
|
||||
HeadersSubscribe,
|
||||
MempoolFeeHistogram,
|
||||
PeersSubscribe,
|
||||
Ping,
|
||||
RelayFee,
|
||||
ScriptHashGetBalance((ScriptHash,)),
|
||||
ScriptHashGetHistory((ScriptHash,)),
|
||||
ScriptHashListUnspent((ScriptHash,)),
|
||||
ScriptHashSubscribe((ScriptHash,)),
|
||||
TransactionGet(TxGetArgs),
|
||||
TransactionGetMerkle((Txid, usize)),
|
||||
Version((String, Version)),
|
||||
}
|
||||
|
||||
impl Call {
|
||||
fn parse(method: &str, params: Value) -> std::result::Result<Call, StandardError> {
|
||||
Ok(match method {
|
||||
"blockchain.block.header" => Call::BlockHeader(convert(params)?),
|
||||
"blockchain.block.headers" => Call::BlockHeaders(convert(params)?),
|
||||
"blockchain.estimatefee" => Call::EstimateFee(convert(params)?),
|
||||
"blockchain.headers.subscribe" => Call::HeadersSubscribe,
|
||||
"blockchain.relayfee" => Call::RelayFee,
|
||||
"blockchain.scripthash.get_balance" => Call::ScriptHashGetBalance(convert(params)?),
|
||||
"blockchain.scripthash.get_history" => Call::ScriptHashGetHistory(convert(params)?),
|
||||
"blockchain.scripthash.listunspent" => Call::ScriptHashListUnspent(convert(params)?),
|
||||
"blockchain.scripthash.subscribe" => Call::ScriptHashSubscribe(convert(params)?),
|
||||
"blockchain.transaction.broadcast" => Call::TransactionBroadcast(convert(params)?),
|
||||
"blockchain.transaction.get" => Call::TransactionGet(convert(params)?),
|
||||
"blockchain.transaction.get_merkle" => Call::TransactionGetMerkle(convert(params)?),
|
||||
"mempool.get_fee_histogram" => Call::MempoolFeeHistogram,
|
||||
"server.banner" => Call::Banner,
|
||||
"server.donation_address" => Call::Donation,
|
||||
"server.features" => Call::Features,
|
||||
"server.peers.subscribe" => Call::PeersSubscribe,
|
||||
"server.ping" => Call::Ping,
|
||||
"server.version" => Call::Version(convert(params)?),
|
||||
_ => {
|
||||
warn!("unknown method {}", method);
|
||||
return Err(StandardError::MethodNotFound);
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn convert<T>(params: Value) -> std::result::Result<T, StandardError>
|
||||
where
|
||||
T: serde::de::DeserializeOwned,
|
||||
{
|
||||
let params_str = params.to_string();
|
||||
serde_json::from_value(params).map_err(|err| {
|
||||
warn!("invalid params {}: {}", params_str, err);
|
||||
StandardError::InvalidParams
|
||||
})
|
||||
}
|
||||
|
||||
fn notification(method: &str, params: &[Value]) -> Value {
|
||||
json!({"jsonrpc": "2.0", "method": method, "params": params})
|
||||
}
|
||||
|
||||
fn result_msg(id: Value, result: Value) -> Value {
|
||||
json!({"jsonrpc": "2.0", "id": id, "result": result})
|
||||
}
|
||||
|
||||
fn error_msg(id: Value, error: RpcError) -> Value {
|
||||
json!({"jsonrpc": "2.0", "id": id, "error": error.to_value()})
|
||||
}
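As a hedged illustration of the envelopes built by result_msg() above, a server.version exchange could look like this (the id and version strings are examples only):

// A client sends one JSON line such as:
//   {"id": 0, "method": "server.version", "params": ["example-client", "1.4"]}
// and the matching success reply is produced by result_msg():
fn example_version_reply() -> String {
    result_msg(
        serde_json::json!(0),
        serde_json::json!(["electrs/0.9.0-rc1", "1.4"]),
    )
    .to_string() // a {"jsonrpc": "2.0", "id": ..., "result": [...]} object
}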
|
@ -1,39 +0,0 @@
|
||||
use serde_json::Value;
|
||||
|
||||
error_chain! {
|
||||
types {
|
||||
Error, ErrorKind, ResultExt, Result;
|
||||
}
|
||||
|
||||
errors {
|
||||
Daemon(method: String, err: Value) {
|
||||
description("RPC error")
|
||||
display("{} RPC error: {}", method, err)
|
||||
}
|
||||
|
||||
Connection(msg: String) {
|
||||
description("Connection error")
|
||||
display("Connection error: {}", msg)
|
||||
}
|
||||
|
||||
Interrupt(sig: i32) {
|
||||
description("Interruption by external signal")
|
||||
display("Interrupted by signal {}", sig)
|
||||
}
|
||||
|
||||
MethodNotFound(method: String) {
|
||||
description("method not found")
|
||||
display("method not found '{}'", method)
|
||||
}
|
||||
|
||||
InvalidRequest(message: &'static str) {
|
||||
description("invalid request")
|
||||
display("invalid request: {}", message)
|
||||
}
|
||||
|
||||
ParseError {
|
||||
description("parse error")
|
||||
display("parse error")
|
||||
}
|
||||
}
|
||||
}
|
37
src/fake.rs
@ -1,37 +0,0 @@
|
||||
use crate::store::{ReadStore, Row, WriteStore};
|
||||
use crate::util::Bytes;
|
||||
|
||||
pub struct FakeStore;
|
||||
|
||||
impl ReadStore for FakeStore {
|
||||
fn get(&self, _key: &[u8]) -> Option<Bytes> {
|
||||
None
|
||||
}
|
||||
fn scan(&self, _prefix: &[u8]) -> Vec<Row> {
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
|
||||
impl WriteStore for FakeStore {
|
||||
fn write<I: IntoIterator<Item = Row>>(&self, _rows: I) {}
|
||||
fn flush(&self) {}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#[test]
|
||||
fn test_fakestore() {
|
||||
use crate::fake;
|
||||
use crate::store::{ReadStore, Row, WriteStore};
|
||||
|
||||
let store = fake::FakeStore {};
|
||||
store.write(vec![Row {
|
||||
key: b"k".to_vec(),
|
||||
value: b"v".to_vec(),
|
||||
}]);
|
||||
store.flush();
|
||||
// nothing was actually written
|
||||
assert!(store.get(b"").is_none());
|
||||
assert!(store.scan(b"").is_empty());
|
||||
}
|
||||
}
|
595
src/index.rs
@ -1,437 +1,234 @@
|
||||
use bitcoin::blockdata::block::{Block, BlockHeader};
|
||||
use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut};
|
||||
use bitcoin::consensus::encode::{deserialize, serialize};
|
||||
use bitcoin::hash_types::{BlockHash, Txid};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::iter::FromIterator;
|
||||
use std::sync::RwLock;
|
||||
use anyhow::Result;
|
||||
use bitcoin::consensus::{deserialize, serialize};
|
||||
use bitcoin::{Block, BlockHash, OutPoint, Txid};
|
||||
|
||||
use crate::daemon::Daemon;
|
||||
use crate::errors::*;
|
||||
use crate::metrics::{
|
||||
Counter, Gauge, HistogramOpts, HistogramTimer, HistogramVec, MetricOpts, Metrics,
|
||||
};
|
||||
use crate::signal::Waiter;
|
||||
use crate::store::{ReadStore, Row, WriteStore};
|
||||
use crate::util::{
|
||||
full_hash, hash_prefix, spawn_thread, Bytes, FullHash, HashPrefix, HeaderEntry, HeaderList,
|
||||
HeaderMap, SyncChannel, HASH_PREFIX_LEN,
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::{
|
||||
chain::Chain,
|
||||
daemon::Daemon,
|
||||
db::{DBStore, Row, WriteBatch},
|
||||
metrics::{Histogram, Metrics},
|
||||
types::{HeaderRow, ScriptHash, ScriptHashRow, SpendingPrefixRow, TxidRow},
|
||||
};
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct TxInKey {
|
||||
pub code: u8,
|
||||
pub prev_hash_prefix: HashPrefix,
|
||||
pub prev_index: u16,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct TxInRow {
|
||||
key: TxInKey,
|
||||
pub txid_prefix: HashPrefix,
|
||||
}
|
||||
|
||||
impl TxInRow {
|
||||
pub fn new(txid: &Txid, input: &TxIn) -> TxInRow {
|
||||
TxInRow {
|
||||
key: TxInKey {
|
||||
code: b'I',
|
||||
prev_hash_prefix: hash_prefix(&input.previous_output.txid[..]),
|
||||
prev_index: input.previous_output.vout as u16,
|
||||
},
|
||||
txid_prefix: hash_prefix(&txid[..]),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn filter(txid: &Txid, output_index: usize) -> Bytes {
|
||||
bincode::serialize(&TxInKey {
|
||||
code: b'I',
|
||||
prev_hash_prefix: hash_prefix(&txid[..]),
|
||||
prev_index: output_index as u16,
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn to_row(&self) -> Row {
|
||||
Row {
|
||||
key: bincode::serialize(&self).unwrap(),
|
||||
value: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_row(row: &Row) -> TxInRow {
|
||||
bincode::deserialize(&row.key).expect("failed to parse TxInRow")
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct TxOutKey {
|
||||
code: u8,
|
||||
script_hash_prefix: HashPrefix,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct TxOutRow {
|
||||
key: TxOutKey,
|
||||
pub txid_prefix: HashPrefix,
|
||||
}
|
||||
|
||||
impl TxOutRow {
|
||||
pub fn new(txid: &Txid, output: &TxOut) -> TxOutRow {
|
||||
TxOutRow {
|
||||
key: TxOutKey {
|
||||
code: b'O',
|
||||
script_hash_prefix: hash_prefix(&compute_script_hash(&output.script_pubkey[..])),
|
||||
},
|
||||
txid_prefix: hash_prefix(&txid[..]),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn filter(script_hash: &[u8]) -> Bytes {
|
||||
bincode::serialize(&TxOutKey {
|
||||
code: b'O',
|
||||
script_hash_prefix: hash_prefix(&script_hash[..HASH_PREFIX_LEN]),
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn to_row(&self) -> Row {
|
||||
Row {
|
||||
key: bincode::serialize(&self).unwrap(),
|
||||
value: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_row(row: &Row) -> TxOutRow {
|
||||
bincode::deserialize(&row.key).expect("failed to parse TxOutRow")
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct TxKey {
|
||||
code: u8,
|
||||
pub txid: FullHash,
|
||||
}
|
||||
|
||||
pub struct TxRow {
|
||||
pub key: TxKey,
|
||||
pub height: u32, // value
|
||||
}
|
||||
|
||||
impl TxRow {
|
||||
pub fn new(txid: &Txid, height: u32) -> TxRow {
|
||||
TxRow {
|
||||
key: TxKey {
|
||||
code: b'T',
|
||||
txid: full_hash(&txid[..]),
|
||||
},
|
||||
height,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn filter_prefix(txid_prefix: HashPrefix) -> Bytes {
|
||||
[b"T", &txid_prefix[..]].concat()
|
||||
}
|
||||
|
||||
pub fn filter_full(txid: &Txid) -> Bytes {
|
||||
[b"T", &txid[..]].concat()
|
||||
}
|
||||
|
||||
pub fn to_row(&self) -> Row {
|
||||
Row {
|
||||
key: bincode::serialize(&self.key).unwrap(),
|
||||
value: bincode::serialize(&self.height).unwrap(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_row(row: &Row) -> TxRow {
|
||||
TxRow {
|
||||
key: bincode::deserialize(&row.key).expect("failed to parse TxKey"),
|
||||
height: bincode::deserialize(&row.value).expect("failed to parse height"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct BlockKey {
|
||||
code: u8,
|
||||
hash: FullHash,
|
||||
}
|
||||
|
||||
pub fn compute_script_hash(data: &[u8]) -> FullHash {
|
||||
let mut sha2 = Sha256::new();
|
||||
sha2.update(data);
|
||||
sha2.finalize().into()
|
||||
}
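A tiny sketch of the removed helper above: the index stores SHA-256 hashes of raw scriptPubKey bytes (the script below is a made-up one-byte example).

// Hash an example scriptPubKey with the helper above.
fn example_script_hash() -> FullHash {
    let script_pubkey: [u8; 1] = [0x6a]; // a bare OP_RETURN, for illustration only
    compute_script_hash(&script_pubkey[..])
}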
|
||||
|
||||
pub fn index_transaction<'a>(
|
||||
txn: &'a Transaction,
|
||||
height: usize,
|
||||
) -> impl 'a + Iterator<Item = Row> {
|
||||
let null_hash = Txid::default();
|
||||
let txid = txn.txid();
|
||||
|
||||
let inputs = txn.input.iter().filter_map(move |input| {
|
||||
if input.previous_output.txid == null_hash {
|
||||
None
|
||||
} else {
|
||||
Some(TxInRow::new(&txid, input).to_row())
|
||||
}
|
||||
});
|
||||
let outputs = txn
|
||||
.output
|
||||
.iter()
|
||||
.map(move |output| TxOutRow::new(&txid, output).to_row());
|
||||
|
||||
// Persist transaction ID and confirmed height
|
||||
inputs
|
||||
.chain(outputs)
|
||||
.chain(std::iter::once(TxRow::new(&txid, height as u32).to_row()))
|
||||
}
|
||||
|
||||
pub fn index_block<'a>(block: &'a Block, height: usize) -> impl 'a + Iterator<Item = Row> {
|
||||
let blockhash = block.block_hash();
|
||||
// Persist block hash and header
|
||||
let row = Row {
|
||||
key: bincode::serialize(&BlockKey {
|
||||
code: b'B',
|
||||
hash: full_hash(&blockhash[..]),
|
||||
})
|
||||
.unwrap(),
|
||||
value: serialize(&block.header),
|
||||
};
|
||||
block
|
||||
.txdata
|
||||
.iter()
|
||||
.flat_map(move |txn| index_transaction(txn, height))
|
||||
.chain(std::iter::once(row))
|
||||
}
|
||||
|
||||
pub fn last_indexed_block(blockhash: &BlockHash) -> Row {
|
||||
// Store last indexed block (i.e. all previous blocks were indexed)
|
||||
Row {
|
||||
key: b"L".to_vec(),
|
||||
value: serialize(blockhash),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read_indexed_blockhashes(store: &dyn ReadStore) -> HashSet<BlockHash> {
    let mut result = HashSet::new();
    for row in store.scan(b"B") {
        let key: BlockKey = bincode::deserialize(&row.key).unwrap();
        result.insert(deserialize(&key.hash).unwrap());
    }
    result
}

fn read_indexed_headers(store: &dyn ReadStore) -> HeaderList {
    let latest_blockhash: BlockHash = match store.get(b"L") {
        // latest blockheader persisted in the DB.
        Some(row) => deserialize(&row).unwrap(),
        None => BlockHash::default(),
    };
    trace!("latest indexed blockhash: {}", latest_blockhash);
    let mut map = HeaderMap::new();
    for row in store.scan(b"B") {
        let key: BlockKey = bincode::deserialize(&row.key).unwrap();
        let header: BlockHeader = deserialize(&row.value).unwrap();
        map.insert(deserialize(&key.hash).unwrap(), header);
    }
    let mut headers = vec![];
    let null_hash = BlockHash::default();
    let mut blockhash = latest_blockhash;
    while blockhash != null_hash {
        let header = map
            .remove(&blockhash)
            .unwrap_or_else(|| panic!("missing {} header in DB", blockhash));
        blockhash = header.prev_blockhash;
        headers.push(header);
    }
    headers.reverse();
    assert_eq!(
        headers
            .first()
            .map(|h| h.prev_blockhash)
            .unwrap_or(null_hash),
        null_hash
    );
    assert_eq!(
        headers
            .last()
            .map(BlockHeader::block_hash)
            .unwrap_or(null_hash),
        latest_blockhash
    );
    let mut result = HeaderList::empty();
    let entries = result.order(headers);
    result.apply(entries, latest_blockhash);
    result
}

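// Note on the two assertions above (added commentary, not part of this diff): after the
// backward walk from the stored tip and the reverse(), the first header's prev_blockhash
// must be the all-zero hash (the chain reaches back to genesis) and the last header's hash
// must equal the stored tip, so the DB is guaranteed to hold one contiguous chain.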
#[derive(Clone)]
|
||||
struct Stats {
|
||||
blocks: Counter,
|
||||
txns: Counter,
|
||||
vsize: Counter,
|
||||
height: Gauge,
|
||||
duration: HistogramVec,
|
||||
update_duration: Histogram,
|
||||
update_size: Histogram,
|
||||
lookup_duration: Histogram,
|
||||
}
|
||||
|
||||
impl Stats {
|
||||
fn new(metrics: &Metrics) -> Stats {
|
||||
Stats {
|
||||
blocks: metrics.counter(MetricOpts::new(
|
||||
"electrs_index_blocks",
|
||||
"# of indexed blocks",
|
||||
)),
|
||||
txns: metrics.counter(MetricOpts::new(
|
||||
"electrs_index_txns",
|
||||
"# of indexed transactions",
|
||||
)),
|
||||
vsize: metrics.counter(MetricOpts::new(
|
||||
"electrs_index_vsize",
|
||||
"# of indexed vbytes",
|
||||
)),
|
||||
height: metrics.gauge(MetricOpts::new(
|
||||
"electrs_index_height",
|
||||
"Last indexed block's height",
|
||||
)),
|
||||
duration: metrics.histogram_vec(
|
||||
HistogramOpts::new("electrs_index_duration", "indexing duration (in seconds)"),
|
||||
&["step"],
|
||||
fn new(metrics: &Metrics) -> Self {
|
||||
Self {
|
||||
update_duration: metrics.histogram_vec(
|
||||
"index_update_duration",
|
||||
"Index update duration (in seconds)",
|
||||
"step",
|
||||
),
|
||||
update_size: metrics.histogram_vec(
|
||||
"index_update_size",
|
||||
"Index update size (in bytes)",
|
||||
"step",
|
||||
),
|
||||
lookup_duration: metrics.histogram_vec(
|
||||
"index_lookup_duration",
|
||||
"Index lookup duration (in seconds)",
|
||||
"step",
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
fn update(&self, block: &Block, height: usize) {
|
||||
self.blocks.inc();
|
||||
self.txns.inc_by(block.txdata.len() as i64);
|
||||
for tx in &block.txdata {
|
||||
self.vsize.inc_by(tx.get_weight() as i64 / 4);
|
||||
}
|
||||
self.update_height(height);
|
||||
fn observe_size(&self, label: &str, rows: &[Row]) {
|
||||
self.update_size.observe(label, db_rows_size(rows));
|
||||
}
|
||||
|
||||
fn update_height(&self, height: usize) {
|
||||
self.height.set(height as i64);
|
||||
}
|
||||
|
||||
fn start_timer(&self, step: &str) -> HistogramTimer {
|
||||
self.duration.with_label_values(&[step]).start_timer()
|
||||
fn report_stats(&self, batch: &WriteBatch) {
|
||||
self.observe_size("write_funding_rows", &batch.funding_rows);
|
||||
self.observe_size("write_spending_rows", &batch.spending_rows);
|
||||
self.observe_size("write_txid_rows", &batch.txid_rows);
|
||||
self.observe_size("write_header_rows", &batch.header_rows);
|
||||
debug!(
|
||||
"writing {} funding and {} spending rows from {} transactions, {} blocks",
|
||||
batch.funding_rows.len(),
|
||||
batch.spending_rows.len(),
|
||||
batch.txid_rows.len(),
|
||||
batch.header_rows.len()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
struct IndexResult {
|
||||
header_row: HeaderRow,
|
||||
funding_rows: Vec<ScriptHashRow>,
|
||||
spending_rows: Vec<SpendingPrefixRow>,
|
||||
txid_rows: Vec<TxidRow>,
|
||||
}
|
||||
|
||||
impl IndexResult {
|
||||
fn extend(&self, batch: &mut WriteBatch) {
|
||||
let funding_rows = self.funding_rows.iter().map(ScriptHashRow::to_db_row);
|
||||
batch.funding_rows.extend(funding_rows);
|
||||
|
||||
let spending_rows = self.spending_rows.iter().map(SpendingPrefixRow::to_db_row);
|
||||
batch.spending_rows.extend(spending_rows);
|
||||
|
||||
let txid_rows = self.txid_rows.iter().map(TxidRow::to_db_row);
|
||||
batch.txid_rows.extend(txid_rows);
|
||||
|
||||
batch.header_rows.push(self.header_row.to_db_row());
|
||||
batch.tip_row = serialize(&self.header_row.header.block_hash()).into_boxed_slice();
|
||||
}
|
||||
}
|
||||
|
||||
/// Confirmed transactions' address index
|
||||
pub struct Index {
|
||||
// TODO: store also latest snapshot.
|
||||
headers: RwLock<HeaderList>,
|
||||
daemon: Daemon,
|
||||
store: DBStore,
|
||||
lookup_limit: Option<usize>,
|
||||
chain: Chain,
|
||||
stats: Stats,
|
||||
batch_size: usize,
|
||||
}
|
||||
|
||||
impl Index {
|
||||
pub fn load(
|
||||
store: &dyn ReadStore,
|
||||
daemon: &Daemon,
|
||||
pub(crate) fn load(
|
||||
store: DBStore,
|
||||
mut chain: Chain,
|
||||
metrics: &Metrics,
|
||||
batch_size: usize,
|
||||
) -> Result<Index> {
|
||||
let stats = Stats::new(metrics);
|
||||
let headers = read_indexed_headers(store);
|
||||
stats.height.set((headers.len() as i64) - 1);
|
||||
lookup_limit: Option<usize>,
|
||||
) -> Result<Self> {
|
||||
if let Some(row) = store.get_tip() {
|
||||
let tip = deserialize(&row).expect("invalid tip");
|
||||
let headers = store
|
||||
.read_headers()
|
||||
.into_iter()
|
||||
.map(|row| HeaderRow::from_db_row(&row).header)
|
||||
.collect();
|
||||
chain.load(headers, tip);
|
||||
};
|
||||
|
||||
Ok(Index {
|
||||
headers: RwLock::new(headers),
|
||||
daemon: daemon.reconnect()?,
|
||||
stats,
|
||||
batch_size,
|
||||
store,
|
||||
lookup_limit,
|
||||
chain,
|
||||
stats: Stats::new(metrics),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn reload(&self, store: &dyn ReadStore) {
|
||||
let mut headers = self.headers.write().unwrap();
|
||||
*headers = read_indexed_headers(store);
|
||||
pub(crate) fn chain(&self) -> &Chain {
|
||||
&self.chain
|
||||
}
|
||||
|
||||
pub fn best_header(&self) -> Option<HeaderEntry> {
|
||||
let headers = self.headers.read().unwrap();
|
||||
headers.header_by_blockhash(&headers.tip()).cloned()
|
||||
}
|
||||
|
||||
pub fn get_header(&self, height: usize) -> Option<HeaderEntry> {
|
||||
self.headers
|
||||
.read()
|
||||
.unwrap()
|
||||
.header_by_height(height)
|
||||
.cloned()
|
||||
}
|
||||
|
||||
pub fn update(&self, store: &impl WriteStore, waiter: &Waiter) -> Result<BlockHash> {
|
||||
let daemon = self.daemon.reconnect()?;
|
||||
let tip = daemon.getbestblockhash()?;
|
||||
let new_headers: Vec<HeaderEntry> = {
|
||||
let indexed_headers = self.headers.read().unwrap();
|
||||
indexed_headers.order(daemon.get_new_headers(&indexed_headers, &tip)?)
|
||||
pub(crate) fn limit_result<T>(&self, entries: impl Iterator<Item = T>) -> Result<Vec<T>> {
|
||||
let mut entries = entries.fuse();
|
||||
let result: Vec<T> = match self.lookup_limit {
|
||||
Some(lookup_limit) => entries.by_ref().take(lookup_limit).collect(),
|
||||
None => entries.by_ref().collect(),
|
||||
};
|
||||
if let Some(latest_header) = new_headers.last() {
|
||||
info!("{:?} ({} left to index)", latest_header, new_headers.len());
|
||||
};
|
||||
let height_map = HashMap::<BlockHash, usize>::from_iter(
|
||||
new_headers.iter().map(|h| (*h.hash(), h.height())),
|
||||
);
|
||||
if entries.next().is_some() {
|
||||
bail!(">{} index entries, query may take too long", result.len())
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
let chan = SyncChannel::new(1);
|
||||
let sender = chan.sender();
|
||||
let blockhashes: Vec<BlockHash> = new_headers.iter().map(|h| *h.hash()).collect();
|
||||
let batch_size = self.batch_size;
|
||||
let fetcher = spawn_thread("fetcher", move || {
|
||||
for blockhashes_chunk in blockhashes.chunks(batch_size) {
|
||||
let blocks = blockhashes_chunk
|
||||
.iter()
|
||||
.map(|blockhash| daemon.getblock(blockhash))
|
||||
.collect();
|
||||
sender
|
||||
.send(blocks)
|
||||
.expect("failed sending blocks to be indexed");
|
||||
}
|
||||
sender
|
||||
.send(Ok(vec![]))
|
||||
.expect("failed sending explicit end of stream");
|
||||
});
|
||||
pub(crate) fn filter_by_txid(&self, txid: Txid) -> impl Iterator<Item = BlockHash> + '_ {
|
||||
self.store
|
||||
.iter_txid(TxidRow::scan_prefix(txid))
|
||||
.map(|row| TxidRow::from_db_row(&row).height())
|
||||
.filter_map(move |height| self.chain.get_block_hash(height))
|
||||
}
|
||||
|
||||
pub(crate) fn filter_by_funding(
|
||||
&self,
|
||||
scripthash: ScriptHash,
|
||||
) -> impl Iterator<Item = BlockHash> + '_ {
|
||||
self.store
|
||||
.iter_funding(ScriptHashRow::scan_prefix(scripthash))
|
||||
.map(|row| ScriptHashRow::from_db_row(&row).height())
|
||||
.filter_map(move |height| self.chain.get_block_hash(height))
|
||||
}
|
||||
|
||||
pub(crate) fn filter_by_spending(
|
||||
&self,
|
||||
outpoint: OutPoint,
|
||||
) -> impl Iterator<Item = BlockHash> + '_ {
|
||||
self.store
|
||||
.iter_spending(SpendingPrefixRow::scan_prefix(outpoint))
|
||||
.map(|row| SpendingPrefixRow::from_db_row(&row).height())
|
||||
.filter_map(move |height| self.chain.get_block_hash(height))
|
||||
}
|
||||
|
||||
pub(crate) fn sync(&mut self, daemon: &Daemon, chunk_size: usize) -> Result<()> {
|
||||
loop {
|
||||
waiter.poll()?;
|
||||
let timer = self.stats.start_timer("fetch");
|
||||
let batch = chan
|
||||
.receiver()
|
||||
.recv()
|
||||
.expect("block fetch exited prematurely")?;
|
||||
timer.observe_duration();
|
||||
if batch.is_empty() {
|
||||
let new_headers = daemon.get_new_headers(&self.chain)?;
|
||||
if new_headers.is_empty() {
|
||||
break;
|
||||
}
|
||||
info!(
|
||||
"indexing {} blocks: [{}..{}]",
|
||||
new_headers.len(),
|
||||
new_headers.first().unwrap().height(),
|
||||
new_headers.last().unwrap().height()
|
||||
);
|
||||
for chunk in new_headers.chunks(chunk_size) {
|
||||
let blockhashes: Vec<BlockHash> = chunk.iter().map(|h| h.hash()).collect();
|
||||
let mut heights_map: HashMap<BlockHash, usize> =
|
||||
chunk.iter().map(|h| (h.hash(), h.height())).collect();
|
||||
|
||||
let rows_iter = batch.iter().flat_map(|block| {
|
||||
let blockhash = block.block_hash();
|
||||
let height = *height_map
|
||||
.get(&blockhash)
|
||||
.unwrap_or_else(|| panic!("missing header for block {}", blockhash));
|
||||
let mut batch = WriteBatch::default();
|
||||
|
||||
self.stats.update(block, height); // TODO: update stats after the block is indexed
|
||||
index_block(block, height).chain(std::iter::once(last_indexed_block(&blockhash)))
|
||||
});
|
||||
|
||||
let timer = self.stats.start_timer("index+write");
|
||||
store.write(rows_iter);
|
||||
timer.observe_duration();
|
||||
daemon.for_blocks(blockhashes, |blockhash, block| {
|
||||
let height = heights_map.remove(&blockhash).expect("unexpected block");
|
||||
let result = index_single_block(block, height);
|
||||
result.extend(&mut batch);
|
||||
})?;
|
||||
assert!(heights_map.is_empty(), "some blocks were not indexed");
|
||||
batch.sort();
|
||||
self.stats.report_stats(&batch);
|
||||
self.store.write(batch);
|
||||
}
|
||||
self.chain.update(new_headers);
|
||||
}
|
||||
let timer = self.stats.start_timer("flush");
|
||||
store.flush(); // make sure no row is left behind
|
||||
timer.observe_duration();
|
||||
|
||||
fetcher.join().expect("block fetcher failed");
|
||||
self.headers.write().unwrap().apply(new_headers, tip);
|
||||
assert_eq!(tip, self.headers.read().unwrap().tip());
|
||||
self.stats
|
||||
.update_height(self.headers.read().unwrap().len() - 1);
|
||||
Ok(tip)
|
||||
self.store.flush();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn db_rows_size(rows: &[Row]) -> usize {
    rows.iter().map(|key| key.len()).sum()
}

fn index_single_block(block: Block, height: usize) -> IndexResult {
    let mut funding_rows = vec![];
    let mut spending_rows = vec![];
    let mut txid_rows = Vec::with_capacity(block.txdata.len());

    for tx in &block.txdata {
        txid_rows.push(TxidRow::new(tx.txid(), height));

        funding_rows.extend(
            tx.output
                .iter()
                .filter(|txo| !txo.script_pubkey.is_provably_unspendable())
                .map(|txo| {
                    let scripthash = ScriptHash::new(&txo.script_pubkey);
                    ScriptHashRow::new(scripthash, height)
                }),
        );

        if tx.is_coin_base() {
            continue; // coinbase doesn't have inputs
        }
        spending_rows.extend(
            tx.input
                .iter()
                .map(|txin| SpendingPrefixRow::new(txin.previous_output, height)),
        );
    }
    IndexResult {
        funding_rows,
        spending_rows,
        txid_rows,
        header_row: HeaderRow::new(block.header),
    }
}

52
src/lib.rs
52
src/lib.rs
@ -1,28 +1,38 @@
|
||||
#![recursion_limit = "1024"]
|
||||
|
||||
#[macro_use]
|
||||
extern crate error_chain;
|
||||
extern crate anyhow;
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
#[macro_use]
|
||||
extern crate serde_json;
|
||||
// I really don't know why it fails without this line
|
||||
|
||||
extern crate configure_me;
|
||||
|
||||
pub mod app;
|
||||
pub mod bulk;
|
||||
pub mod cache;
|
||||
pub mod config;
|
||||
pub mod daemon;
|
||||
pub mod errors;
|
||||
pub mod fake;
|
||||
pub mod index;
|
||||
pub mod mempool;
|
||||
pub mod metrics;
|
||||
pub mod query;
|
||||
pub mod rpc;
|
||||
pub mod signal;
|
||||
pub mod store;
|
||||
pub mod util;
|
||||
mod cache;
|
||||
mod chain;
|
||||
mod config;
|
||||
mod daemon;
|
||||
mod db;
|
||||
mod electrum;
|
||||
mod index;
|
||||
mod mempool;
|
||||
mod merkle;
|
||||
mod metrics;
|
||||
mod p2p;
|
||||
pub mod server;
|
||||
mod signals;
|
||||
mod status;
|
||||
mod thread;
|
||||
mod tracker;
|
||||
mod types;
|
||||
|
||||
pub use {
|
||||
cache::Cache,
|
||||
config::Config,
|
||||
daemon::Daemon,
|
||||
electrum::{Client, Rpc},
|
||||
status::ScriptHashStatus,
|
||||
tracker::Tracker,
|
||||
types::ScriptHash,
|
||||
};
|
||||
|
450
src/mempool.rs
450
src/mempool.rs
@ -1,306 +1,230 @@
|
||||
use bitcoin::blockdata::transaction::Transaction;
|
||||
use bitcoin::hash_types::Txid;
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use anyhow::Result;
|
||||
|
||||
use std::collections::{BTreeSet, HashMap, HashSet};
|
||||
use std::convert::TryFrom;
|
||||
use std::iter::FromIterator;
|
||||
use std::ops::Bound;
|
||||
use std::sync::Mutex;
|
||||
|
||||
use crate::daemon::{Daemon, MempoolEntry};
|
||||
use crate::errors::*;
|
||||
use crate::index::index_transaction;
|
||||
use crate::metrics::{
|
||||
Gauge, GaugeVec, HistogramOpts, HistogramTimer, HistogramVec, MetricOpts, Metrics,
|
||||
};
|
||||
use crate::store::{ReadStore, Row};
|
||||
use crate::util::Bytes;
|
||||
use bitcoin::hashes::Hash;
|
||||
use bitcoin::{Amount, OutPoint, Transaction, Txid};
|
||||
use bitcoincore_rpc::json;
|
||||
use rayon::prelude::*;
|
||||
use serde::ser::{Serialize, SerializeSeq, Serializer};
|
||||
|
||||
const VSIZE_BIN_WIDTH: u32 = 100_000; // in vbytes
|
||||
use crate::{daemon::Daemon, types::ScriptHash};
|
||||
|
||||
struct MempoolStore {
|
||||
map: BTreeMap<Bytes, Vec<Bytes>>,
|
||||
pub(crate) struct Entry {
|
||||
pub txid: Txid,
|
||||
pub tx: Transaction,
|
||||
pub fee: Amount,
|
||||
pub vsize: u64,
|
||||
pub has_unconfirmed_inputs: bool,
|
||||
}
|
||||
|
||||
impl MempoolStore {
|
||||
fn new() -> MempoolStore {
|
||||
MempoolStore {
|
||||
map: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn add(&mut self, tx: &Transaction) {
|
||||
let rows = index_transaction(tx, 0);
|
||||
for row in rows {
|
||||
let (key, value) = row.into_pair();
|
||||
self.map.entry(key).or_insert_with(Vec::new).push(value);
|
||||
}
|
||||
}
|
||||
|
||||
fn remove(&mut self, tx: &Transaction) {
|
||||
let rows = index_transaction(tx, 0);
|
||||
for row in rows {
|
||||
let (key, value) = row.into_pair();
|
||||
let no_values_left = {
|
||||
let values = self
|
||||
.map
|
||||
.get_mut(&key)
|
||||
.unwrap_or_else(|| panic!("missing key {} in mempool", hex::encode(&key)));
|
||||
let last_value = values
|
||||
.pop()
|
||||
.unwrap_or_else(|| panic!("no values found for key {}", hex::encode(&key)));
|
||||
// TxInRow and TxOutRow have an empty value, TxRow has height=0 as value.
|
||||
assert_eq!(
|
||||
value,
|
||||
last_value,
|
||||
"wrong value for key {}: {}",
|
||||
hex::encode(&key),
|
||||
hex::encode(&last_value)
|
||||
);
|
||||
values.is_empty()
|
||||
};
|
||||
if no_values_left {
|
||||
self.map.remove(&key).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
/// Mempool current state
|
||||
pub(crate) struct Mempool {
|
||||
entries: HashMap<Txid, Entry>,
|
||||
by_funding: BTreeSet<(ScriptHash, Txid)>,
|
||||
by_spending: BTreeSet<(OutPoint, Txid)>,
|
||||
histogram: Histogram,
|
||||
}
|
||||
|
||||
impl ReadStore for MempoolStore {
|
||||
fn get(&self, key: &[u8]) -> Option<Bytes> {
|
||||
Some(self.map.get(key)?.last()?.to_vec())
|
||||
}
|
||||
fn scan(&self, prefix: &[u8]) -> Vec<Row> {
|
||||
let range = self
|
||||
.map
|
||||
.range((Bound::Included(prefix.to_vec()), Bound::Unbounded));
|
||||
let mut rows = vec![];
|
||||
for (key, values) in range {
|
||||
if !key.starts_with(prefix) {
|
||||
break;
|
||||
}
|
||||
if let Some(value) = values.last() {
|
||||
rows.push(Row {
|
||||
key: key.to_vec(),
|
||||
value: value.to_vec(),
|
||||
});
|
||||
}
|
||||
}
|
||||
rows
|
||||
}
|
||||
// Smallest possible txid
|
||||
fn txid_min() -> Txid {
|
||||
Txid::from_inner([0x00; 32])
|
||||
}
|
||||
|
||||
struct Item {
|
||||
tx: Transaction, // stored for faster retrieval and index removal
|
||||
entry: MempoolEntry, // caches mempool fee rates
|
||||
// Largest possible txid
|
||||
fn txid_max() -> Txid {
|
||||
Txid::from_inner([0xFF; 32])
|
||||
}
|
||||
|
||||
struct Stats {
|
||||
count: Gauge,
|
||||
update: HistogramVec,
|
||||
vsize: GaugeVec,
|
||||
max_fee_rate: Mutex<f32>,
|
||||
}
|
||||
|
||||
impl Stats {
|
||||
fn start_timer(&self, step: &str) -> HistogramTimer {
|
||||
self.update.with_label_values(&[step]).start_timer()
|
||||
}
|
||||
|
||||
fn update(&self, entries: &[&MempoolEntry]) {
|
||||
let mut bands: Vec<(f32, u32)> = vec![];
|
||||
let mut fee_rate = 1.0f32; // [sat/vbyte]
|
||||
let mut vsize = 0u32; // vsize of transactions paying <= fee_rate
|
||||
for e in entries {
|
||||
while fee_rate < e.fee_per_vbyte() {
|
||||
bands.push((fee_rate, vsize));
|
||||
fee_rate *= 2.0;
|
||||
}
|
||||
vsize += e.vsize();
|
||||
}
|
||||
let mut max_fee_rate = self.max_fee_rate.lock().unwrap();
|
||||
loop {
|
||||
bands.push((fee_rate, vsize));
|
||||
if fee_rate < *max_fee_rate {
|
||||
fee_rate *= 2.0;
|
||||
continue;
|
||||
}
|
||||
*max_fee_rate = fee_rate;
|
||||
break;
|
||||
}
|
||||
drop(max_fee_rate);
|
||||
for (fee_rate, vsize) in bands {
|
||||
// labels should be ordered by fee_rate value
|
||||
let label = format!("≤{:10.0}", fee_rate);
|
||||
self.vsize
|
||||
.with_label_values(&[&label])
|
||||
.set(f64::from(vsize));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Tracker {
|
||||
items: HashMap<Txid, Item>,
|
||||
index: MempoolStore,
|
||||
histogram: Vec<(f32, u32)>,
|
||||
stats: Stats,
|
||||
}
|
||||
|
||||
impl Tracker {
|
||||
pub fn new(metrics: &Metrics) -> Tracker {
|
||||
Tracker {
|
||||
items: HashMap::new(),
|
||||
index: MempoolStore::new(),
|
||||
histogram: vec![],
|
||||
stats: Stats {
|
||||
count: metrics.gauge(MetricOpts::new(
|
||||
"electrs_mempool_count",
|
||||
"# of mempool transactions",
|
||||
)),
|
||||
update: metrics.histogram_vec(
|
||||
HistogramOpts::new(
|
||||
"electrs_mempool_update",
|
||||
"Time to update mempool (in seconds)",
|
||||
),
|
||||
&["step"],
|
||||
),
|
||||
vsize: metrics.gauge_vec(
|
||||
MetricOpts::new(
|
||||
"electrs_mempool_vsize",
|
||||
"Total vsize of transactions paying at most given fee rate",
|
||||
),
|
||||
&["fee_rate"],
|
||||
),
|
||||
max_fee_rate: Mutex::new(1.0),
|
||||
},
|
||||
impl Mempool {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
entries: Default::default(),
|
||||
by_funding: Default::default(),
|
||||
by_spending: Default::default(),
|
||||
histogram: Histogram::empty(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn has_txn(&self, txid: &Txid) -> bool {
|
||||
self.items.contains_key(txid)
|
||||
}
|
||||
|
||||
pub fn get_fee(&self, txid: &Txid) -> Option<u64> {
|
||||
self.items.get(txid).map(|stats| stats.entry.fee())
|
||||
}
|
||||
|
||||
/// Returns vector of (fee_rate, vsize) pairs, where fee_{n-1} > fee_n and vsize_n is the
|
||||
/// total virtual size of mempool transactions with fee in the bin [fee_{n-1}, fee_n].
|
||||
/// Note: fee_{-1} is implied to be infinite.
|
||||
pub fn fee_histogram(&self) -> &Vec<(f32, u32)> {
|
||||
pub(crate) fn fees_histogram(&self) -> &Histogram {
|
||||
&self.histogram
|
||||
}
|
||||
|
||||
pub fn index(&self) -> &dyn ReadStore {
|
||||
&self.index
|
||||
pub(crate) fn get(&self, txid: &Txid) -> Option<&Entry> {
|
||||
self.entries.get(txid)
|
||||
}
|
||||
|
||||
pub fn update(&mut self, daemon: &Daemon) -> Result<()> {
|
||||
let timer = self.stats.start_timer("fetch");
|
||||
let new_txids = daemon
|
||||
.getmempooltxids()
|
||||
.chain_err(|| "failed to update mempool from daemon")?;
|
||||
let old_txids = HashSet::from_iter(self.items.keys().cloned());
|
||||
timer.observe_duration();
|
||||
pub(crate) fn filter_by_funding(&self, scripthash: &ScriptHash) -> Vec<&Entry> {
|
||||
let range = (
|
||||
Bound::Included((*scripthash, txid_min())),
|
||||
Bound::Included((*scripthash, txid_max())),
|
||||
);
|
||||
self.by_funding
|
||||
.range(range)
|
||||
.map(|(_, txid)| self.get(txid).expect("missing funding mempool tx"))
|
||||
.collect()
|
||||
}
|
||||
|
||||
let timer = self.stats.start_timer("add");
|
||||
let txids_iter = new_txids.difference(&old_txids);
|
||||
let entries = txids_iter.filter_map(|txid| {
|
||||
match daemon.getmempoolentry(txid) {
|
||||
Ok(entry) => Some((txid, entry)),
|
||||
Err(err) => {
|
||||
debug!("no mempool entry {}: {}", txid, err); // e.g. new block or RBF
|
||||
None // ignore this transaction for now
|
||||
}
|
||||
}
|
||||
});
|
||||
for (txid, entry) in entries {
|
||||
match daemon.gettransaction(txid, None) {
|
||||
Ok(tx) => {
|
||||
assert_eq!(tx.txid(), *txid);
|
||||
self.add(txid, tx, entry);
|
||||
}
|
||||
Err(err) => {
|
||||
debug!("failed to get transaction {}: {}", txid, err); // e.g. new block or RBF
|
||||
}
|
||||
pub(crate) fn filter_by_spending(&self, outpoint: &OutPoint) -> Vec<&Entry> {
|
||||
let range = (
|
||||
Bound::Included((*outpoint, txid_min())),
|
||||
Bound::Included((*outpoint, txid_max())),
|
||||
);
|
||||
self.by_spending
|
||||
.range(range)
|
||||
.map(|(_, txid)| self.get(txid).expect("missing spending mempool tx"))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn sync(&mut self, daemon: &Daemon) {
|
||||
let txids = match daemon.get_mempool_txids() {
|
||||
Ok(txids) => txids,
|
||||
Err(e) => {
|
||||
warn!("mempool sync failed: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
debug!("loading {} mempool transactions", txids.len());
|
||||
|
||||
let new_txids = HashSet::<Txid>::from_iter(txids);
|
||||
let old_txids = HashSet::<Txid>::from_iter(self.entries.keys().copied());
|
||||
|
||||
let to_add = &new_txids - &old_txids;
|
||||
let to_remove = &old_txids - &new_txids;
|
||||
|
||||
let removed = to_remove.len();
|
||||
for txid in to_remove {
|
||||
self.remove_entry(txid);
|
||||
}
|
||||
timer.observe_duration();
|
||||
|
||||
let timer = self.stats.start_timer("remove");
|
||||
for txid in old_txids.difference(&new_txids) {
|
||||
self.remove(txid);
|
||||
let entries: Vec<_> = to_add
|
||||
.par_iter()
|
||||
.filter_map(|txid| {
|
||||
match (
|
||||
daemon.get_transaction(txid, None),
|
||||
daemon.get_mempool_entry(txid),
|
||||
) {
|
||||
(Ok(tx), Ok(entry)) => Some((txid, tx, entry)),
|
||||
_ => None,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
let added = entries.len();
|
||||
for (txid, tx, entry) in entries {
|
||||
self.add_entry(*txid, tx, entry);
|
||||
}
|
||||
timer.observe_duration();
|
||||
|
||||
let timer = self.stats.start_timer("fees");
|
||||
self.update_fee_histogram();
|
||||
timer.observe_duration();
|
||||
|
||||
self.stats.count.set(self.items.len() as i64);
|
||||
Ok(())
|
||||
self.histogram = Histogram::new(self.entries.values().map(|e| (e.fee, e.vsize)));
|
||||
debug!(
|
||||
"{} mempool txs: {} added, {} removed",
|
||||
self.entries.len(),
|
||||
added,
|
||||
removed,
|
||||
);
|
||||
}
|
||||
|
||||
fn add(&mut self, txid: &Txid, tx: Transaction, entry: MempoolEntry) {
|
||||
self.index.add(&tx);
|
||||
self.items.insert(*txid, Item { tx, entry });
|
||||
fn add_entry(&mut self, txid: Txid, tx: Transaction, entry: json::GetMempoolEntryResult) {
|
||||
for txi in &tx.input {
|
||||
self.by_spending.insert((txi.previous_output, txid));
|
||||
}
|
||||
for txo in &tx.output {
|
||||
let scripthash = ScriptHash::new(&txo.script_pubkey);
|
||||
self.by_funding.insert((scripthash, txid)); // may have duplicates
|
||||
}
|
||||
let entry = Entry {
|
||||
txid,
|
||||
tx,
|
||||
vsize: entry.vsize,
|
||||
fee: entry.fees.base,
|
||||
has_unconfirmed_inputs: !entry.depends.is_empty(),
|
||||
};
|
||||
assert!(
|
||||
self.entries.insert(txid, entry).is_none(),
|
||||
"duplicate mempool txid"
|
||||
);
|
||||
}
|
||||
|
||||
fn remove(&mut self, txid: &Txid) {
|
||||
let stats = self
|
||||
.items
|
||||
.remove(txid)
|
||||
.unwrap_or_else(|| panic!("missing mempool tx {}", txid));
|
||||
self.index.remove(&stats.tx);
|
||||
}
|
||||
|
||||
fn update_fee_histogram(&mut self) {
|
||||
let mut entries: Vec<&MempoolEntry> = self.items.values().map(|stat| &stat.entry).collect();
|
||||
entries.sort_unstable_by(|e1, e2| {
|
||||
e1.fee_per_vbyte().partial_cmp(&e2.fee_per_vbyte()).unwrap()
|
||||
});
|
||||
self.histogram = electrum_fees(&entries);
|
||||
self.stats.update(&entries);
|
||||
fn remove_entry(&mut self, txid: Txid) {
|
||||
let entry = self.entries.remove(&txid).expect("missing tx from mempool");
|
||||
for txi in entry.tx.input {
|
||||
self.by_spending.remove(&(txi.previous_output, txid));
|
||||
}
|
||||
for txo in entry.tx.output {
|
||||
let scripthash = ScriptHash::new(&txo.script_pubkey);
|
||||
self.by_funding.remove(&(scripthash, txid)); // may have misses
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn electrum_fees(entries: &[&MempoolEntry]) -> Vec<(f32, u32)> {
|
||||
let mut histogram = vec![];
|
||||
let mut bin_size = 0;
|
||||
let mut last_fee_rate = 0.0;
|
||||
for e in entries.iter().rev() {
|
||||
let fee_rate = e.fee_per_vbyte();
|
||||
if bin_size > VSIZE_BIN_WIDTH && (last_fee_rate - fee_rate).abs() >= std::f32::EPSILON {
|
||||
// vsize of transactions paying >= e.fee_per_vbyte()
|
||||
histogram.push((last_fee_rate, bin_size));
|
||||
bin_size = 0;
|
||||
pub(crate) struct Histogram {
    /// bins[64-i] contains the total vsize of transactions with fee rate [2**(i-1), 2**i).
    /// bins[63] = [1, 2)
    /// bins[62] = [2, 4)
    /// bins[61] = [4, 8)
    /// bins[60] = [8, 16)
    /// ...
    /// bins[1] = [2**62, 2**63)
    /// bins[0] = [2**63, 2**64)
    bins: [u64; Histogram::SIZE],
}

impl Histogram {
    const SIZE: usize = 64;

    fn empty() -> Self {
        Self::new(std::iter::empty())
    }

    fn new(items: impl Iterator<Item = (Amount, u64)>) -> Self {
        let mut bins = [0; Self::SIZE];
        for (fee, vsize) in items {
            let fee_rate = fee.as_sat() / vsize;
            let index = usize::try_from(fee_rate.leading_zeros()).unwrap();
            // skip transactions with too low fee rate (<1 sat/vB)
            if let Some(bin) = bins.get_mut(index) {
                *bin += vsize
            }
        }
last_fee_rate = fee_rate;
|
||||
bin_size += e.vsize();
|
||||
Self { bins }
|
||||
}
|
||||
if bin_size > 0 {
|
||||
histogram.push((last_fee_rate, bin_size));
|
||||
}
|
||||
|
||||
impl Serialize for Histogram {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut seq = serializer.serialize_seq(Some(self.bins.len()))?;
|
||||
// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#mempool-get-fee-histogram
|
||||
let fee_rates = (0..Histogram::SIZE).map(|i| std::u64::MAX >> i);
|
||||
fee_rates
|
||||
.zip(self.bins.iter().copied())
|
||||
.skip_while(|(_fee_rate, vsize)| *vsize == 0)
|
||||
.try_for_each(|element| seq.serialize_element(&element))?;
|
||||
seq.end()
|
||||
}
|
||||
histogram
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#[test]
|
||||
fn test_fakestore() {
|
||||
use crate::daemon::MempoolEntry;
|
||||
use crate::mempool::electrum_fees;
|
||||
use super::Histogram;
|
||||
use bitcoin::Amount;
|
||||
use serde_json::json;
|
||||
|
||||
let entries = [
|
||||
// (fee: u64, vsize: u32)
|
||||
&MempoolEntry::new(1_000, 1_000),
|
||||
&MempoolEntry::new(10_000, 10_000),
|
||||
&MempoolEntry::new(50_000, 50_000),
|
||||
&MempoolEntry::new(120_000, 60_000),
|
||||
&MempoolEntry::new(210_000, 70_000),
|
||||
&MempoolEntry::new(320_000, 80_000),
|
||||
#[test]
|
||||
fn test_histogram() {
|
||||
let items = vec![
|
||||
(Amount::from_sat(20), 10),
|
||||
(Amount::from_sat(10), 10),
|
||||
(Amount::from_sat(60), 10),
|
||||
(Amount::from_sat(30), 10),
|
||||
(Amount::from_sat(70), 10),
|
||||
(Amount::from_sat(50), 10),
|
||||
(Amount::from_sat(40), 10),
|
||||
(Amount::from_sat(80), 10),
|
||||
];
|
||||
assert_eq!(
|
||||
electrum_fees(&entries[..]),
|
||||
vec![(3.0, 150_000), (1.0, 121_000)]
|
||||
);
|
||||
let hist = json!(Histogram::new(items.into_iter()));
|
||||
assert_eq!(hist, json!([[15, 10], [7, 40], [3, 20], [1, 10]]));
|
||||
}
|
||||
}
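// Worked reading of the test above (added commentary): fee/vsize pairs of (20,10), (10,10),
// (60,10), (30,10), (70,10), (50,10), (40,10) and (80,10) give integer fee rates 2, 1, 6, 3,
// 7, 5, 4 and 8 sat/vB. leading_zeros() maps rate 1 to bin 63, rates 2-3 to bin 62, rates 4-7
// to bin 61 and rate 8 to bin 60, accumulating vsizes 10, 20, 40 and 10 respectively. The
// serializer labels bin i with u64::MAX >> i (here 15, 7, 3, 1) and skips leading empty bins,
// which yields exactly [[15, 10], [7, 40], [3, 20], [1, 10]].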
|
||||
|
113
src/merkle.rs
Normal file
113
src/merkle.rs
Normal file
@ -0,0 +1,113 @@
|
||||
use bitcoin::{
    hashes::{hex::ToHex, Hash},
    TxMerkleNode, Txid,
};

pub(crate) struct Proof {
    proof: Vec<TxMerkleNode>,
    position: usize,
}

impl Proof {
    pub(crate) fn create(txids: &[Txid], position: usize) -> Self {
        assert!(position < txids.len());
        let mut offset = position;
        let mut hashes: Vec<TxMerkleNode> = txids
            .iter()
            .map(|txid| TxMerkleNode::from_hash(txid.as_hash()))
            .collect();

        let mut proof = vec![];
        while hashes.len() > 1 {
            if hashes.len() % 2 != 0 {
                let last = *hashes.last().unwrap();
                hashes.push(last);
            }
            offset = if offset % 2 == 0 {
                offset + 1
            } else {
                offset - 1
            };
            proof.push(hashes[offset]);
            offset /= 2;
            hashes = hashes
                .chunks(2)
                .map(|pair| {
                    let left = pair[0];
                    let right = pair[1];
                    let input = [&left[..], &right[..]].concat();
                    TxMerkleNode::hash(&input)
                })
                .collect()
        }
        Self { proof, position }
    }

    pub(crate) fn to_hex(&self) -> Vec<String> {
        self.proof.iter().map(|node| node.to_hex()).collect()
    }

    pub(crate) fn position(&self) -> usize {
        self.position
    }
}

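// Hedged sketch (not part of this diff) of the inverse operation: folding a proof produced by
// Proof::create() back up to the block's merkle root. `txid` and `position` are assumed to be
// the same values the proof was created with; the function name is hypothetical.
fn merkle_root_from_proof(txid: Txid, proof: &[TxMerkleNode], mut position: usize) -> TxMerkleNode {
    let mut node = TxMerkleNode::from_hash(txid.as_hash());
    for sibling in proof {
        let input = if position % 2 == 0 {
            [&node[..], &sibling[..]].concat() // current node is the left child
        } else {
            [&sibling[..], &node[..]].concat() // current node is the right child
        };
        node = TxMerkleNode::hash(&input);
        position /= 2;
    }
    node
}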
#[cfg(test)]
|
||||
mod tests {
|
||||
use bitcoin::{consensus::encode::deserialize, Block, Txid};
|
||||
use std::path::Path;
|
||||
|
||||
use super::Proof;
|
||||
|
||||
#[test]
|
||||
fn test_merkle() {
|
||||
let proof = Proof::create(
|
||||
&load_block_txids("00000000000000001203c1ea455e38612bdf36e9967fdead11935c8e22283ecc"),
|
||||
157,
|
||||
);
|
||||
assert_eq!(
|
||||
proof.to_hex(),
|
||||
vec![
|
||||
"5d8cfb001d9ec17861ad9c158244239cb6e3298a619b2a5f7b176ddd54459c75",
|
||||
"06811172e13312f2e496259d2c8a7262f1192be5223fcf4d6a9ed7f58a2175ba",
|
||||
"cbcec841dea3294706809d1510c72b4424d141fac89106af65b70399b1d79f3f",
|
||||
"a24d6c3601a54d40f4350e6c8887bf82a873fe8619f95c772b573ec0373119d3",
|
||||
"2015c1bb133ee2c972e55fdcd205a9aee7b0122fd74c2f5d5d27b24a562c7790",
|
||||
"f379496fef2e603c4e1c03e2179ebaf5153d6463b8d61aa16d41db3321a18165",
|
||||
"7a798d6529663fd472d26cc90c434b64f78955747ac2f93c8dcd35b8f684946e",
|
||||
"ad3811062b8db664f2342cbff1b491865310b74416dd7b901f14d980886821f8"
|
||||
]
|
||||
);
|
||||
|
||||
let proof = Proof::create(
|
||||
&load_block_txids("000000000000000002d249a3d89f63ef3fee203adcca7c24008c13fd854513f2"),
|
||||
6,
|
||||
);
|
||||
assert_eq!(
|
||||
proof.to_hex(),
|
||||
vec![
|
||||
"d29769df672657689fd6d293b416ee9211c77fbe243ab7820813f327b0e8dd47",
|
||||
"d71f0947b47cab0f64948acfe52d41c293f492fe9627690c330d4004f2852ce4",
|
||||
"5f36c4330c727d7c8d98cc906cb286f13a61b5b4cab2124c5d041897834b42d8",
|
||||
"e77d181f83355ed38d0e6305fdb87c9637373fd90d1dfb911262ac55d260181e",
|
||||
"a8f83ca44dc486d9d45c4cff9567839c254bda96e6960d310a5e471c70c6a95b",
|
||||
"e9a5ff7f74cb060b451ed2cd27de038efff4df911f4e0f99e2661b46ebcc7e1c",
|
||||
"6b0144095e3f0e0d0551cbaa6c5dfc89387024f836528281b6d290e356e196cf",
|
||||
"bb0761b0636ffd387e0ce322289a3579e926b6813e090130a88228bd80cff982",
|
||||
"ac327124304cccf6739da308a25bb365a6b63e9344bad2be139b0b02c042567c",
|
||||
"42e11f2d67050cd31295f85507ebc7706fc4c1fddf1e5a45b98ae3f7c63d2592",
|
||||
"52657042fcfc88067524bf6c5f9a66414c7de4f4fcabcb65bca56fa84cf309b4"
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
fn load_block_txids(block_hash_hex: &str) -> Vec<Txid> {
|
||||
let path = Path::new("src")
|
||||
.join("tests")
|
||||
.join("blocks")
|
||||
.join(block_hash_hex);
|
||||
let data = std::fs::read(path).unwrap();
|
||||
let block: Block = deserialize(&data).unwrap();
|
||||
block.txdata.iter().map(|tx| tx.txid()).collect()
|
||||
}
|
||||
}
|
252
src/metrics.rs
252
src/metrics.rs
@ -1,162 +1,112 @@
|
||||
use prometheus::{self, Encoder, IntGauge};
|
||||
use std::fs;
|
||||
use std::io;
|
||||
use std::net::SocketAddr;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
#[cfg(feature = "metrics")]
|
||||
mod metrics_impl {
|
||||
use anyhow::{Context, Result};
|
||||
use prometheus::process_collector::ProcessCollector;
|
||||
use prometheus::{self, Encoder, HistogramOpts, HistogramVec, Registry};
|
||||
use tiny_http::{Response, Server};
|
||||
|
||||
pub use prometheus::{
|
||||
GaugeVec, Histogram, HistogramOpts, HistogramTimer, HistogramVec, IntCounter as Counter,
|
||||
IntCounterVec as CounterVec, IntGauge as Gauge, Opts as MetricOpts,
|
||||
};
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use crate::errors::*;
|
||||
use crate::util::spawn_thread;
|
||||
use crate::thread::spawn;
|
||||
|
||||
pub struct Metrics {
|
||||
reg: prometheus::Registry,
|
||||
addr: SocketAddr,
|
||||
}
|
||||
pub struct Metrics {
|
||||
reg: Registry,
|
||||
}
|
||||
|
||||
impl Metrics {
|
||||
pub fn new(addr: SocketAddr) -> Metrics {
|
||||
Metrics {
|
||||
reg: prometheus::Registry::new(),
|
||||
addr,
|
||||
impl Metrics {
|
||||
pub fn new(addr: SocketAddr) -> Result<Self> {
|
||||
let reg = Registry::new();
|
||||
|
||||
reg.register(Box::new(ProcessCollector::for_self()))
|
||||
.expect("failed to register ProcessCollector");
|
||||
|
||||
let result = Self { reg };
|
||||
let reg = result.reg.clone();
|
||||
spawn("metrics", move || {
|
||||
let server = Server::http(addr).unwrap();
|
||||
for request in server.incoming_requests() {
|
||||
let mut buffer = vec![];
|
||||
prometheus::TextEncoder::new()
|
||||
.encode(®.gather(), &mut buffer)
|
||||
.context("failed to encode metrics")?;
|
||||
request
|
||||
.respond(Response::from_data(buffer))
|
||||
.context("failed to send HTTP response")?;
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
|
||||
info!("serving Prometheus metrics on {}", addr);
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn histogram_vec(&self, name: &str, desc: &str, label: &str) -> Histogram {
|
||||
let opts = HistogramOpts::new(name, desc);
|
||||
let hist = HistogramVec::new(opts, &[label]).unwrap();
|
||||
self.reg
|
||||
.register(Box::new(hist.clone()))
|
||||
.expect("failed to register Histogram");
|
||||
Histogram { hist }
|
||||
}
|
||||
}
|
||||
|
||||
pub fn counter(&self, opts: prometheus::Opts) -> Counter {
|
||||
let c = Counter::with_opts(opts).unwrap();
|
||||
self.reg.register(Box::new(c.clone())).unwrap();
|
||||
c
|
||||
#[derive(Clone)]
|
||||
pub struct Histogram {
|
||||
hist: HistogramVec,
|
||||
}
|
||||
|
||||
pub fn counter_vec(&self, opts: prometheus::Opts, labels: &[&str]) -> CounterVec {
|
||||
let c = CounterVec::new(opts, labels).unwrap();
|
||||
self.reg.register(Box::new(c.clone())).unwrap();
|
||||
c
|
||||
}
|
||||
|
||||
pub fn gauge(&self, opts: prometheus::Opts) -> Gauge {
|
||||
let g = Gauge::with_opts(opts).unwrap();
|
||||
self.reg.register(Box::new(g.clone())).unwrap();
|
||||
g
|
||||
}
|
||||
|
||||
pub fn gauge_vec(&self, opts: prometheus::Opts, labels: &[&str]) -> GaugeVec {
|
||||
let g = GaugeVec::new(opts, labels).unwrap();
|
||||
self.reg.register(Box::new(g.clone())).unwrap();
|
||||
g
|
||||
}
|
||||
|
||||
pub fn gauge_int(&self, opts: prometheus::Opts) -> IntGauge {
|
||||
let g = Gauge::with_opts(opts).unwrap();
|
||||
self.reg.register(Box::new(g.clone())).unwrap();
|
||||
g
|
||||
}
|
||||
|
||||
pub fn histogram(&self, opts: prometheus::HistogramOpts) -> Histogram {
|
||||
let h = Histogram::with_opts(opts).unwrap();
|
||||
self.reg.register(Box::new(h.clone())).unwrap();
|
||||
h
|
||||
}
|
||||
|
||||
pub fn histogram_vec(&self, opts: prometheus::HistogramOpts, labels: &[&str]) -> HistogramVec {
|
||||
let h = HistogramVec::new(opts, labels).unwrap();
|
||||
self.reg.register(Box::new(h.clone())).unwrap();
|
||||
h
|
||||
}
|
||||
|
||||
pub fn start(&self) {
|
||||
let server = tiny_http::Server::http(self.addr).unwrap_or_else(|e| {
|
||||
panic!(
|
||||
"failed to start monitoring HTTP server at {}: {}",
|
||||
self.addr, e
|
||||
)
|
||||
});
|
||||
start_process_exporter(self);
|
||||
let reg = self.reg.clone();
|
||||
spawn_thread("metrics", move || loop {
|
||||
if let Err(e) = handle_request(®, server.recv()) {
|
||||
error!("http error: {}", e);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_request(
|
||||
reg: &prometheus::Registry,
|
||||
request: io::Result<tiny_http::Request>,
|
||||
) -> io::Result<()> {
|
||||
let request = request?;
|
||||
let mut buffer = vec![];
|
||||
prometheus::TextEncoder::new()
|
||||
.encode(®.gather(), &mut buffer)
|
||||
.unwrap();
|
||||
let response = tiny_http::Response::from_data(buffer);
|
||||
request.respond(response)
|
||||
}
|
||||
|
||||
struct Stats {
|
||||
utime: f64,
|
||||
rss: u64,
|
||||
fds: usize,
|
||||
}
|
||||
|
||||
fn parse_stats() -> Result<Stats> {
|
||||
let value =
|
||||
fs::read_to_string("/proc/self/stat").chain_err(|| "failed to read /proc/self/stat")?;
|
||||
let parts: Vec<&str> = value.split_whitespace().collect();
|
||||
let page_size = page_size::get() as u64;
|
||||
let ticks_per_second = sysconf::raw::sysconf(sysconf::raw::SysconfVariable::ScClkTck)
|
||||
.expect("failed to get _SC_CLK_TCK") as f64;
|
||||
|
||||
let parse_part = |index: usize, name: &str| -> Result<u64> {
|
||||
parts
|
||||
.get(index)
|
||||
.chain_err(|| format!("missing {}: {:?}", name, parts))?
|
||||
.parse::<u64>()
|
||||
.chain_err(|| format!("invalid {}: {:?}", name, parts))
|
||||
};
|
||||
|
||||
// For details, see '/proc/[pid]/stat' section at `man 5 proc`:
|
||||
let utime = parse_part(13, "utime")? as f64 / ticks_per_second;
|
||||
let rss = parse_part(23, "rss")? * page_size;
|
||||
let fds = fs::read_dir("/proc/self/fd")
|
||||
.chain_err(|| "failed to read /proc/self/fd directory")?
|
||||
.count();
|
||||
Ok(Stats { utime, rss, fds })
|
||||
}
|
||||
|
||||
fn start_process_exporter(metrics: &Metrics) {
|
||||
let rss = metrics.gauge(MetricOpts::new(
|
||||
"electrs_process_memory_rss",
|
||||
"Resident memory size [bytes]",
|
||||
));
|
||||
let cpu = metrics.gauge_vec(
|
||||
MetricOpts::new(
|
||||
"electrs_process_cpu_usage",
|
||||
"CPU usage by this process [seconds]",
|
||||
),
|
||||
&["type"],
|
||||
);
|
||||
let fds = metrics.gauge(MetricOpts::new(
|
||||
"electrs_process_open_fds",
|
||||
"# of file descriptors",
|
||||
));
|
||||
spawn_thread("exporter", move || loop {
|
||||
match parse_stats() {
|
||||
Ok(stats) => {
|
||||
cpu.with_label_values(&["utime"]).set(stats.utime as f64);
|
||||
rss.set(stats.rss as i64);
|
||||
fds.set(stats.fds as i64);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("failed to export process stats: {}", e);
|
||||
return;
|
||||
}
|
||||
impl Histogram {
|
||||
pub fn observe(&self, label: &str, value: usize) {
|
||||
self.hist.with_label_values(&[label]).observe(value as f64);
|
||||
}
|
||||
thread::sleep(Duration::from_secs(5));
|
||||
});
|
||||
|
||||
pub fn observe_duration<F, T>(&self, label: &str, func: F) -> T
|
||||
where
|
||||
F: FnOnce() -> T,
|
||||
{
|
||||
self.hist
|
||||
.with_label_values(&[label])
|
||||
.observe_closure_duration(func)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "metrics")]
|
||||
pub use metrics_impl::{Histogram, Metrics};
|
||||
|
||||
#[cfg(not(feature = "metrics"))]
|
||||
mod metrics_fake {
|
||||
use anyhow::Result;
|
||||
|
||||
use std::net::SocketAddr;
|
||||
|
||||
pub struct Metrics {}
|
||||
|
||||
impl Metrics {
|
||||
pub fn new(_addr: SocketAddr) -> Result<Self> {
|
||||
debug!("metrics collection is disabled");
|
||||
Ok(Self {})
|
||||
}
|
||||
|
||||
pub fn histogram_vec(&self, _name: &str, _desc: &str, _label: &str) -> Histogram {
|
||||
Histogram {}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Histogram {}
|
||||
|
||||
impl Histogram {
|
||||
pub fn observe(&self, _label: &str, _value: usize) {}
|
||||
|
||||
pub fn observe_duration<F, T>(&self, _label: &str, func: F) -> T
|
||||
where
|
||||
F: FnOnce() -> T,
|
||||
{
|
||||
func()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "metrics"))]
|
||||
pub use metrics_fake::{Histogram, Metrics};
|
||||
|
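// Hedged usage sketch of the feature-gated metrics API above (not part of this diff; assumes
// the bind address is free). Histogram::observe_duration() times the closure and returns its
// result, and behaves the same whether or not the "metrics" feature is enabled.
fn metrics_example() -> anyhow::Result<()> {
    let metrics = Metrics::new("127.0.0.1:4224".parse()?)?;
    let duration = metrics.histogram_vec("example_duration", "Example duration (in seconds)", "step");
    let value = duration.observe_duration("work", || 1 + 1);
    assert_eq!(value, 2);
    Ok(())
}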
@ -1,70 +0,0 @@
|
||||
// TODO: network::socket::Socket needs to be reimplemented.
|
||||
|
||||
use bitcoin::network::constants::Network;
|
||||
use bitcoin::network::message::NetworkMessage;
|
||||
use bitcoin::network::message_blockdata::InvType;
|
||||
use bitcoin::network::socket::Socket;
|
||||
use bitcoin::hash_types::Txid;
|
||||
use bitcoin::util::Error;
|
||||
|
||||
use std::sync::mpsc::Sender;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::util;
|
||||
|
||||
fn connect() -> Result<Socket, Error> {
|
||||
let mut sock = Socket::new(Network::Bitcoin);
|
||||
sock.connect("127.0.0.1", 8333)?;
|
||||
Ok(sock)
|
||||
}
|
||||
|
||||
fn handle(mut sock: Socket, tx: Sender<Txid>) {
|
||||
let mut outgoing = vec![sock.version_message(0).unwrap()];
|
||||
loop {
|
||||
for msg in outgoing.split_off(0) {
|
||||
trace!("send {:?}", msg);
|
||||
if let Err(e) = sock.send_message(msg.clone()) {
|
||||
warn!("failed to connect to node: {}", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Receive new message
|
||||
let msg = match sock.receive_message() {
|
||||
Ok(msg) => msg,
|
||||
Err(e) => {
|
||||
warn!("failed to receive p2p message: {}", e);
|
||||
break;
|
||||
}
|
||||
};
|
||||
trace!("recv {:?}", msg);
|
||||
match msg {
|
||||
NetworkMessage::Alert(_) => continue, // deprecated
|
||||
NetworkMessage::Version(_) => outgoing.push(NetworkMessage::Verack),
|
||||
NetworkMessage::Ping(nonce) => outgoing.push(NetworkMessage::Pong(nonce)),
|
||||
NetworkMessage::Inv(ref inventory) => {
|
||||
inventory
|
||||
.iter()
|
||||
.filter(|inv| inv.inv_type == InvType::Block)
|
||||
.for_each(|inv| tx.send(inv.hash).expect("failed to send message"));
|
||||
}
|
||||
_ => (),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
pub fn run() -> util::Channel<Txid> {
|
||||
let chan = util::Channel::new();
|
||||
let tx = chan.sender();
|
||||
|
||||
util::spawn_thread("p2p", move || loop {
|
||||
// TODO: support testnet and regtest as well.
|
||||
match connect() {
|
||||
Ok(sock) => handle(sock, tx.clone()),
|
||||
Err(e) => warn!("p2p error: {}", e),
|
||||
}
|
||||
thread::sleep(Duration::from_secs(3));
|
||||
});
|
||||
|
||||
chan
|
||||
}
|
175
src/p2p.rs
Normal file
175
src/p2p.rs
Normal file
@ -0,0 +1,175 @@
|
||||
use std::io::Write;
|
||||
use std::iter::FromIterator;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::chain::{Chain, NewHeader};
|
||||
use anyhow::{Context, Result};
|
||||
use bitcoin::{
|
||||
consensus::encode,
|
||||
network::{
|
||||
address, constants,
|
||||
message::{self, NetworkMessage},
|
||||
message_blockdata::{GetHeadersMessage, Inventory},
|
||||
message_network,
|
||||
stream_reader::StreamReader,
|
||||
},
|
||||
secp256k1::{self, rand::Rng},
|
||||
Block, BlockHash, Network,
|
||||
};
|
||||
|
||||
pub(crate) struct Connection {
    stream: TcpStream,
    reader: StreamReader<TcpStream>,
    network: Network,
}

impl Connection {
    /// Connect to a Bitcoin node via p2p protocol.
    /// See https://en.bitcoin.it/wiki/Protocol_documentation for details.
    pub fn connect(network: Network, address: SocketAddr) -> Result<Self> {
        let stream = TcpStream::connect(address)
            .with_context(|| format!("{} p2p failed to connect: {:?}", network, address))?;
        let reader = StreamReader::new(
            stream.try_clone().context("stream failed to clone")?,
            /*buffer_size*/ Some(1 << 20),
        );
        let mut conn = Self {
            stream,
            reader,
            network,
        };
        conn.send(build_version_message())?;
        if let NetworkMessage::GetHeaders(_) = conn.recv().context("failed to get headers")? {
            conn.send(NetworkMessage::Headers(vec![]))?;
        }
        Ok(conn)
    }

fn send(&mut self, msg: NetworkMessage) -> Result<()> {
|
||||
trace!("send: {:?}", msg);
|
||||
let raw_msg = message::RawNetworkMessage {
|
||||
magic: self.network.magic(),
|
||||
payload: msg,
|
||||
};
|
||||
self.stream
|
||||
.write_all(encode::serialize(&raw_msg).as_slice())
|
||||
.context("p2p failed to send")
|
||||
}
|
||||
|
||||
fn recv(&mut self) -> Result<NetworkMessage> {
|
||||
loop {
|
||||
let raw_msg: message::RawNetworkMessage =
|
||||
self.reader.read_next().context("p2p failed to recv")?;
|
||||
|
||||
trace!("recv: {:?}", raw_msg.payload);
|
||||
match raw_msg.payload {
|
||||
NetworkMessage::Version(version) => {
|
||||
debug!("peer version: {:?}", version);
|
||||
self.send(NetworkMessage::Verack)?;
|
||||
}
|
||||
NetworkMessage::Ping(nonce) => {
|
||||
self.send(NetworkMessage::Pong(nonce))?;
|
||||
}
|
||||
NetworkMessage::Verack
|
||||
| NetworkMessage::Alert(_)
|
||||
| NetworkMessage::Addr(_)
|
||||
| NetworkMessage::Inv(_) => {}
|
||||
payload => return Ok(payload),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Request and process the specified blocks.
|
||||
/// See https://en.bitcoin.it/wiki/Protocol_documentation#getblocks for details.
|
||||
pub(crate) fn for_blocks<B, F>(&mut self, blockhashes: B, mut func: F) -> Result<()>
|
||||
where
|
||||
B: IntoIterator<Item = BlockHash>,
|
||||
F: FnMut(BlockHash, Block) + Send,
|
||||
{
|
||||
let blockhashes = Vec::from_iter(blockhashes);
|
||||
if blockhashes.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let inv = blockhashes
|
||||
.iter()
|
||||
.map(|h| Inventory::WitnessBlock(*h))
|
||||
.collect();
|
||||
debug!("loading {} blocks", blockhashes.len());
|
||||
self.send(NetworkMessage::GetData(inv))?;
|
||||
|
||||
// receive, parse and process the blocks concurrently
|
||||
rayon::scope(|s| {
|
||||
let (tx, rx) = crossbeam_channel::bounded(10);
|
||||
s.spawn(|_| {
|
||||
// the loop will exit when the sender is dropped
|
||||
for (hash, block) in rx {
|
||||
func(hash, block);
|
||||
}
|
||||
});
|
||||
|
||||
for hash in blockhashes {
|
||||
match self
|
||||
.recv()
|
||||
.with_context(|| format!("failed to get block {}", hash))?
|
||||
{
|
||||
NetworkMessage::Block(block) => {
|
||||
ensure!(block.block_hash() == hash, "got unexpected block");
|
||||
tx.send((hash, block))
|
||||
.context("disconnected from block processor")?;
|
||||
}
|
||||
msg => bail!("unexpected {:?}", msg),
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
/// Get new block headers (supporting reorgs).
|
||||
/// https://en.bitcoin.it/wiki/Protocol_documentation#getheaders
|
||||
pub(crate) fn get_new_headers(&mut self, chain: &Chain) -> Result<Vec<NewHeader>> {
|
||||
let msg = GetHeadersMessage::new(chain.locator(), BlockHash::default());
|
||||
self.send(NetworkMessage::GetHeaders(msg))?;
|
||||
let headers = match self.recv().context("failed to get new headers")? {
|
||||
NetworkMessage::Headers(headers) => headers,
|
||||
msg => bail!("unexpected {:?}", msg),
|
||||
};
|
||||
|
||||
debug!("got {} new headers", headers.len());
|
||||
let prev_blockhash = match headers.first().map(|h| h.prev_blockhash) {
|
||||
None => return Ok(vec![]),
|
||||
Some(prev_blockhash) => prev_blockhash,
|
||||
};
|
||||
let new_heights = match chain.get_block_height(&prev_blockhash) {
|
||||
Some(last_height) => (last_height + 1)..,
|
||||
None => bail!("missing prev_blockhash: {}", prev_blockhash),
|
||||
};
|
||||
Ok(headers
|
||||
.into_iter()
|
||||
.zip(new_heights)
|
||||
.map(NewHeader::from)
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
|
||||
fn build_version_message() -> NetworkMessage {
    let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
    let timestamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("Time error")
        .as_secs() as i64;

    let services = constants::ServiceFlags::NETWORK | constants::ServiceFlags::WITNESS;

    NetworkMessage::Version(message_network::VersionMessage {
        version: constants::PROTOCOL_VERSION,
        services,
        timestamp,
        receiver: address::Address::new(&addr, services),
        sender: address::Address::new(&addr, services),
        nonce: secp256k1::rand::thread_rng().gen(),
        user_agent: String::from("electrs"),
        start_height: 0,
        relay: false,
    })
}

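// Hedged usage sketch of the new p2p::Connection API (not part of this diff; assumes a
// reachable local node and an already-loaded Chain, and that NewHeader exposes hash() as
// used by Index::sync above). The function name is hypothetical.
fn sync_example(chain: &Chain) -> Result<()> {
    let address: SocketAddr = "127.0.0.1:8333".parse().expect("valid address");
    let mut conn = Connection::connect(Network::Bitcoin, address)?;
    let new_headers = conn.get_new_headers(chain)?;
    conn.for_blocks(new_headers.iter().map(|h| h.hash()), |hash, block| {
        debug!("fetched block {} with {} transactions", hash, block.txdata.len());
    })?;
    Ok(())
}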
577
src/query.rs
577
src/query.rs
@ -1,577 +0,0 @@
|
||||
use bitcoin::blockdata::transaction::Transaction;
|
||||
use bitcoin::consensus::encode::deserialize;
|
||||
use bitcoin::hash_types::{BlockHash, TxMerkleNode, Txid};
|
||||
use bitcoin::hashes::hex::ToHex;
|
||||
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
|
||||
use bitcoin::hashes::Hash;
|
||||
use serde_json::Value;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use crate::app::App;
|
||||
use crate::cache::TransactionCache;
|
||||
use crate::errors::*;
|
||||
use crate::index::{compute_script_hash, TxInRow, TxOutRow, TxRow};
|
||||
use crate::mempool::Tracker;
|
||||
use crate::metrics::{HistogramOpts, HistogramVec, Metrics};
|
||||
use crate::store::{ReadStore, Row};
|
||||
use crate::util::{FullHash, HashPrefix, HeaderEntry};
|
||||
|
||||
pub struct FundingOutput {
|
||||
pub txn_id: Txid,
|
||||
pub height: u32,
|
||||
pub output_index: usize,
|
||||
pub value: u64,
|
||||
}
|
||||
|
||||
type OutPoint = (Txid, usize); // (txid, output_index)
|
||||
|
||||
struct SpendingInput {
|
||||
txn_id: Txid,
|
||||
height: u32,
|
||||
funding_output: OutPoint,
|
||||
value: u64,
|
||||
}
|
||||
|
||||
pub struct Status {
|
||||
confirmed: (Vec<FundingOutput>, Vec<SpendingInput>),
|
||||
mempool: (Vec<FundingOutput>, Vec<SpendingInput>),
|
||||
txn_fees: HashMap<Txid, u64>,
|
||||
}
|
||||
|
||||
fn calc_balance((funding, spending): &(Vec<FundingOutput>, Vec<SpendingInput>)) -> i64 {
|
||||
let funded: u64 = funding.iter().map(|output| output.value).sum();
|
||||
let spent: u64 = spending.iter().map(|input| input.value).sum();
|
||||
funded as i64 - spent as i64
|
||||
}
|
||||
|
||||
pub struct HistoryItem {
|
||||
height: i32,
|
||||
tx_hash: Txid,
|
||||
fee: Option<u64>, // need to be set only for unconfirmed transactions (i.e. height <= 0)
|
||||
}
|
||||
|
||||
impl HistoryItem {
|
||||
pub fn to_json(&self) -> Value {
|
||||
let mut result = json!({ "height": self.height, "tx_hash": self.tx_hash.to_hex()});
|
||||
self.fee.map(|f| {
|
||||
result
|
||||
.as_object_mut()
|
||||
.unwrap()
|
||||
.insert("fee".to_string(), json!(f))
|
||||
});
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
impl Status {
|
||||
fn funding(&self) -> impl Iterator<Item = &FundingOutput> {
|
||||
self.confirmed.0.iter().chain(self.mempool.0.iter())
|
||||
}
|
||||
|
||||
fn spending(&self) -> impl Iterator<Item = &SpendingInput> {
|
||||
self.confirmed.1.iter().chain(self.mempool.1.iter())
|
||||
}
|
||||
|
||||
pub fn confirmed_balance(&self) -> i64 {
|
||||
calc_balance(&self.confirmed)
|
||||
}
|
||||
|
||||
pub fn mempool_balance(&self) -> i64 {
|
||||
calc_balance(&self.mempool)
|
||||
}
|
||||
|
||||
pub fn history(&self) -> Vec<HistoryItem> {
|
||||
let mut txns_map = HashMap::<Txid, i32>::new();
|
||||
for f in self.funding() {
|
||||
txns_map.insert(f.txn_id, f.height as i32);
|
||||
}
|
||||
for s in self.spending() {
|
||||
txns_map.insert(s.txn_id, s.height as i32);
|
||||
}
|
||||
let mut items: Vec<HistoryItem> = txns_map
|
||||
.into_iter()
|
||||
.map(|item| HistoryItem {
|
||||
height: item.1,
|
||||
tx_hash: item.0,
|
||||
fee: self.txn_fees.get(&item.0).cloned(),
|
||||
})
|
||||
.collect();
|
||||
items.sort_unstable_by_key(|item| item.height);
|
||||
items
|
||||
}
|
||||
|
||||
pub fn unspent(&self) -> Vec<&FundingOutput> {
|
||||
let mut outputs_map = HashMap::<OutPoint, &FundingOutput>::new();
|
||||
for f in self.funding() {
|
||||
outputs_map.insert((f.txn_id, f.output_index), f);
|
||||
}
|
||||
for s in self.spending() {
|
||||
if outputs_map.remove(&s.funding_output).is_none() {
|
||||
warn!("failed to remove {:?}", s.funding_output);
|
||||
}
|
||||
}
|
||||
let mut outputs = outputs_map
|
||||
.into_iter()
|
||||
.map(|item| item.1) // a reference to unspent output
|
||||
.collect::<Vec<&FundingOutput>>();
|
||||
outputs.sort_unstable_by_key(|out| out.height);
|
||||
outputs
|
||||
}
|
||||
|
||||
pub fn hash(&self) -> Option<FullHash> {
|
||||
let txns = self.history();
|
||||
if txns.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let mut sha2 = Sha256::new();
|
||||
for item in txns {
|
||||
let part = format!("{}:{}:", item.tx_hash.to_hex(), item.height);
|
||||
sha2.update(part.as_bytes());
|
||||
}
|
||||
Some(sha2.finalize().into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct TxnHeight {
|
||||
txn: Transaction,
|
||||
height: u32,
|
||||
}
|
||||
|
||||
fn merklize<T: Hash>(left: T, right: T) -> T {
|
||||
let data = [&left[..], &right[..]].concat();
|
||||
<T as Hash>::hash(&data)
|
||||
}
|
||||
|
||||
fn create_merkle_branch_and_root<T: Hash>(mut hashes: Vec<T>, mut index: usize) -> (Vec<T>, T) {
|
||||
let mut merkle = vec![];
|
||||
while hashes.len() > 1 {
|
||||
if hashes.len() % 2 != 0 {
|
||||
let last = *hashes.last().unwrap();
|
||||
hashes.push(last);
|
||||
}
|
||||
index = if index % 2 == 0 { index + 1 } else { index - 1 };
|
||||
merkle.push(hashes[index]);
|
||||
index /= 2;
|
||||
hashes = hashes
|
||||
.chunks(2)
|
||||
.map(|pair| merklize(pair[0], pair[1]))
|
||||
.collect()
|
||||
}
|
||||
(merkle, hashes[0])
|
||||
}
|
||||
|
||||
// TODO: the functions below can be part of ReadStore.
|
||||
fn txrow_by_txid(store: &dyn ReadStore, txid: &Txid) -> Option<TxRow> {
|
||||
let key = TxRow::filter_full(txid);
|
||||
let value = store.get(&key)?;
|
||||
Some(TxRow::from_row(&Row { key, value }))
|
||||
}
|
||||
|
||||
fn txrows_by_prefix(store: &dyn ReadStore, txid_prefix: HashPrefix) -> Vec<TxRow> {
|
||||
store
|
||||
.scan(&TxRow::filter_prefix(txid_prefix))
|
||||
.iter()
|
||||
.map(|row| TxRow::from_row(row))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn txids_by_script_hash(store: &dyn ReadStore, script_hash: &[u8]) -> Vec<HashPrefix> {
|
||||
store
|
||||
.scan(&TxOutRow::filter(script_hash))
|
||||
.iter()
|
||||
.map(|row| TxOutRow::from_row(row).txid_prefix)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn txids_by_funding_output(
|
||||
store: &dyn ReadStore,
|
||||
txn_id: &Txid,
|
||||
output_index: usize,
|
||||
) -> Vec<HashPrefix> {
|
||||
store
|
||||
.scan(&TxInRow::filter(txn_id, output_index))
|
||||
.iter()
|
||||
.map(|row| TxInRow::from_row(row).txid_prefix)
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub struct Query {
|
||||
app: Arc<App>,
|
||||
tracker: RwLock<Tracker>,
|
||||
tx_cache: TransactionCache,
|
||||
txid_limit: usize,
|
||||
duration: HistogramVec,
|
||||
}
|
||||
|
||||
impl Query {
|
||||
pub fn new(
|
||||
app: Arc<App>,
|
||||
metrics: &Metrics,
|
||||
tx_cache: TransactionCache,
|
||||
txid_limit: usize,
|
||||
) -> Arc<Query> {
|
||||
Arc::new(Query {
|
||||
app,
|
||||
tracker: RwLock::new(Tracker::new(metrics)),
|
||||
tx_cache,
|
||||
txid_limit,
|
||||
duration: metrics.histogram_vec(
|
||||
HistogramOpts::new("electrs_query_duration", "Request duration (in seconds)"),
|
||||
&["type"],
|
||||
),
|
||||
})
|
||||
}
|
||||
|
||||
fn load_txns_by_prefix(
|
||||
&self,
|
||||
store: &dyn ReadStore,
|
||||
prefixes: Vec<HashPrefix>,
|
||||
) -> Result<Vec<TxnHeight>> {
|
||||
let mut txns = vec![];
|
||||
for txid_prefix in prefixes {
|
||||
for tx_row in txrows_by_prefix(store, txid_prefix) {
|
||||
let txid: Txid = deserialize(&tx_row.key.txid).unwrap();
|
||||
let txn = self.load_txn(&txid, Some(tx_row.height))?;
|
||||
txns.push(TxnHeight {
|
||||
txn,
|
||||
height: tx_row.height,
|
||||
})
|
||||
}
|
||||
}
|
||||
Ok(txns)
|
||||
}
|
||||
|
||||
fn find_spending_input(
|
||||
&self,
|
||||
store: &dyn ReadStore,
|
||||
funding: &FundingOutput,
|
||||
) -> Result<Option<SpendingInput>> {
|
||||
let spending_txns: Vec<TxnHeight> = self.load_txns_by_prefix(
|
||||
store,
|
||||
txids_by_funding_output(store, &funding.txn_id, funding.output_index),
|
||||
)?;
|
||||
let mut spending_inputs = vec![];
|
||||
for t in &spending_txns {
|
||||
for input in t.txn.input.iter() {
|
||||
if input.previous_output.txid == funding.txn_id
|
||||
&& input.previous_output.vout == funding.output_index as u32
|
||||
{
|
||||
spending_inputs.push(SpendingInput {
|
||||
txn_id: t.txn.txid(),
|
||||
height: t.height,
|
||||
funding_output: (funding.txn_id, funding.output_index),
|
||||
value: funding.value,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
assert!(spending_inputs.len() <= 1);
|
||||
Ok(if spending_inputs.len() == 1 {
|
||||
Some(spending_inputs.remove(0))
|
||||
} else {
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
fn find_funding_outputs(&self, t: &TxnHeight, script_hash: &[u8]) -> Vec<FundingOutput> {
|
||||
let mut result = vec![];
|
||||
let txn_id = t.txn.txid();
|
||||
for (index, output) in t.txn.output.iter().enumerate() {
|
||||
if compute_script_hash(&output.script_pubkey[..]) == script_hash {
|
||||
result.push(FundingOutput {
|
||||
txn_id,
|
||||
height: t.height,
|
||||
output_index: index,
|
||||
value: output.value,
|
||||
})
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn confirmed_status(
|
||||
&self,
|
||||
script_hash: &[u8],
|
||||
) -> Result<(Vec<FundingOutput>, Vec<SpendingInput>)> {
|
||||
let mut funding = vec![];
|
||||
let mut spending = vec![];
|
||||
let read_store = self.app.read_store();
|
||||
let txid_prefixes = txids_by_script_hash(read_store, script_hash);
|
||||
// if the limit is enabled
|
||||
if self.txid_limit > 0 && txid_prefixes.len() > self.txid_limit {
|
||||
bail!(
|
||||
"{}+ transactions found, query may take a long time",
|
||||
txid_prefixes.len()
|
||||
);
|
||||
}
|
||||
for t in self.load_txns_by_prefix(read_store, txid_prefixes)? {
|
||||
funding.extend(self.find_funding_outputs(&t, script_hash));
|
||||
}
|
||||
for funding_output in &funding {
|
||||
if let Some(spent) = self.find_spending_input(read_store, funding_output)? {
|
||||
spending.push(spent);
|
||||
}
|
||||
}
|
||||
Ok((funding, spending))
|
||||
}
|
||||
|
||||
fn mempool_status(
|
||||
&self,
|
||||
script_hash: &[u8],
|
||||
confirmed_funding: &[FundingOutput],
|
||||
tracker: &Tracker,
|
||||
) -> Result<(Vec<FundingOutput>, Vec<SpendingInput>)> {
|
||||
let mut funding = vec![];
|
||||
let mut spending = vec![];
|
||||
let txid_prefixes = txids_by_script_hash(tracker.index(), script_hash);
|
||||
for t in self.load_txns_by_prefix(tracker.index(), txid_prefixes)? {
|
||||
funding.extend(self.find_funding_outputs(&t, script_hash));
|
||||
}
|
||||
// TODO: dedup outputs (somehow) that are both confirmed and in mempool (e.g. after a reorg?)
|
||||
for funding_output in funding.iter().chain(confirmed_funding.iter()) {
|
||||
if let Some(spent) = self.find_spending_input(tracker.index(), funding_output)? {
|
||||
spending.push(spent);
|
||||
}
|
||||
}
|
||||
Ok((funding, spending))
|
||||
}
|
||||
|
||||
pub fn status(&self, script_hash: &[u8]) -> Result<Status> {
|
||||
let timer = self
|
||||
.duration
|
||||
.with_label_values(&["confirmed_status"])
|
||||
.start_timer();
|
||||
let confirmed = self
|
||||
.confirmed_status(script_hash)
|
||||
.chain_err(|| "failed to get confirmed status")?;
|
||||
timer.observe_duration();
|
||||
|
||||
let tracker = self.tracker.read().unwrap();
|
||||
let timer = self
|
||||
.duration
|
||||
.with_label_values(&["mempool_status"])
|
||||
.start_timer();
|
||||
let mempool = self
|
||||
.mempool_status(script_hash, &confirmed.0, &tracker)
|
||||
.chain_err(|| "failed to get mempool status")?;
|
||||
timer.observe_duration();
|
||||
|
||||
let mut txn_fees = HashMap::new();
|
||||
let funding_txn_ids = mempool.0.iter().map(|funding| funding.txn_id);
|
||||
let spending_txn_ids = mempool.1.iter().map(|spending| spending.txn_id);
|
||||
for mempool_txid in funding_txn_ids.chain(spending_txn_ids) {
|
||||
tracker
|
||||
.get_fee(&mempool_txid)
|
||||
.map(|fee| txn_fees.insert(mempool_txid, fee));
|
||||
}
|
||||
|
||||
Ok(Status {
|
||||
confirmed,
|
||||
mempool,
|
||||
txn_fees,
|
||||
})
|
||||
}
|
||||
|
||||
fn lookup_confirmed_blockhash(
|
||||
&self,
|
||||
tx_hash: &Txid,
|
||||
block_height: Option<u32>,
|
||||
) -> Result<Option<BlockHash>> {
|
||||
let blockhash = if self.tracker.read().unwrap().has_txn(tx_hash) {
|
||||
None // found in mempool (as unconfirmed transaction)
|
||||
} else {
|
||||
// Lookup in confirmed transactions' index
|
||||
let height = match block_height {
|
||||
Some(height) => height,
|
||||
None => {
|
||||
txrow_by_txid(self.app.read_store(), tx_hash)
|
||||
.chain_err(|| format!("not indexed tx {}", tx_hash))?
|
||||
.height
|
||||
}
|
||||
};
|
||||
let header = self
|
||||
.app
|
||||
.index()
|
||||
.get_header(height as usize)
|
||||
.chain_err(|| format!("missing header at height {}", height))?;
|
||||
Some(*header.hash())
|
||||
};
|
||||
Ok(blockhash)
|
||||
}
|
||||
|
||||
// Internal API for transaction retrieval
|
||||
fn load_txn(&self, txid: &Txid, block_height: Option<u32>) -> Result<Transaction> {
|
||||
let _timer = self.duration.with_label_values(&["load_txn"]).start_timer();
|
||||
self.tx_cache.get_or_else(txid, || {
|
||||
let blockhash = self.lookup_confirmed_blockhash(txid, block_height)?;
|
||||
let value: Value = self
|
||||
.app
|
||||
.daemon()
|
||||
.gettransaction_raw(txid, blockhash, /*verbose*/ false)?;
|
||||
let value_hex: &str = value.as_str().chain_err(|| "non-string tx")?;
|
||||
hex::decode(&value_hex).chain_err(|| "non-hex tx")
|
||||
})
|
||||
}
|
||||
|
||||
// Public API for transaction retrieval (for Electrum RPC)
|
||||
pub fn get_transaction(&self, tx_hash: &Txid, verbose: bool) -> Result<Value> {
|
||||
let _timer = self
|
||||
.duration
|
||||
.with_label_values(&["get_transaction"])
|
||||
.start_timer();
|
||||
let blockhash = self.lookup_confirmed_blockhash(tx_hash, /*block_height*/ None)?;
|
||||
self.app
|
||||
.daemon()
|
||||
.gettransaction_raw(tx_hash, blockhash, verbose)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_blockhash(&self, tx_hash: &Txid) -> Result<Value> {
|
||||
let blockhash = self.lookup_confirmed_blockhash(tx_hash, None)?;
|
||||
Ok(json!({ "block_hash": blockhash }))
|
||||
}
|
||||
|
||||
pub fn get_headers(&self, heights: &[usize]) -> Vec<HeaderEntry> {
|
||||
let _timer = self
|
||||
.duration
|
||||
.with_label_values(&["get_headers"])
|
||||
.start_timer();
|
||||
let index = self.app.index();
|
||||
heights
|
||||
.iter()
|
||||
.filter_map(|height| index.get_header(*height))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn get_best_header(&self) -> Result<HeaderEntry> {
|
||||
let last_header = self.app.index().best_header();
|
||||
last_header.chain_err(|| "no headers indexed")
|
||||
}
|
||||
|
||||
pub fn get_merkle_proof(
|
||||
&self,
|
||||
tx_hash: &Txid,
|
||||
height: usize,
|
||||
) -> Result<(Vec<TxMerkleNode>, usize)> {
|
||||
let header_entry = self
|
||||
.app
|
||||
.index()
|
||||
.get_header(height)
|
||||
.chain_err(|| format!("missing block #{}", height))?;
|
||||
let txids = self.app.daemon().getblocktxids(header_entry.hash())?;
|
||||
let pos = txids
|
||||
.iter()
|
||||
.position(|txid| txid == tx_hash)
|
||||
.chain_err(|| format!("missing txid {}", tx_hash))?;
|
||||
let tx_nodes: Vec<TxMerkleNode> = txids
|
||||
.into_iter()
|
||||
.map(|txid| TxMerkleNode::from_inner(txid.into_inner()))
|
||||
.collect();
|
||||
let (branch, _root) = create_merkle_branch_and_root(tx_nodes, pos);
|
||||
Ok((branch, pos))
|
||||
}
|
||||
|
||||
pub fn get_header_merkle_proof(
|
||||
&self,
|
||||
height: usize,
|
||||
cp_height: usize,
|
||||
) -> Result<(Vec<Sha256dHash>, Sha256dHash)> {
|
||||
if cp_height < height {
|
||||
bail!("cp_height #{} < height #{}", cp_height, height);
|
||||
}
|
||||
|
||||
let best_height = self.get_best_header()?.height();
|
||||
if best_height < cp_height {
|
||||
bail!(
|
||||
"cp_height #{} above best block height #{}",
|
||||
cp_height,
|
||||
best_height
|
||||
);
|
||||
}
|
||||
|
||||
let heights: Vec<usize> = (0..=cp_height).collect();
|
||||
let header_hashes: Vec<BlockHash> = self
|
||||
.get_headers(&heights)
|
||||
.into_iter()
|
||||
.map(|h| *h.hash())
|
||||
.collect();
|
||||
let merkle_nodes: Vec<Sha256dHash> = header_hashes
|
||||
.iter()
|
||||
.map(|block_hash| Sha256dHash::from_inner(block_hash.into_inner()))
|
||||
.collect();
|
||||
assert_eq!(header_hashes.len(), heights.len());
|
||||
Ok(create_merkle_branch_and_root(merkle_nodes, height))
|
||||
}
|
||||
|
||||
pub fn get_id_from_pos(
|
||||
&self,
|
||||
height: usize,
|
||||
tx_pos: usize,
|
||||
want_merkle: bool,
|
||||
) -> Result<(Txid, Vec<TxMerkleNode>)> {
|
||||
let header_entry = self
|
||||
.app
|
||||
.index()
|
||||
.get_header(height)
|
||||
.chain_err(|| format!("missing block #{}", height))?;
|
||||
|
||||
let txids = self.app.daemon().getblocktxids(header_entry.hash())?;
|
||||
let txid = *txids
|
||||
.get(tx_pos)
|
||||
.chain_err(|| format!("No tx in position #{} in block #{}", tx_pos, height))?;
|
||||
|
||||
let tx_nodes = txids
|
||||
.into_iter()
|
||||
.map(|txid| TxMerkleNode::from_inner(txid.into_inner()))
|
||||
.collect();
|
||||
|
||||
let branch = if want_merkle {
|
||||
create_merkle_branch_and_root(tx_nodes, tx_pos).0
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
Ok((txid, branch))
|
||||
}
|
||||
|
||||
pub fn broadcast(&self, txn: &Transaction) -> Result<Txid> {
|
||||
self.app.daemon().broadcast(txn)
|
||||
}
|
||||
|
||||
pub fn update_mempool(&self) -> Result<()> {
|
||||
let _timer = self
|
||||
.duration
|
||||
.with_label_values(&["update_mempool"])
|
||||
.start_timer();
|
||||
self.tracker.write().unwrap().update(self.app.daemon())
|
||||
}
|
||||
|
||||
/// Returns [vsize, fee_rate] pairs (measured in vbytes and satoshis).
|
||||
pub fn get_fee_histogram(&self) -> Vec<(f32, u32)> {
|
||||
self.tracker.read().unwrap().fee_histogram().clone()
|
||||
}
|
||||
|
||||
// Fee rate [BTC/kB] to be confirmed in `blocks` from now.
pub fn estimate_fee(&self, blocks: usize) -> f64 {
    let mut total_vsize = 0u32;
    let mut last_fee_rate = 0.0;
    let blocks_in_vbytes = (blocks * 1_000_000) as u32; // assume ~1MB blocks
    for (fee_rate, vsize) in self.tracker.read().unwrap().fee_histogram() {
        last_fee_rate = *fee_rate;
        total_vsize += vsize;
        if total_vsize >= blocks_in_vbytes {
            break; // under-estimate the fee rate a bit
        }
    }
    (last_fee_rate as f64) * 1e-5 // [BTC/kB] = 10^5 [sat/B]
}
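// Worked example (hypothetical histogram values, added for illustration):
// with blocks = 1 (i.e. 1_000_000 vbytes) and a fee histogram of
// [(50.0, 300_000), (20.0, 500_000), (5.0, 400_000)] as (sat/vB, vsize) pairs,
// the cumulative vsize reaches 1_200_000 >= 1_000_000 at fee_rate 5.0,
// so the estimate is 5.0 * 1e-5 = 0.00005 BTC/kB
// (5 sat/vB * 1000 vB/kB / 1e8 sat/BTC).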
|
||||
|
||||
pub fn get_banner(&self) -> Result<String> {
|
||||
self.app.get_banner()
|
||||
}
|
||||
|
||||
pub fn get_relayfee(&self) -> Result<f64> {
|
||||
self.app.daemon().get_relayfee()
|
||||
}
|
||||
}
|
719
src/rpc.rs
@ -1,719 +0,0 @@
|
||||
use bitcoin::blockdata::transaction::Transaction;
|
||||
use bitcoin::consensus::encode::{deserialize, serialize};
|
||||
use bitcoin::hashes::hex::{FromHex, ToHex};
|
||||
use bitcoin::hashes::{sha256d::Hash as Sha256dHash, Hash};
|
||||
use error_chain::ChainedError;
|
||||
use serde_json::{from_str, Value};
|
||||
use std::collections::HashMap;
|
||||
use std::io::{BufRead, BufReader, Write};
|
||||
use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream};
|
||||
use std::sync::mpsc::{self, Receiver, Sender, SyncSender, TrySendError};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
|
||||
use crate::errors::*;
|
||||
use crate::metrics::{Gauge, HistogramOpts, HistogramVec, MetricOpts, Metrics};
|
||||
use crate::query::{Query, Status};
|
||||
use crate::util::{spawn_thread, Channel, HeaderEntry};
|
||||
|
||||
const ELECTRS_VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
const PROTOCOL_VERSION: &str = "1.4";
|
||||
|
||||
// JSON-RPC spec errors
|
||||
const PARSE_ERROR: i16 = -32700;
|
||||
const METHOD_NOT_FOUND: i16 = -32601;
|
||||
const INVALID_REQUEST: i16 = -32600;
|
||||
|
||||
// electrum-specific errors
|
||||
const BAD_REQUEST: i16 = 1;
|
||||
const DAEMON_ERROR: i16 = 2;
|
||||
|
||||
// TODO: Sha256dHash should be a generic hash-container (since script hash is single SHA256)
|
||||
fn hash_from_value<T: Hash>(val: Option<&Value>) -> Result<T> {
|
||||
let script_hash = val.chain_err(|| "missing hash")?;
|
||||
let script_hash = script_hash.as_str().chain_err(|| "non-string hash")?;
|
||||
let script_hash = T::from_hex(script_hash).chain_err(|| "non-hex hash")?;
|
||||
Ok(script_hash)
|
||||
}
|
||||
|
||||
fn usize_from_value(val: Option<&Value>, name: &str) -> Result<usize> {
|
||||
let val = val.chain_err(|| format!("missing {}", name))?;
|
||||
let val = val.as_u64().chain_err(|| format!("non-integer {}", name))?;
|
||||
Ok(val as usize)
|
||||
}
|
||||
|
||||
fn usize_from_value_or(val: Option<&Value>, name: &str, default: usize) -> Result<usize> {
|
||||
if val.is_none() {
|
||||
return Ok(default);
|
||||
}
|
||||
usize_from_value(val, name)
|
||||
}
|
||||
|
||||
fn bool_from_value(val: Option<&Value>, name: &str) -> Result<bool> {
|
||||
let val = val.chain_err(|| format!("missing {}", name))?;
|
||||
let val = val.as_bool().chain_err(|| format!("not a bool {}", name))?;
|
||||
Ok(val)
|
||||
}
|
||||
|
||||
fn bool_from_value_or(val: Option<&Value>, name: &str, default: bool) -> Result<bool> {
|
||||
if val.is_none() {
|
||||
return Ok(default);
|
||||
}
|
||||
bool_from_value(val, name)
|
||||
}
|
||||
|
||||
fn unspent_from_status(status: &Status) -> Value {
    json!(Value::Array(
        status
            .unspent()
            .into_iter()
            .map(|out| json!({
                "height": out.height,
                "tx_pos": out.output_index,
                "tx_hash": out.txn_id.to_hex(),
                "value": out.value,
            }))
            .collect()
    ))
}
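// Shape of each resulting `blockchain.scripthash.listunspent` entry
// (field values below are placeholders, shown only to document the JSON layout):
//   { "height": <block height>, "tx_pos": <funding output index>,
//     "tx_hash": "<funding txid, hex>", "value": <amount in satoshis> }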
|
||||
|
||||
fn json_rpc_error_from_error(error: &Error) -> Value {
|
||||
let code = {
|
||||
let mut error: &dyn std::error::Error = error;
|
||||
loop {
|
||||
if let Some(e) = error.downcast_ref::<Error>() {
|
||||
match e.kind() {
|
||||
ErrorKind::MethodNotFound(_) => break METHOD_NOT_FOUND,
|
||||
ErrorKind::InvalidRequest(_) => break INVALID_REQUEST,
|
||||
ErrorKind::ParseError => break PARSE_ERROR,
|
||||
ErrorKind::Daemon(_, _) => break DAEMON_ERROR,
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
error = match error.source() {
|
||||
Some(error) => error,
|
||||
None => break BAD_REQUEST,
|
||||
};
|
||||
}
|
||||
};
|
||||
json!({
|
||||
"code": code,
|
||||
"message": error.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
type Map = serde_json::Map<String, Value>;
|
||||
|
||||
struct Connection {
|
||||
query: Arc<Query>,
|
||||
last_header_entry: Option<HeaderEntry>,
|
||||
status_hashes: HashMap<Sha256dHash, Value>, // ScriptHash -> StatusHash
|
||||
stream: TcpStream,
|
||||
addr: SocketAddr,
|
||||
sender: SyncSender<Message>,
|
||||
stats: Arc<Stats>,
|
||||
relayfee: f64,
|
||||
}
|
||||
|
||||
impl Connection {
|
||||
pub fn new(
|
||||
query: Arc<Query>,
|
||||
stream: TcpStream,
|
||||
addr: SocketAddr,
|
||||
stats: Arc<Stats>,
|
||||
relayfee: f64,
|
||||
sender: SyncSender<Message>,
|
||||
) -> Connection {
|
||||
Connection {
|
||||
query,
|
||||
last_header_entry: None, // disable header subscription for now
|
||||
status_hashes: HashMap::new(),
|
||||
stream,
|
||||
addr,
|
||||
sender,
|
||||
stats,
|
||||
relayfee,
|
||||
}
|
||||
}
|
||||
|
||||
fn blockchain_headers_subscribe(&mut self) -> Result<Value> {
|
||||
let entry = self.query.get_best_header()?;
|
||||
let hex_header = hex::encode(serialize(entry.header()));
|
||||
let result = json!({"hex": hex_header, "height": entry.height()});
|
||||
self.last_header_entry = Some(entry);
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn server_version(&self, params: &[Value]) -> Result<Value> {
|
||||
if params.len() != 2 {
|
||||
bail!("invalid params: {:?}", params);
|
||||
}
|
||||
let client_id = params[0]
|
||||
.as_str()
|
||||
.chain_err(|| format!("invalid client_id: {:?}", params[0]))?;
|
||||
// TODO: support (min, max) protocol version limits
|
||||
let client_version = params[1]
|
||||
.as_str()
|
||||
.chain_err(|| format!("invalid client_version: {:?}", params[1]))?;
|
||||
|
||||
if client_version != PROTOCOL_VERSION {
|
||||
bail!(
|
||||
"{} requested protocol version {}, server supports {}",
|
||||
client_id,
|
||||
client_version,
|
||||
PROTOCOL_VERSION
|
||||
);
|
||||
}
|
||||
Ok(json!([
|
||||
format!("electrs {}", ELECTRS_VERSION),
|
||||
PROTOCOL_VERSION
|
||||
]))
|
||||
}
|
||||
|
||||
fn server_banner(&self) -> Result<Value> {
|
||||
Ok(json!(self.query.get_banner()?))
|
||||
}
|
||||
|
||||
fn server_donation_address(&self) -> Result<Value> {
|
||||
Ok(Value::Null)
|
||||
}
|
||||
|
||||
fn server_peers_subscribe(&self) -> Result<Value> {
|
||||
Ok(json!([]))
|
||||
}
|
||||
|
||||
fn mempool_get_fee_histogram(&self) -> Result<Value> {
|
||||
Ok(json!(self.query.get_fee_histogram()))
|
||||
}
|
||||
|
||||
fn blockchain_block_header(&self, params: &[Value]) -> Result<Value> {
|
||||
let height = usize_from_value(params.get(0), "height")?;
|
||||
let cp_height = usize_from_value_or(params.get(1), "cp_height", 0)?;
|
||||
|
||||
let raw_header_hex: String = self
|
||||
.query
|
||||
.get_headers(&[height])
|
||||
.into_iter()
|
||||
.map(|entry| hex::encode(&serialize(entry.header())))
|
||||
.collect();
|
||||
|
||||
if cp_height == 0 {
|
||||
return Ok(json!(raw_header_hex));
|
||||
}
|
||||
let (branch, root) = self.query.get_header_merkle_proof(height, cp_height)?;
|
||||
|
||||
let branch_vec: Vec<String> = branch.into_iter().map(|b| b.to_hex()).collect();
|
||||
|
||||
Ok(json!({
|
||||
"header": raw_header_hex,
|
||||
"root": root.to_hex(),
|
||||
"branch": branch_vec
|
||||
}))
|
||||
}
|
||||
|
||||
fn blockchain_block_headers(&self, params: &[Value]) -> Result<Value> {
|
||||
let start_height = usize_from_value(params.get(0), "start_height")?;
|
||||
let count = usize_from_value(params.get(1), "count")?;
|
||||
let cp_height = usize_from_value_or(params.get(2), "cp_height", 0)?;
|
||||
let heights: Vec<usize> = (start_height..(start_height + count)).collect();
|
||||
let headers: Vec<String> = self
|
||||
.query
|
||||
.get_headers(&heights)
|
||||
.into_iter()
|
||||
.map(|entry| hex::encode(&serialize(entry.header())))
|
||||
.collect();
|
||||
|
||||
if count == 0 || cp_height == 0 {
|
||||
return Ok(json!({
|
||||
"count": headers.len(),
|
||||
"hex": headers.join(""),
|
||||
"max": 2016,
|
||||
}));
|
||||
}
|
||||
|
||||
let (branch, root) = self
|
||||
.query
|
||||
.get_header_merkle_proof(start_height + (count - 1), cp_height)?;
|
||||
|
||||
let branch_vec: Vec<String> = branch.into_iter().map(|b| b.to_hex()).collect();
|
||||
|
||||
Ok(json!({
|
||||
"count": headers.len(),
|
||||
"hex": headers.join(""),
|
||||
"max": 2016,
|
||||
"root": root.to_hex(),
|
||||
"branch" : branch_vec
|
||||
}))
|
||||
}
|
||||
|
||||
fn blockchain_estimatefee(&self, params: &[Value]) -> Result<Value> {
|
||||
let blocks_count = usize_from_value(params.get(0), "blocks_count")?;
|
||||
let fee_rate = self.query.estimate_fee(blocks_count); // in BTC/kB
|
||||
Ok(json!(fee_rate.max(self.relayfee)))
|
||||
}
|
||||
|
||||
fn blockchain_relayfee(&self) -> Result<Value> {
|
||||
Ok(json!(self.relayfee)) // in BTC/kB
|
||||
}
|
||||
|
||||
fn blockchain_scripthash_subscribe(&mut self, params: &[Value]) -> Result<Value> {
|
||||
let script_hash =
|
||||
hash_from_value::<Sha256dHash>(params.get(0)).chain_err(|| "bad script_hash")?;
|
||||
let status = self.query.status(&script_hash[..])?;
|
||||
let result = status.hash().map_or(Value::Null, |h| json!(hex::encode(h)));
|
||||
if self
|
||||
.status_hashes
|
||||
.insert(script_hash, result.clone())
|
||||
.is_none()
|
||||
{
|
||||
self.stats.subscriptions.inc();
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn blockchain_scripthash_get_balance(&self, params: &[Value]) -> Result<Value> {
|
||||
let script_hash =
|
||||
hash_from_value::<Sha256dHash>(params.get(0)).chain_err(|| "bad script_hash")?;
|
||||
let status = self.query.status(&script_hash[..])?;
|
||||
Ok(
|
||||
json!({ "confirmed": status.confirmed_balance(), "unconfirmed": status.mempool_balance() }),
|
||||
)
|
||||
}
|
||||
|
||||
fn blockchain_scripthash_get_history(&self, params: &[Value]) -> Result<Value> {
|
||||
let script_hash =
|
||||
hash_from_value::<Sha256dHash>(params.get(0)).chain_err(|| "bad script_hash")?;
|
||||
let status = self.query.status(&script_hash[..])?;
|
||||
Ok(json!(Value::Array(
|
||||
status
|
||||
.history()
|
||||
.into_iter()
|
||||
.map(|item| item.to_json())
|
||||
.collect()
|
||||
)))
|
||||
}
|
||||
|
||||
fn blockchain_scripthash_listunspent(&self, params: &[Value]) -> Result<Value> {
|
||||
let script_hash =
|
||||
hash_from_value::<Sha256dHash>(params.get(0)).chain_err(|| "bad script_hash")?;
|
||||
Ok(unspent_from_status(&self.query.status(&script_hash[..])?))
|
||||
}
|
||||
|
||||
fn blockchain_transaction_broadcast(&self, params: &[Value]) -> Result<Value> {
|
||||
let tx = params.get(0).chain_err(|| "missing tx")?;
|
||||
let tx = tx.as_str().chain_err(|| "non-string tx")?;
|
||||
let tx = hex::decode(&tx).chain_err(|| "non-hex tx")?;
|
||||
let tx: Transaction = deserialize(&tx).chain_err(|| "failed to parse tx")?;
|
||||
let txid = self.query.broadcast(&tx)?;
|
||||
self.query.update_mempool()?;
|
||||
if let Err(e) = self.sender.try_send(Message::PeriodicUpdate) {
|
||||
warn!("failed to issue PeriodicUpdate after broadcast: {}", e);
|
||||
}
|
||||
Ok(json!(txid.to_hex()))
|
||||
}
|
||||
|
||||
fn blockchain_transaction_get(&self, params: &[Value]) -> Result<Value> {
|
||||
let tx_hash = hash_from_value(params.get(0)).chain_err(|| "bad tx_hash")?;
|
||||
let verbose = match params.get(1) {
|
||||
Some(value) => value.as_bool().chain_err(|| "non-bool verbose value")?,
|
||||
None => false,
|
||||
};
|
||||
self.query.get_transaction(&tx_hash, verbose)
|
||||
}
|
||||
|
||||
fn blockchain_transaction_get_confirmed_blockhash(&self, params: &[Value]) -> Result<Value> {
|
||||
let tx_hash = hash_from_value(params.get(0)).chain_err(|| "bad tx_hash")?;
|
||||
let value = self.query.get_confirmed_blockhash(&tx_hash)?;
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
fn blockchain_transaction_get_merkle(&self, params: &[Value]) -> Result<Value> {
|
||||
let tx_hash = hash_from_value(params.get(0)).chain_err(|| "bad tx_hash")?;
|
||||
let height = usize_from_value(params.get(1), "height")?;
|
||||
let (merkle, pos) = self
|
||||
.query
|
||||
.get_merkle_proof(&tx_hash, height)
|
||||
.chain_err(|| "cannot create merkle proof")?;
|
||||
let merkle: Vec<String> = merkle.into_iter().map(|txid| txid.to_hex()).collect();
|
||||
Ok(json!({
|
||||
"block_height": height,
|
||||
"merkle": merkle,
|
||||
"pos": pos}))
|
||||
}
|
||||
|
||||
fn blockchain_transaction_id_from_pos(&self, params: &[Value]) -> Result<Value> {
|
||||
let height = usize_from_value(params.get(0), "height")?;
|
||||
let tx_pos = usize_from_value(params.get(1), "tx_pos")?;
|
||||
let want_merkle = bool_from_value_or(params.get(2), "merkle", false)?;
|
||||
|
||||
let (txid, merkle) = self.query.get_id_from_pos(height, tx_pos, want_merkle)?;
|
||||
|
||||
if !want_merkle {
|
||||
return Ok(json!(txid.to_hex()));
|
||||
}
|
||||
|
||||
let merkle_vec: Vec<String> = merkle.into_iter().map(|entry| entry.to_hex()).collect();
|
||||
|
||||
Ok(json!({
|
||||
"tx_hash" : txid.to_hex(),
|
||||
"merkle" : merkle_vec}))
|
||||
}
|
||||
|
||||
fn handle_command(&mut self, method: &str, params: &[Value], id: &Value) -> Result<Value> {
|
||||
let timer = self
|
||||
.stats
|
||||
.latency
|
||||
.with_label_values(&[method])
|
||||
.start_timer();
|
||||
let result = match method {
|
||||
"blockchain.block.header" => self.blockchain_block_header(params),
|
||||
"blockchain.block.headers" => self.blockchain_block_headers(params),
|
||||
"blockchain.estimatefee" => self.blockchain_estimatefee(params),
|
||||
"blockchain.headers.subscribe" => self.blockchain_headers_subscribe(),
|
||||
"blockchain.relayfee" => self.blockchain_relayfee(),
|
||||
"blockchain.scripthash.get_balance" => self.blockchain_scripthash_get_balance(params),
|
||||
"blockchain.scripthash.get_history" => self.blockchain_scripthash_get_history(params),
|
||||
"blockchain.scripthash.listunspent" => self.blockchain_scripthash_listunspent(params),
|
||||
"blockchain.scripthash.subscribe" => self.blockchain_scripthash_subscribe(params),
|
||||
"blockchain.transaction.broadcast" => self.blockchain_transaction_broadcast(params),
|
||||
"blockchain.transaction.get" => self.blockchain_transaction_get(params),
|
||||
"blockchain.transaction.get_merkle" => self.blockchain_transaction_get_merkle(params),
|
||||
"blockchain.transaction.get_confirmed_blockhash" => {
|
||||
self.blockchain_transaction_get_confirmed_blockhash(params)
|
||||
}
|
||||
"blockchain.transaction.id_from_pos" => self.blockchain_transaction_id_from_pos(params),
|
||||
"mempool.get_fee_histogram" => self.mempool_get_fee_histogram(),
|
||||
"server.banner" => self.server_banner(),
|
||||
"server.donation_address" => self.server_donation_address(),
|
||||
"server.peers.subscribe" => self.server_peers_subscribe(),
|
||||
"server.ping" => Ok(Value::Null),
|
||||
"server.version" => self.server_version(params),
|
||||
&_ => Err(ErrorKind::MethodNotFound(method.to_owned()).into()),
|
||||
};
|
||||
timer.observe_duration();
|
||||
if let Err(e) = &result {
|
||||
warn!(
|
||||
"rpc #{} {} {:?} failed: {}",
|
||||
id,
|
||||
method,
|
||||
params,
|
||||
e.display_chain()
|
||||
);
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn update_subscriptions(&mut self) -> Result<Vec<Value>> {
|
||||
let timer = self
|
||||
.stats
|
||||
.latency
|
||||
.with_label_values(&["periodic_update"])
|
||||
.start_timer();
|
||||
let mut result = vec![];
|
||||
if let Some(ref mut last_entry) = self.last_header_entry {
|
||||
let entry = self.query.get_best_header()?;
|
||||
if *last_entry != entry {
|
||||
*last_entry = entry;
|
||||
let hex_header = hex::encode(serialize(last_entry.header()));
|
||||
let header = json!({"hex": hex_header, "height": last_entry.height()});
|
||||
result.push(json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "blockchain.headers.subscribe",
|
||||
"params": [header]}));
|
||||
}
|
||||
}
|
||||
for (script_hash, status_hash) in self.status_hashes.iter_mut() {
|
||||
let status = self.query.status(&script_hash[..])?;
|
||||
let new_status_hash = status.hash().map_or(Value::Null, |h| json!(hex::encode(h)));
|
||||
if new_status_hash == *status_hash {
|
||||
continue;
|
||||
}
|
||||
result.push(json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "blockchain.scripthash.subscribe",
|
||||
"params": [script_hash.to_hex(), new_status_hash]}));
|
||||
*status_hash = new_status_hash;
|
||||
}
|
||||
timer.observe_duration();
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn send_values(&mut self, values: &[Value]) -> Result<()> {
|
||||
for value in values {
|
||||
let line = value.to_string() + "\n";
|
||||
self.stream
|
||||
.write_all(line.as_bytes())
|
||||
.chain_err(|| format!("failed to send {}", value))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_replies(&mut self, receiver: Receiver<Message>) -> Result<()> {
|
||||
fn parse_id_from_request(line: String) -> Result<(Value, Map)> {
|
||||
let value: Value = from_str(&line).chain_err(|| ErrorKind::ParseError)?;
|
||||
let mut cmd = match value {
|
||||
Value::Object(cmd) => cmd,
|
||||
_ => bail!(ErrorKind::ParseError),
|
||||
};
|
||||
let id = cmd
|
||||
.remove("id")
|
||||
.chain_err(|| ErrorKind::InvalidRequest("missing id"))?;
|
||||
Ok((id, cmd))
|
||||
}
|
||||
|
||||
fn parse_method_and_params_from_request(mut cmd: Map) -> Result<(String, Vec<Value>)> {
|
||||
let method = match cmd
|
||||
.remove("method")
|
||||
.chain_err(|| ErrorKind::InvalidRequest("missing method"))?
|
||||
{
|
||||
Value::String(method) => method,
|
||||
_ => bail!(ErrorKind::InvalidRequest("method must be a string")),
|
||||
};
|
||||
let params = match cmd.remove("params") {
|
||||
None => Vec::new(),
|
||||
Some(Value::Array(params)) => params,
|
||||
Some(_) => bail!("params must be an array"),
|
||||
};
|
||||
Ok((method, params))
|
||||
}
|
||||
|
||||
loop {
|
||||
let msg = receiver.recv().chain_err(|| "channel closed")?;
|
||||
trace!("RPC {:?}", msg);
|
||||
match msg {
|
||||
Message::Request(line) => {
|
||||
let (id, result) = match parse_id_from_request(line) {
|
||||
Ok((id, cmd)) => match parse_method_and_params_from_request(cmd) {
|
||||
Ok((method, params)) => {
|
||||
let result = self.handle_command(&method, ¶ms, &id);
|
||||
(id, result)
|
||||
}
|
||||
Err(e) => (id, Err(e)),
|
||||
},
|
||||
Err(e) => (Value::Null, Err(e)),
|
||||
};
|
||||
let reply = match result {
|
||||
Ok(result) => json!({"jsonrpc": "2.0", "id": id, "result": result}),
|
||||
Err(e) => {
|
||||
let error = json_rpc_error_from_error(&e);
|
||||
json!({"jsonrpc": "2.0", "id": id, "error": error})
|
||||
}
|
||||
};
|
||||
self.send_values(&[reply])?
|
||||
}
|
||||
Message::PeriodicUpdate => {
|
||||
let values = self
|
||||
.update_subscriptions()
|
||||
.chain_err(|| "failed to update subscriptions")?;
|
||||
self.send_values(&values)?
|
||||
}
|
||||
Message::Done => return Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_requests(mut reader: BufReader<TcpStream>, tx: SyncSender<Message>) -> Result<()> {
|
||||
loop {
|
||||
let mut line = Vec::<u8>::new();
|
||||
reader
|
||||
.read_until(b'\n', &mut line)
|
||||
.chain_err(|| "failed to read a request")?;
|
||||
if line.is_empty() {
|
||||
tx.send(Message::Done).chain_err(|| "channel closed")?;
|
||||
return Ok(());
|
||||
} else {
|
||||
if line.starts_with(&[22, 3, 1]) {
|
||||
// (very) naive SSL handshake detection
|
||||
let _ = tx.send(Message::Done);
|
||||
bail!("invalid request - maybe SSL-encrypted data?: {:?}", line)
|
||||
}
|
||||
match String::from_utf8(line) {
|
||||
Ok(req) => tx
|
||||
.send(Message::Request(req))
|
||||
.chain_err(|| "channel closed")?,
|
||||
Err(err) => {
|
||||
let _ = tx.send(Message::Done);
|
||||
bail!("invalid UTF8: {}", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn run(mut self, receiver: Receiver<Message>) {
|
||||
let reader = BufReader::new(self.stream.try_clone().expect("failed to clone TcpStream"));
|
||||
let sender = self.sender.clone();
|
||||
let child = spawn_thread("reader", || Connection::parse_requests(reader, sender));
|
||||
if let Err(e) = self.handle_replies(receiver) {
|
||||
error!(
|
||||
"[{}] connection handling failed: {}",
|
||||
self.addr,
|
||||
e.display_chain().to_string()
|
||||
);
|
||||
}
|
||||
self.stats
|
||||
.subscriptions
|
||||
.sub(self.status_hashes.len() as i64);
|
||||
debug!("[{}] shutting down connection", self.addr);
|
||||
let _ = self.stream.shutdown(Shutdown::Both);
|
||||
if let Err(err) = child.join().expect("receiver panicked") {
|
||||
error!("[{}] receiver failed: {}", self.addr, err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Message {
|
||||
Request(String),
|
||||
PeriodicUpdate,
|
||||
Done,
|
||||
}
|
||||
|
||||
pub enum Notification {
|
||||
Periodic,
|
||||
Exit,
|
||||
}
|
||||
|
||||
pub struct RPC {
|
||||
notification: Sender<Notification>,
|
||||
server: Option<thread::JoinHandle<()>>, // so we can join the server while dropping this object
|
||||
}
|
||||
|
||||
struct Stats {
|
||||
latency: HistogramVec,
|
||||
subscriptions: Gauge,
|
||||
}
|
||||
|
||||
impl RPC {
|
||||
fn start_notifier(
|
||||
notification: Channel<Notification>,
|
||||
senders: Arc<Mutex<Vec<SyncSender<Message>>>>,
|
||||
acceptor: Sender<Option<(TcpStream, SocketAddr)>>,
|
||||
) {
|
||||
spawn_thread("notification", move || {
|
||||
for msg in notification.receiver().iter() {
|
||||
let mut senders = senders.lock().unwrap();
|
||||
match msg {
|
||||
Notification::Periodic => {
|
||||
senders.retain(|sender| {
|
||||
if let Err(TrySendError::Disconnected(_)) =
|
||||
sender.try_send(Message::PeriodicUpdate)
|
||||
{
|
||||
false // drop disconnected clients
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
}
|
||||
Notification::Exit => acceptor.send(None).unwrap(), // mark acceptor as done
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn start_acceptor(addr: SocketAddr) -> Channel<Option<(TcpStream, SocketAddr)>> {
|
||||
let chan = Channel::unbounded();
|
||||
let acceptor = chan.sender();
|
||||
spawn_thread("acceptor", move || {
|
||||
let listener =
|
||||
TcpListener::bind(addr).unwrap_or_else(|e| panic!("bind({}) failed: {}", addr, e));
|
||||
info!(
|
||||
"Electrum RPC server running on {} (protocol {})",
|
||||
addr, PROTOCOL_VERSION
|
||||
);
|
||||
loop {
|
||||
let (stream, addr) = listener.accept().expect("accept failed");
|
||||
stream
|
||||
.set_nonblocking(false)
|
||||
.expect("failed to set connection as blocking");
|
||||
acceptor.send(Some((stream, addr))).expect("send failed");
|
||||
}
|
||||
});
|
||||
chan
|
||||
}
|
||||
|
||||
pub fn start(addr: SocketAddr, query: Arc<Query>, metrics: &Metrics, relayfee: f64) -> RPC {
|
||||
let stats = Arc::new(Stats {
|
||||
latency: metrics.histogram_vec(
|
||||
HistogramOpts::new("electrs_electrum_rpc", "Electrum RPC latency (seconds)"),
|
||||
&["method"],
|
||||
),
|
||||
subscriptions: metrics.gauge(MetricOpts::new(
|
||||
"electrs_electrum_subscriptions",
|
||||
"# of Electrum subscriptions",
|
||||
)),
|
||||
});
|
||||
stats.subscriptions.set(0);
|
||||
let notification = Channel::unbounded();
|
||||
|
||||
RPC {
|
||||
notification: notification.sender(),
|
||||
server: Some(spawn_thread("rpc", move || {
|
||||
let senders = Arc::new(Mutex::new(Vec::<SyncSender<Message>>::new()));
|
||||
|
||||
let acceptor = RPC::start_acceptor(addr);
|
||||
RPC::start_notifier(notification, senders.clone(), acceptor.sender());
|
||||
|
||||
let mut threads = HashMap::new();
|
||||
let (garbage_sender, garbage_receiver) = crossbeam_channel::unbounded();
|
||||
|
||||
while let Some((stream, addr)) = acceptor.receiver().recv().unwrap() {
|
||||
// explicitly scope the shadowed variables for the new thread
|
||||
let query = Arc::clone(&query);
|
||||
let stats = Arc::clone(&stats);
|
||||
let garbage_sender = garbage_sender.clone();
|
||||
let (sender, receiver) = mpsc::sync_channel(10);
|
||||
|
||||
senders.lock().unwrap().push(sender.clone());
|
||||
|
||||
let spawned = spawn_thread("peer", move || {
|
||||
info!("[{}] connected peer", addr);
|
||||
let conn = Connection::new(query, stream, addr, stats, relayfee, sender);
|
||||
conn.run(receiver);
|
||||
info!("[{}] disconnected peer", addr);
|
||||
let _ = garbage_sender.send(std::thread::current().id());
|
||||
});
|
||||
|
||||
trace!("[{}] spawned {:?}", addr, spawned.thread().id());
|
||||
threads.insert(spawned.thread().id(), spawned);
|
||||
while let Ok(id) = garbage_receiver.try_recv() {
|
||||
if let Some(thread) = threads.remove(&id) {
|
||||
trace!("[{}] joining {:?}", addr, id);
|
||||
if let Err(error) = thread.join() {
|
||||
error!("failed to join {:?}: {:?}", id, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
trace!("closing {} RPC connections", senders.lock().unwrap().len());
|
||||
for sender in senders.lock().unwrap().iter() {
|
||||
let _ = sender.send(Message::Done);
|
||||
}
|
||||
for (id, thread) in threads {
|
||||
trace!("joining {:?}", id);
|
||||
if let Err(error) = thread.join() {
|
||||
error!("failed to join {:?}: {:?}", id, error);
|
||||
}
|
||||
}
|
||||
|
||||
trace!("RPC connections are closed");
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn notify(&self) {
|
||||
self.notification.send(Notification::Periodic).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for RPC {
|
||||
fn drop(&mut self) {
|
||||
trace!("stop accepting new RPCs");
|
||||
self.notification.send(Notification::Exit).unwrap();
|
||||
if let Some(handle) = self.server.take() {
|
||||
handle.join().unwrap();
|
||||
}
|
||||
trace!("RPC server is stopped");
|
||||
}
|
||||
}
|
194
src/server.rs
Normal file
@ -0,0 +1,194 @@
|
||||
use anyhow::{Context, Result};
|
||||
use bitcoin::BlockHash;
|
||||
use bitcoincore_rpc::RpcApi;
|
||||
use crossbeam_channel::{bounded, select, unbounded, Receiver, Sender};
|
||||
use rayon::prelude::*;
|
||||
|
||||
use std::{
|
||||
collections::hash_map::HashMap,
|
||||
convert::TryFrom,
|
||||
io::{BufRead, BufReader, Write},
|
||||
net::{Shutdown, TcpListener, TcpStream},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
config::Config,
|
||||
daemon::rpc_connect,
|
||||
electrum::{Client, Rpc},
|
||||
signals,
|
||||
thread::spawn,
|
||||
};
|
||||
|
||||
struct Peer {
|
||||
id: usize,
|
||||
client: Client,
|
||||
stream: TcpStream,
|
||||
}
|
||||
|
||||
impl Peer {
|
||||
fn new(id: usize, stream: TcpStream) -> Self {
|
||||
let client = Client::default();
|
||||
Self { id, client, stream }
|
||||
}
|
||||
|
||||
fn send(&mut self, values: Vec<String>) -> Result<()> {
|
||||
for mut value in values {
|
||||
debug!("{}: send {}", self.id, value);
|
||||
value += "\n";
|
||||
self.stream
|
||||
.write_all(value.as_bytes())
|
||||
.with_context(|| format!("failed to send response: {:?}", value))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn disconnect(self) {
|
||||
let _ = self.stream.shutdown(Shutdown::Both);
|
||||
}
|
||||
}
|
||||
|
||||
fn tip_receiver(config: &Config) -> Result<Receiver<BlockHash>> {
    let duration = u64::try_from(config.wait_duration.as_millis()).unwrap();
    let (tip_tx, tip_rx) = bounded(0);
    let rpc = rpc_connect(config)?;

    use crossbeam_channel::TrySendError;
    spawn("tip_loop", move || loop {
        let tip = rpc.get_best_block_hash()?;
        match tip_tx.try_send(tip) {
            Ok(_) | Err(TrySendError::Full(_)) => (),
            Err(TrySendError::Disconnected(_)) => bail!("tip receiver disconnected"),
        }
        rpc.wait_for_new_block(duration)?;
    });
    Ok(tip_rx)
}

pub fn run(config: &Config, mut rpc: Rpc) -> Result<()> {
    let listener = TcpListener::bind(config.electrum_rpc_addr)?;
    let tip_rx = tip_receiver(config)?;
    info!("serving Electrum RPC on {}", listener.local_addr()?);

    let (server_tx, server_rx) = unbounded();
    spawn("accept_loop", || accept_loop(listener, server_tx)); // detach accepting thread
    let signal_rx = signals::register();

    let mut peers = HashMap::<usize, Peer>::new();
    loop {
        select! {
            recv(signal_rx) -> sig => {
                match sig.context("signal channel disconnected")? {
                    signals::Signal::Exit => break,
                    signals::Signal::Trigger => (),
                }
            },
            recv(tip_rx) -> tip => match tip {
                Ok(_) => (), // sync and update
                Err(_) => break, // daemon is shutting down
            },
            recv(server_rx) -> event => {
                let event = event.context("server disconnected")?;
                let buffered_events = server_rx.iter().take(server_rx.len());
                for event in std::iter::once(event).chain(buffered_events) {
                    handle_event(&rpc, &mut peers, event);
                }
            },
        };
        rpc.sync().context("rpc sync failed")?;
        peers = notify_peers(&rpc, peers); // peers are disconnected on error.
    }
    info!("stopping Electrum RPC server");
    Ok(())
}
|
||||
|
||||
fn notify_peers(rpc: &Rpc, peers: HashMap<usize, Peer>) -> HashMap<usize, Peer> {
|
||||
peers
|
||||
.into_par_iter()
|
||||
.filter_map(|(_, mut peer)| match notify_peer(rpc, &mut peer) {
|
||||
Ok(()) => Some((peer.id, peer)),
|
||||
Err(e) => {
|
||||
error!("failed to notify peer {}: {}", peer.id, e);
|
||||
peer.disconnect();
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn notify_peer(rpc: &Rpc, peer: &mut Peer) -> Result<()> {
|
||||
let notifications = rpc
|
||||
.update_client(&mut peer.client)
|
||||
.context("failed to generate notifications")?;
|
||||
peer.send(notifications)
|
||||
.context("failed to send notifications")
|
||||
}
|
||||
|
||||
struct Event {
|
||||
peer_id: usize,
|
||||
msg: Message,
|
||||
}
|
||||
|
||||
enum Message {
|
||||
New(TcpStream),
|
||||
Request(String),
|
||||
Done,
|
||||
}
|
||||
|
||||
fn handle_event(rpc: &Rpc, peers: &mut HashMap<usize, Peer>, event: Event) {
|
||||
let Event { msg, peer_id } = event;
|
||||
match msg {
|
||||
Message::New(stream) => {
|
||||
debug!("{}: connected", peer_id);
|
||||
peers.insert(peer_id, Peer::new(peer_id, stream));
|
||||
}
|
||||
Message::Request(line) => {
|
||||
let result = match peers.get_mut(&peer_id) {
|
||||
Some(peer) => handle_request(rpc, peer, &line),
|
||||
None => return, // unknown peer
|
||||
};
|
||||
if let Err(e) = result {
|
||||
error!("{}: disconnecting due to {}", peer_id, e);
|
||||
peers.remove(&peer_id).unwrap().disconnect();
|
||||
}
|
||||
}
|
||||
Message::Done => {
|
||||
// already disconnected, just remove from peers' map
|
||||
peers.remove(&peer_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_request(rpc: &Rpc, peer: &mut Peer, line: &str) -> Result<()> {
|
||||
let response = rpc.handle_request(&mut peer.client, line);
|
||||
peer.send(vec![response])
|
||||
}
|
||||
|
||||
fn accept_loop(listener: TcpListener, server_tx: Sender<Event>) -> Result<()> {
|
||||
for (peer_id, conn) in listener.incoming().enumerate() {
|
||||
let stream = conn.context("failed to accept")?;
|
||||
let tx = server_tx.clone();
|
||||
spawn("recv_loop", move || {
|
||||
let result = recv_loop(peer_id, &stream, tx);
|
||||
let _ = stream.shutdown(Shutdown::Both);
|
||||
result
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn recv_loop(peer_id: usize, stream: &TcpStream, server_tx: Sender<Event>) -> Result<()> {
|
||||
let msg = Message::New(stream.try_clone()?);
|
||||
server_tx.send(Event { peer_id, msg })?;
|
||||
|
||||
for line in BufReader::new(stream).lines() {
|
||||
let line = line.with_context(|| format!("{}: recv failed", peer_id))?;
|
||||
debug!("{}: recv {}", peer_id, line);
|
||||
let msg = Message::Request(line);
|
||||
server_tx.send(Event { peer_id, msg })?;
|
||||
}
|
||||
|
||||
debug!("{}: disconnected", peer_id);
|
||||
let msg = Message::Done;
|
||||
server_tx.send(Event { peer_id, msg })?;
|
||||
Ok(())
|
||||
}
|
@ -1,52 +0,0 @@
|
||||
use crossbeam_channel as channel;
use crossbeam_channel::RecvTimeoutError;
use std::thread;
use std::time::Duration;

use crate::errors::*;

#[derive(Clone)] // so multiple threads could wait on signals
pub struct Waiter {
    receiver: channel::Receiver<i32>,
}

fn notify(signals: &[i32]) -> channel::Receiver<i32> {
    let (s, r) = channel::bounded(1);
    let signals =
        signal_hook::iterator::Signals::new(signals).expect("failed to register signal hook");
    thread::spawn(move || {
        for signal in signals.forever() {
            s.send(signal)
                .unwrap_or_else(|_| panic!("failed to send signal {}", signal));
        }
    });
    r
}

impl Waiter {
    pub fn start() -> Waiter {
        Waiter {
            receiver: notify(&[
                signal_hook::SIGINT,
                signal_hook::SIGTERM,
                signal_hook::SIGUSR1, // allow external triggering (e.g. via bitcoind `blocknotify`)
            ]),
        }
    }

    pub fn wait(&self, duration: Duration) -> Result<()> {
        match self.receiver.recv_timeout(duration) {
            Ok(sig) => {
                trace!("notified via SIG{}", sig);
                if sig != signal_hook::SIGUSR1 {
                    bail!(ErrorKind::Interrupt(sig))
                };
                Ok(())
            }
            Err(RecvTimeoutError::Timeout) => Ok(()),
            Err(RecvTimeoutError::Disconnected) => bail!("signal hook channel disconnected"),
        }
    }

    pub fn poll(&self) -> Result<()> {
        self.wait(Duration::from_secs(0))
    }
}
|
32
src/signals.rs
Normal file
@ -0,0 +1,32 @@
|
||||
use anyhow::Context;
use crossbeam_channel::{unbounded, Receiver};
use signal_hook::consts::signal::*;
use signal_hook::iterator::Signals;

use crate::thread::spawn;

pub(crate) enum Signal {
    Exit,
    Trigger,
}

pub(crate) fn register() -> Receiver<Signal> {
    let ids = [
        SIGINT, SIGTERM,
        SIGUSR1, // allow external triggering (e.g. via bitcoind `blocknotify`)
    ];
    let (tx, rx) = unbounded();
    let mut signals = Signals::new(&ids).expect("failed to register signal hook");
    spawn("signal", move || {
        for id in &mut signals {
            info!("notified via SIG{}", id);
            let signal = match id {
                SIGUSR1 => Signal::Trigger,
                _ => Signal::Exit,
            };
            tx.send(signal).context("failed to send signal")?;
        }
        Ok(())
    });
    rx
}
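// Usage sketch (mirrors how src/server.rs above consumes the channel): the returned
// receiver is polled inside the main `select!` loop, e.g.
//   let signal_rx = signals::register();
//   // ... later, inside `select!`:
//   //   recv(signal_rx) -> sig => match sig? { Signal::Exit => break, Signal::Trigger => () }
// so SIGUSR1 only triggers a resync, while SIGINT/SIGTERM shut the server down.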
|
540
src/status.rs
Normal file
@ -0,0 +1,540 @@
|
||||
use anyhow::Result;
|
||||
use bitcoin::{
|
||||
hashes::{sha256, Hash, HashEngine},
|
||||
Amount, Block, BlockHash, OutPoint, SignedAmount, Transaction, Txid,
|
||||
};
|
||||
use rayon::prelude::*;
|
||||
use serde::ser::{Serialize, Serializer};
|
||||
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use crate::{
|
||||
cache::Cache,
|
||||
chain::Chain,
|
||||
daemon::Daemon,
|
||||
index::Index,
|
||||
mempool::Mempool,
|
||||
merkle::Proof,
|
||||
types::{ScriptHash, StatusHash},
|
||||
};
|
||||
|
||||
/// Given a scripthash, store relevant inputs and outputs of a specific transaction
|
||||
struct TxEntry {
|
||||
txid: Txid,
|
||||
outputs: Vec<TxOutput>, // relevant funded outputs and their amounts
|
||||
spent: Vec<OutPoint>, // relevant spent outpoints
|
||||
}
|
||||
|
||||
struct TxOutput {
|
||||
index: u32,
|
||||
value: Amount,
|
||||
}
|
||||
|
||||
impl TxEntry {
|
||||
fn new(txid: Txid) -> Self {
|
||||
Self {
|
||||
txid,
|
||||
outputs: Vec::new(),
|
||||
spent: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Relevant (scripthash-wise) funded outpoints
|
||||
fn funding_outpoints(&self) -> impl Iterator<Item = OutPoint> + '_ {
|
||||
make_outpoints(&self.txid, &self.outputs)
|
||||
}
|
||||
}
|
||||
|
||||
// Confirmation height of a transaction or its mempool state:
// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-history
// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-mempool
enum Height {
    Confirmed { height: usize },
    Unconfirmed { has_unconfirmed_inputs: bool },
}

impl Height {
    fn as_i64(&self) -> i64 {
        match self {
            Self::Confirmed { height } => i64::try_from(*height).unwrap(),
            Self::Unconfirmed {
                has_unconfirmed_inputs: true,
            } => -1,
            Self::Unconfirmed {
                has_unconfirmed_inputs: false,
            } => 0,
        }
    }
}
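// The Electrum protocol (see the links above) encodes these states as integers:
//   Height::Confirmed { height: 654_321 }                 -> 654321
//   Height::Unconfirmed { has_unconfirmed_inputs: true }  -> -1
//   Height::Unconfirmed { has_unconfirmed_inputs: false } -> 0
// (654_321 is an arbitrary example height.)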
|
||||
|
||||
impl Serialize for Height {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.serialize_i64(self.as_i64())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Height {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
self.as_i64().fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
// A single history entry:
|
||||
// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-history
|
||||
// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-mempool
|
||||
#[derive(Serialize)]
|
||||
pub(crate) struct HistoryEntry {
|
||||
#[serde(rename = "tx_hash")]
|
||||
txid: Txid,
|
||||
height: Height,
|
||||
#[serde(
|
||||
skip_serializing_if = "Option::is_none",
|
||||
with = "bitcoin::util::amount::serde::as_sat::opt"
|
||||
)]
|
||||
fee: Option<Amount>,
|
||||
}
|
||||
|
||||
impl HistoryEntry {
    fn hash(&self, engine: &mut sha256::HashEngine) {
        let s = format!("{}:{}:", self.txid, self.height);
        engine.input(s.as_bytes());
    }

    fn confirmed(txid: Txid, height: usize) -> Self {
        Self {
            txid,
            height: Height::Confirmed { height },
            fee: None,
        }
    }

    fn unconfirmed(txid: Txid, has_unconfirmed_inputs: bool, fee: Amount) -> Self {
        Self {
            txid,
            height: Height::Unconfirmed {
                has_unconfirmed_inputs,
            },
            fee: Some(fee),
        }
    }
}
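// Example of the strings `hash()` feeds into the status-hash engine (txids shown as placeholders):
//   confirmed at height 123         -> "<txid hex>:123:"
//   mempool, no unconfirmed parents -> "<txid hex>:0:"
//   mempool, unconfirmed parents    -> "<txid hex>:-1:"
// i.e. the same "txid:height:" concatenation the Electrum scripthash status is defined over.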
|
||||
|
||||
/// ScriptHash subscription status
|
||||
pub struct ScriptHashStatus {
|
||||
scripthash: ScriptHash, // specific scripthash to be queried
|
||||
tip: BlockHash, // used for skipping confirmed entries' sync
|
||||
confirmed: HashMap<BlockHash, Vec<TxEntry>>, // confirmed entries, partitioned per block (may contain stale blocks)
|
||||
mempool: Vec<TxEntry>, // unconfirmed entries
|
||||
history: Vec<HistoryEntry>, // computed from confirmed and mempool entries
|
||||
statushash: Option<StatusHash>, // computed from history
|
||||
}
|
||||
|
||||
/// Specific scripthash balance
|
||||
#[derive(Default, Eq, PartialEq, Serialize)]
|
||||
pub(crate) struct Balance {
|
||||
#[serde(with = "bitcoin::util::amount::serde::as_sat", rename = "confirmed")]
|
||||
confirmed_balance: Amount,
|
||||
#[serde(with = "bitcoin::util::amount::serde::as_sat", rename = "unconfirmed")]
|
||||
mempool_delta: SignedAmount,
|
||||
}
|
||||
|
||||
// A single unspent transaction output entry:
|
||||
// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-listunspent
|
||||
#[derive(Serialize)]
|
||||
pub(crate) struct UnspentEntry {
|
||||
height: usize, // 0 = mempool entry
|
||||
tx_hash: Txid,
|
||||
tx_pos: u32,
|
||||
#[serde(with = "bitcoin::util::amount::serde::as_sat")]
|
||||
value: Amount,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct Unspent {
|
||||
// mapping an outpoint to its value & confirmation height
|
||||
outpoints: HashMap<OutPoint, (Amount, usize)>,
|
||||
confirmed_balance: Amount,
|
||||
mempool_delta: SignedAmount,
|
||||
}
|
||||
|
||||
impl Unspent {
|
||||
fn build(status: &ScriptHashStatus, chain: &Chain) -> Self {
|
||||
let mut unspent = Unspent::default();
|
||||
|
||||
status
|
||||
.confirmed_height_entries(chain)
|
||||
.for_each(|(height, entries)| entries.iter().for_each(|e| unspent.insert(e, height)));
|
||||
status
|
||||
.confirmed_entries(chain)
|
||||
.for_each(|e| unspent.remove(e));
|
||||
|
||||
unspent.confirmed_balance = unspent.balance();
|
||||
|
||||
status.mempool.iter().for_each(|e| unspent.insert(e, 0)); // mempool height = 0
|
||||
status.mempool.iter().for_each(|e| unspent.remove(e));
|
||||
|
||||
unspent.mempool_delta =
|
||||
unspent.balance().to_signed().unwrap() - unspent.confirmed_balance.to_signed().unwrap();
|
||||
|
||||
unspent
|
||||
}
|
||||
|
||||
fn into_entries(self) -> Vec<UnspentEntry> {
|
||||
self.outpoints
|
||||
.into_iter()
|
||||
.map(|(outpoint, (value, height))| UnspentEntry {
|
||||
height,
|
||||
tx_hash: outpoint.txid,
|
||||
tx_pos: outpoint.vout,
|
||||
value,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn balance(&self) -> Amount {
|
||||
self.outpoints
|
||||
.values()
|
||||
.fold(Amount::default(), |acc, v| acc + v.0)
|
||||
}
|
||||
|
||||
fn insert(&mut self, entry: &TxEntry, height: usize) {
|
||||
for output in &entry.outputs {
|
||||
let outpoint = OutPoint {
|
||||
txid: entry.txid,
|
||||
vout: output.index,
|
||||
};
|
||||
self.outpoints.insert(outpoint, (output.value, height));
|
||||
}
|
||||
}
|
||||
|
||||
fn remove(&mut self, entry: &TxEntry) {
|
||||
for spent in &entry.spent {
|
||||
self.outpoints.remove(spent);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ScriptHashStatus {
|
||||
/// Return non-synced (empty) status for a given script hash.
|
||||
pub fn new(scripthash: ScriptHash) -> Self {
|
||||
Self {
|
||||
scripthash,
|
||||
tip: BlockHash::default(),
|
||||
confirmed: HashMap::new(),
|
||||
mempool: Vec::new(),
|
||||
history: Vec::new(),
|
||||
statushash: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterate through confirmed TxEntries with their corresponding block heights.
|
||||
/// Skip entries from stale blocks.
|
||||
fn confirmed_height_entries<'a>(
|
||||
&'a self,
|
||||
chain: &'a Chain,
|
||||
) -> impl Iterator<Item = (usize, &[TxEntry])> + 'a {
|
||||
self.confirmed
|
||||
.iter()
|
||||
.filter_map(move |(blockhash, entries)| {
|
||||
chain
|
||||
.get_block_height(blockhash)
|
||||
.map(|height| (height, &entries[..]))
|
||||
})
|
||||
}
|
||||
|
||||
/// Iterate through confirmed TxEntries.
|
||||
/// Skip entries from stale blocks.
|
||||
fn confirmed_entries<'a>(&'a self, chain: &'a Chain) -> impl Iterator<Item = &TxEntry> + 'a {
|
||||
self.confirmed_height_entries(chain)
|
||||
.flat_map(|(_height, entries)| entries)
|
||||
}
|
||||
|
||||
/// Collect all funded and confirmed outpoints (as a set).
|
||||
fn confirmed_outpoints(&self, chain: &Chain) -> HashSet<OutPoint> {
|
||||
self.confirmed_entries(chain)
|
||||
.flat_map(TxEntry::funding_outpoints)
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(crate) fn get_unspent(&self, chain: &Chain) -> Vec<UnspentEntry> {
|
||||
Unspent::build(self, chain).into_entries()
|
||||
}
|
||||
|
||||
pub(crate) fn get_balance(&self, chain: &Chain) -> Balance {
|
||||
let unspent = Unspent::build(self, chain);
|
||||
Balance {
|
||||
confirmed_balance: unspent.confirmed_balance,
|
||||
mempool_delta: unspent.mempool_delta,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn get_history(&self, chain: &Chain, mempool: &Mempool) -> Vec<HistoryEntry> {
|
||||
let mut result = self.get_confirmed_history(chain);
|
||||
result.extend(self.get_mempool_history(mempool));
|
||||
result
|
||||
}
|
||||
|
||||
/// Collect all confirmed history entries (in block order).
|
||||
fn get_confirmed_history(&self, chain: &Chain) -> Vec<HistoryEntry> {
|
||||
self.confirmed_height_entries(chain)
|
||||
.collect::<BTreeMap<usize, &[TxEntry]>>()
|
||||
.into_iter()
|
||||
.flat_map(|(height, entries)| {
|
||||
entries
|
||||
.iter()
|
||||
.map(move |e| HistoryEntry::confirmed(e.txid, height))
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Collect all mempool history entries (keeping transactions with unconfirmed parents last).
|
||||
fn get_mempool_history(&self, mempool: &Mempool) -> Vec<HistoryEntry> {
|
||||
let mut entries = self
|
||||
.mempool
|
||||
.iter()
|
||||
.filter_map(|e| mempool.get(&e.txid))
|
||||
.collect::<Vec<_>>();
|
||||
entries.sort_by_key(|e| (e.has_unconfirmed_inputs, e.txid));
|
||||
entries
|
||||
.into_iter()
|
||||
.map(|e| HistoryEntry::unconfirmed(e.txid, e.has_unconfirmed_inputs, e.fee))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Apply func only on the new blocks (fetched from daemon).
|
||||
fn for_new_blocks<B, F>(&self, blockhashes: B, daemon: &Daemon, func: F) -> Result<()>
|
||||
where
|
||||
B: IntoIterator<Item = BlockHash>,
|
||||
F: FnMut(BlockHash, Block) + Send,
|
||||
{
|
||||
daemon.for_blocks(
|
||||
blockhashes
|
||||
.into_iter()
|
||||
.filter(|blockhash| !self.confirmed.contains_key(blockhash)),
|
||||
func,
|
||||
)
|
||||
}
|
||||
|
||||
/// Get funding and spending entries from new blocks.
|
||||
/// Also cache relevant transactions and their merkle proofs.
|
||||
fn sync_confirmed(
|
||||
&self,
|
||||
index: &Index,
|
||||
daemon: &Daemon,
|
||||
cache: &Cache,
|
||||
outpoints: &mut HashSet<OutPoint>,
|
||||
) -> Result<HashMap<BlockHash, Vec<TxEntry>>> {
|
||||
type TxPosition = usize; // transaction position within a block
|
||||
let mut result = HashMap::<BlockHash, HashMap<TxPosition, TxEntry>>::new();
|
||||
|
||||
let funding_blockhashes = index.limit_result(index.filter_by_funding(self.scripthash))?;
|
||||
self.for_new_blocks(funding_blockhashes, daemon, |blockhash, block| {
|
||||
let txids: Vec<Txid> = block.txdata.iter().map(|tx| tx.txid()).collect();
|
||||
for (pos, (tx, txid)) in block.txdata.into_iter().zip(txids.iter()).enumerate() {
|
||||
let funding_outputs = filter_outputs(&tx, &self.scripthash);
|
||||
if funding_outputs.is_empty() {
|
||||
continue;
|
||||
}
|
||||
cache.add_tx(*txid, move || tx);
|
||||
cache.add_proof(blockhash, *txid, || Proof::create(&txids, pos));
|
||||
outpoints.extend(make_outpoints(txid, &funding_outputs));
|
||||
result
|
||||
.entry(blockhash)
|
||||
.or_default()
|
||||
.entry(pos)
|
||||
.or_insert_with(|| TxEntry::new(*txid))
|
||||
.outputs = funding_outputs;
|
||||
}
|
||||
})?;
|
||||
let spending_blockhashes: HashSet<BlockHash> = outpoints
|
||||
.par_iter()
|
||||
.flat_map_iter(|outpoint| index.filter_by_spending(*outpoint))
|
||||
.collect();
|
||||
self.for_new_blocks(spending_blockhashes, daemon, |blockhash, block| {
|
||||
let txids: Vec<Txid> = block.txdata.iter().map(|tx| tx.txid()).collect();
|
||||
for (pos, (tx, txid)) in block.txdata.into_iter().zip(txids.iter()).enumerate() {
|
||||
let spent_outpoints = filter_inputs(&tx, outpoints);
|
||||
if spent_outpoints.is_empty() {
|
||||
continue;
|
||||
}
|
||||
cache.add_tx(*txid, move || tx);
|
||||
cache.add_proof(blockhash, *txid, || Proof::create(&txids, pos));
|
||||
result
|
||||
.entry(blockhash)
|
||||
.or_default()
|
||||
.entry(pos)
|
||||
.or_insert_with(|| TxEntry::new(*txid))
|
||||
.spent = spent_outpoints;
|
||||
}
|
||||
})?;
|
||||
|
||||
Ok(result
|
||||
.into_iter()
|
||||
.map(|(blockhash, entries_map)| {
|
||||
// sort transactions by their position in a block
|
||||
let sorted_entries = entries_map
|
||||
.into_iter()
|
||||
.collect::<BTreeMap<TxPosition, TxEntry>>()
|
||||
.into_iter()
|
||||
.map(|(_pos, entry)| entry)
|
||||
.collect::<Vec<TxEntry>>();
|
||||
(blockhash, sorted_entries)
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
|
||||
/// Get funding and spending entries from current mempool.
|
||||
/// Also cache relevant transactions.
|
||||
fn sync_mempool(
|
||||
&self,
|
||||
mempool: &Mempool,
|
||||
cache: &Cache,
|
||||
outpoints: &mut HashSet<OutPoint>,
|
||||
) -> Vec<TxEntry> {
|
||||
let mut result = HashMap::<Txid, TxEntry>::new();
|
||||
for entry in mempool.filter_by_funding(&self.scripthash) {
|
||||
let funding_outputs = filter_outputs(&entry.tx, &self.scripthash);
|
||||
assert!(!funding_outputs.is_empty());
|
||||
outpoints.extend(make_outpoints(&entry.txid, &funding_outputs));
|
||||
result
|
||||
.entry(entry.txid)
|
||||
.or_insert_with(|| TxEntry::new(entry.txid))
|
||||
.outputs = funding_outputs;
|
||||
cache.add_tx(entry.txid, || entry.tx.clone());
|
||||
}
|
||||
for entry in outpoints
|
||||
.iter()
|
||||
.flat_map(|outpoint| mempool.filter_by_spending(outpoint))
|
||||
{
|
||||
let spent_outpoints = filter_inputs(&entry.tx, outpoints);
|
||||
assert!(!spent_outpoints.is_empty());
|
||||
result
|
||||
.entry(entry.txid)
|
||||
.or_insert_with(|| TxEntry::new(entry.txid))
|
||||
.spent = spent_outpoints;
|
||||
cache.add_tx(entry.txid, || entry.tx.clone());
|
||||
}
|
||||
result.into_iter().map(|(_txid, entry)| entry).collect()
|
||||
}
|
||||
|
||||
/// Sync with currently confirmed txs and mempool, downloading non-cached transactions via p2p protocol.
|
||||
/// After a successful sync, scripthash status is updated.
|
||||
pub(crate) fn sync(
|
||||
&mut self,
|
||||
index: &Index,
|
||||
mempool: &Mempool,
|
||||
daemon: &Daemon,
|
||||
cache: &Cache,
|
||||
) -> Result<()> {
|
||||
let mut outpoints: HashSet<OutPoint> = self.confirmed_outpoints(index.chain());
|
||||
|
||||
let new_tip = index.chain().tip();
|
||||
if self.tip != new_tip {
|
||||
let update = self.sync_confirmed(index, daemon, cache, &mut outpoints)?;
|
||||
self.confirmed.extend(update);
|
||||
self.tip = new_tip;
|
||||
}
|
||||
if !self.confirmed.is_empty() {
|
||||
debug!(
|
||||
"{} transactions from {} blocks",
|
||||
self.confirmed.values().map(Vec::len).sum::<usize>(),
|
||||
self.confirmed.len()
|
||||
);
|
||||
}
|
||||
self.mempool = self.sync_mempool(mempool, cache, &mut outpoints);
|
||||
if !self.mempool.is_empty() {
|
||||
debug!("{} mempool transactions", self.mempool.len());
|
||||
}
|
||||
self.history = self.get_history(index.chain(), mempool);
|
||||
self.statushash = compute_status_hash(&self.history);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get current status hash.
|
||||
pub fn statushash(&self) -> Option<StatusHash> {
|
||||
self.statushash
|
||||
}
|
||||
}
|
||||
|
||||
fn make_outpoints<'a>(
|
||||
txid: &'a Txid,
|
||||
outputs: &'a [TxOutput],
|
||||
) -> impl Iterator<Item = OutPoint> + 'a {
|
||||
outputs
|
||||
.iter()
|
||||
.map(move |out| OutPoint::new(*txid, out.index))
|
||||
}
|
||||
|
||||
fn filter_outputs(tx: &Transaction, scripthash: &ScriptHash) -> Vec<TxOutput> {
|
||||
let outputs = tx.output.iter().zip(0u32..);
|
||||
outputs
|
||||
.filter_map(move |(txo, vout)| {
|
||||
if ScriptHash::new(&txo.script_pubkey) == *scripthash {
|
||||
Some(TxOutput {
|
||||
index: vout,
|
||||
value: Amount::from_sat(txo.value),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn filter_inputs(tx: &Transaction, outpoints: &HashSet<OutPoint>) -> Vec<OutPoint> {
|
||||
tx.input
|
||||
.iter()
|
||||
.filter_map(|txi| {
|
||||
if outpoints.contains(&txi.previous_output) {
|
||||
Some(txi.previous_output)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn compute_status_hash(history: &[HistoryEntry]) -> Option<StatusHash> {
|
||||
if history.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let mut engine = StatusHash::engine();
|
||||
for entry in history {
|
||||
entry.hash(&mut engine);
|
||||
}
|
||||
Some(StatusHash::from_engine(engine))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::HistoryEntry;
|
||||
use bitcoin::{hashes::hex::FromHex, Amount, Txid};
|
||||
use serde_json::json;
|
||||
|
||||
#[test]
|
||||
fn test_txinfo_json() {
|
||||
let txid =
|
||||
Txid::from_hex("5b75086dafeede555fc8f9a810d8b10df57c46f9f176ccc3dd8d2fa20edd685b")
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
json!(HistoryEntry::confirmed(txid, 123456)),
|
||||
json!({"tx_hash": "5b75086dafeede555fc8f9a810d8b10df57c46f9f176ccc3dd8d2fa20edd685b", "height": 123456})
|
||||
);
|
||||
assert_eq!(
|
||||
json!(HistoryEntry::unconfirmed(txid, true, Amount::from_sat(123))),
|
||||
json!({"tx_hash": "5b75086dafeede555fc8f9a810d8b10df57c46f9f176ccc3dd8d2fa20edd685b", "height": -1, "fee": 123})
|
||||
);
|
||||
assert_eq!(
|
||||
json!(HistoryEntry::unconfirmed(
|
||||
txid,
|
||||
false,
|
||||
Amount::from_sat(123)
|
||||
)),
|
||||
json!({"tx_hash": "5b75086dafeede555fc8f9a810d8b10df57c46f9f176ccc3dd8d2fa20edd685b", "height": 0, "fee": 123})
|
||||
);
|
||||
}
|
||||
}
|
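A minimal sketch of the status-hash convention that compute_status_hash() above implements (this is the Electrum protocol definition, written with illustrative types rather than the electrs ones): concatenate "tx_hash:height:" for every history entry, in order, and take the SHA-256 of the result; an empty history has no status.

use bitcoin::hashes::{sha256, Hash};

fn electrum_status(history: &[(String, i64)]) -> Option<sha256::Hash> {
    if history.is_empty() {
        return None; // an empty history is reported as null to the client
    }
    let mut preimage = String::new();
    for (txid_hex, height) in history {
        // unconfirmed entries use height 0 or -1, exactly as in the JSON tests above
        preimage.push_str(&format!("{}:{}:", txid_hex, height));
    }
    Some(sha256::Hash::hash(preimage.as_bytes()))
}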
193
src/store.rs
193
src/store.rs
@ -1,193 +0,0 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use crate::util::Bytes;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Row {
|
||||
pub key: Bytes,
|
||||
pub value: Bytes,
|
||||
}
|
||||
|
||||
impl Row {
|
||||
pub fn into_pair(self) -> (Bytes, Bytes) {
|
||||
(self.key, self.value)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ReadStore: Sync {
|
||||
fn get(&self, key: &[u8]) -> Option<Bytes>;
|
||||
fn scan(&self, prefix: &[u8]) -> Vec<Row>;
|
||||
}
|
||||
|
||||
pub trait WriteStore: Sync {
|
||||
fn write<I: IntoIterator<Item = Row>>(&self, rows: I);
|
||||
fn flush(&self);
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct Options {
|
||||
path: PathBuf,
|
||||
bulk_import: bool,
|
||||
low_memory: bool,
|
||||
}
|
||||
|
||||
pub struct DBStore {
|
||||
db: rocksdb::DB,
|
||||
opts: Options,
|
||||
}
|
||||
|
||||
impl DBStore {
|
||||
fn open_opts(opts: Options) -> Self {
|
||||
debug!("opening DB at {:?}", opts.path);
|
||||
let mut db_opts = rocksdb::Options::default();
|
||||
db_opts.create_if_missing(true);
|
||||
// db_opts.set_keep_log_file_num(10);
|
||||
db_opts.set_max_open_files(if opts.bulk_import { 16 } else { 256 });
|
||||
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
|
||||
db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy);
|
||||
db_opts.set_target_file_size_base(256 << 20);
|
||||
db_opts.set_write_buffer_size(256 << 20);
|
||||
db_opts.set_disable_auto_compactions(opts.bulk_import); // for initial bulk load
|
||||
db_opts.set_advise_random_on_open(!opts.bulk_import); // bulk load uses sequential I/O
|
||||
if !opts.low_memory {
|
||||
db_opts.set_compaction_readahead_size(1 << 20);
|
||||
}
|
||||
|
||||
let mut block_opts = rocksdb::BlockBasedOptions::default();
|
||||
block_opts.set_block_size(if opts.low_memory { 256 << 10 } else { 1 << 20 });
|
||||
DBStore {
|
||||
db: rocksdb::DB::open(&db_opts, &opts.path).unwrap(),
|
||||
opts,
|
||||
}
|
||||
}
|
||||
|
||||
/// Opens a new RocksDB at the specified location.
|
||||
pub fn open(path: &Path, low_memory: bool) -> Self {
|
||||
DBStore::open_opts(Options {
|
||||
path: path.to_path_buf(),
|
||||
bulk_import: true,
|
||||
low_memory,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn enable_compaction(self) -> Self {
|
||||
let mut opts = self.opts.clone();
|
||||
if opts.bulk_import {
|
||||
opts.bulk_import = false;
|
||||
info!("enabling auto-compactions");
|
||||
let opts = [("disable_auto_compactions", "false")];
|
||||
self.db.set_options(&opts).unwrap();
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
pub fn compact(self) -> Self {
|
||||
info!("starting full compaction");
|
||||
self.db.compact_range(None::<&[u8]>, None::<&[u8]>); // would take a while
|
||||
info!("finished full compaction");
|
||||
self
|
||||
}
|
||||
|
||||
pub fn iter_scan(&self, prefix: &[u8]) -> ScanIterator {
|
||||
ScanIterator {
|
||||
prefix: prefix.to_vec(),
|
||||
iter: self.db.prefix_iterator(prefix),
|
||||
done: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ScanIterator<'a> {
|
||||
prefix: Vec<u8>,
|
||||
iter: rocksdb::DBIterator<'a>,
|
||||
done: bool,
|
||||
}
|
||||
|
||||
impl<'a> Iterator for ScanIterator<'a> {
|
||||
type Item = Row;
|
||||
|
||||
fn next(&mut self) -> Option<Row> {
|
||||
if self.done {
|
||||
return None;
|
||||
}
|
||||
let (key, value) = self.iter.next()?;
|
||||
if !key.starts_with(&self.prefix) {
|
||||
self.done = true;
|
||||
return None;
|
||||
}
|
||||
Some(Row {
|
||||
key: key.to_vec(),
|
||||
value: value.to_vec(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl ReadStore for DBStore {
|
||||
fn get(&self, key: &[u8]) -> Option<Bytes> {
|
||||
self.db.get(key).unwrap().map(|v| v.to_vec())
|
||||
}
|
||||
|
||||
// TODO: use generators
|
||||
fn scan(&self, prefix: &[u8]) -> Vec<Row> {
|
||||
let mut rows = vec![];
|
||||
for (key, value) in self.db.iterator(rocksdb::IteratorMode::From(
|
||||
prefix,
|
||||
rocksdb::Direction::Forward,
|
||||
)) {
|
||||
if !key.starts_with(prefix) {
|
||||
break;
|
||||
}
|
||||
rows.push(Row {
|
||||
key: key.to_vec(),
|
||||
value: value.to_vec(),
|
||||
});
|
||||
}
|
||||
rows
|
||||
}
|
||||
}
|
||||
|
||||
impl WriteStore for DBStore {
|
||||
fn write<I: IntoIterator<Item = Row>>(&self, rows: I) {
|
||||
let mut batch = rocksdb::WriteBatch::default();
|
||||
for row in rows {
|
||||
batch.put(row.key.as_slice(), row.value.as_slice()).unwrap();
|
||||
}
|
||||
let mut opts = rocksdb::WriteOptions::new();
|
||||
opts.set_sync(!self.opts.bulk_import);
|
||||
opts.disable_wal(self.opts.bulk_import);
|
||||
self.db.write_opt(batch, &opts).unwrap();
|
||||
}
|
||||
|
||||
fn flush(&self) {
|
||||
let mut opts = rocksdb::WriteOptions::new();
|
||||
opts.set_sync(true);
|
||||
opts.disable_wal(false);
|
||||
let empty = rocksdb::WriteBatch::default();
|
||||
self.db.write_opt(empty, &opts).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for DBStore {
|
||||
fn drop(&mut self) {
|
||||
trace!("closing DB at {:?}", self.opts.path);
|
||||
}
|
||||
}
|
||||
|
||||
fn full_compaction_marker() -> Row {
|
||||
Row {
|
||||
key: b"F".to_vec(),
|
||||
value: b"".to_vec(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn full_compaction(store: DBStore) -> DBStore {
|
||||
store.flush();
|
||||
let store = store.compact().enable_compaction();
|
||||
store.write(vec![full_compaction_marker()]);
|
||||
store
|
||||
}
|
||||
|
||||
pub fn is_fully_compacted(store: &dyn ReadStore) -> bool {
|
||||
let marker = store.get(&full_compaction_marker().key);
|
||||
marker.is_some()
|
||||
}
|
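The ScanIterator above walks RocksDB keys sharing a prefix and stops at the first mismatch. A dependency-free sketch of the same idea over an ordered BTreeMap (names here are illustrative, not part of this diff):

use std::collections::BTreeMap;

fn scan_prefix<'a>(
    store: &'a BTreeMap<Vec<u8>, Vec<u8>>,
    prefix: &'a [u8],
) -> impl Iterator<Item = (&'a Vec<u8>, &'a Vec<u8>)> + 'a {
    store
        .range(prefix.to_vec()..) // start at the first key >= prefix
        .take_while(move |(key, _)| key.starts_with(prefix)) // stop once the prefix no longer matches
}

fn main() {
    let mut store = BTreeMap::new();
    store.insert(b"ab1".to_vec(), b"x".to_vec());
    store.insert(b"ab2".to_vec(), b"y".to_vec());
    store.insert(b"ac3".to_vec(), b"z".to_vec());
    assert_eq!(scan_prefix(&store, b"ab").count(), 2); // only the "ab" keys match
}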
Binary file not shown.
Binary file not shown.
1
src/tests/fixtures/incomplete_block.hex
vendored
1
src/tests/fixtures/incomplete_block.hex
vendored
File diff suppressed because one or more lines are too long
15
src/thread.rs
Normal file
15
src/thread.rs
Normal file
@ -0,0 +1,15 @@
|
||||
use anyhow::Result;
|
||||
|
||||
pub(crate) fn spawn<F>(name: &'static str, f: F) -> std::thread::JoinHandle<()>
|
||||
where
|
||||
F: 'static + Send + FnOnce() -> Result<()>,
|
||||
{
|
||||
std::thread::Builder::new()
|
||||
.name(name.to_owned())
|
||||
.spawn(move || {
|
||||
if let Err(e) = f() {
|
||||
warn!("{} thread failed: {}", name, e);
|
||||
}
|
||||
})
|
||||
.expect("failed to spawn a thread")
|
||||
}
|
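A self-contained sketch of the same pattern as spawn() above: run a fallible closure on a named thread and log its error instead of unwinding. This version uses eprintln! and a String error to avoid the log and anyhow dependencies; the helper in this diff uses warn! and anyhow::Result.

fn spawn_logged<F>(name: &'static str, f: F) -> std::thread::JoinHandle<()>
where
    F: 'static + Send + FnOnce() -> Result<(), String>,
{
    std::thread::Builder::new()
        .name(name.to_owned())
        .spawn(move || {
            if let Err(e) = f() {
                eprintln!("{} thread failed: {}", name, e); // log and swallow the error
            }
        })
        .expect("failed to spawn a thread")
}

fn main() {
    let handle = spawn_logged("worker", || Err("boom".to_owned()));
    handle.join().expect("worker thread panicked");
}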
88
src/tracker.rs
Normal file
88
src/tracker.rs
Normal file
@ -0,0 +1,88 @@
|
||||
use anyhow::{Context, Result};
|
||||
use bitcoin::{BlockHash, Txid};
|
||||
|
||||
use crate::{
|
||||
cache::Cache,
|
||||
chain::Chain,
|
||||
config::Config,
|
||||
daemon::Daemon,
|
||||
db::DBStore,
|
||||
index::Index,
|
||||
mempool::{Histogram, Mempool},
|
||||
metrics::Metrics,
|
||||
status::{Balance, HistoryEntry, ScriptHashStatus, UnspentEntry},
|
||||
};
|
||||
|
||||
/// Electrum protocol subscriptions' tracker
|
||||
pub struct Tracker {
|
||||
index: Index,
|
||||
mempool: Mempool,
|
||||
metrics: Metrics,
|
||||
index_batch_size: usize,
|
||||
ignore_mempool: bool,
|
||||
}
|
||||
|
||||
impl Tracker {
|
||||
pub fn new(config: &Config) -> Result<Self> {
|
||||
let metrics = Metrics::new(config.monitoring_addr)?;
|
||||
let store = DBStore::open(&config.db_path, config.auto_reindex)?;
|
||||
let chain = Chain::new(config.network);
|
||||
Ok(Self {
|
||||
index: Index::load(store, chain, &metrics, config.index_lookup_limit)
|
||||
.context("failed to open index")?,
|
||||
mempool: Mempool::new(),
|
||||
metrics,
|
||||
index_batch_size: config.index_batch_size,
|
||||
ignore_mempool: config.ignore_mempool,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn chain(&self) -> &Chain {
|
||||
self.index.chain()
|
||||
}
|
||||
|
||||
pub(crate) fn fees_histogram(&self) -> &Histogram {
|
||||
self.mempool.fees_histogram()
|
||||
}
|
||||
|
||||
pub(crate) fn metrics(&self) -> &Metrics {
|
||||
&self.metrics
|
||||
}
|
||||
|
||||
pub(crate) fn get_history(&self, status: &ScriptHashStatus) -> Vec<HistoryEntry> {
|
||||
status.get_history(self.index.chain(), &self.mempool)
|
||||
}
|
||||
|
||||
pub(crate) fn get_unspent(&self, status: &ScriptHashStatus) -> Vec<UnspentEntry> {
|
||||
status.get_unspent(self.index.chain())
|
||||
}
|
||||
|
||||
pub fn sync(&mut self, daemon: &Daemon) -> Result<()> {
|
||||
self.index.sync(daemon, self.index_batch_size)?;
|
||||
if !self.ignore_mempool {
|
||||
self.mempool.sync(daemon);
|
||||
}
|
||||
// TODO: double check tip - and retry on diff
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn update_scripthash_status(
|
||||
&self,
|
||||
status: &mut ScriptHashStatus,
|
||||
daemon: &Daemon,
|
||||
cache: &Cache,
|
||||
) -> Result<bool> {
|
||||
let prev_statushash = status.statushash();
|
||||
status.sync(&self.index, &self.mempool, daemon, cache)?;
|
||||
Ok(prev_statushash != status.statushash())
|
||||
}
|
||||
|
||||
pub(crate) fn get_balance(&self, status: &ScriptHashStatus) -> Balance {
|
||||
status.get_balance(self.chain())
|
||||
}
|
||||
|
||||
pub fn get_blockhash_by_txid(&self, txid: Txid) -> Option<BlockHash> {
|
||||
// Note: there are two blocks with coinbase transactions having the same txid (see BIP-30)
|
||||
self.index.filter_by_txid(txid).next()
|
||||
}
|
||||
}
|
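The change detection in update_scripthash_status() above reduces to comparing the status hash before and after a sync; a tiny runnable sketch of that contract (illustrative types):

fn status_changed<T: PartialEq>(before: &Option<T>, after: &Option<T>) -> bool {
    before != after
}

fn main() {
    // a script hash with no history has status None; its first matching tx triggers a notification
    assert!(status_changed(&None, &Some([0u8; 32])));
    // an unchanged status means nothing is pushed to the subscriber
    assert!(!status_changed(&Some([7u8; 32]), &Some([7u8; 32])));
}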
330
src/types.rs
Normal file
330
src/types.rs
Normal file
@ -0,0 +1,330 @@
|
||||
use anyhow::Result;
|
||||
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use bitcoin::{
|
||||
consensus::encode::{deserialize, serialize, Decodable, Encodable},
|
||||
hashes::{borrow_slice_impl, hash_newtype, hex_fmt_impl, index_impl, serde_impl, sha256, Hash},
|
||||
BlockHeader, OutPoint, Script, Txid,
|
||||
};
|
||||
|
||||
use crate::db;
|
||||
|
||||
macro_rules! impl_consensus_encoding {
|
||||
($thing:ident, $($field:ident),+) => (
|
||||
impl Encodable for $thing {
|
||||
#[inline]
|
||||
fn consensus_encode<S: ::std::io::Write>(
|
||||
&self,
|
||||
mut s: S,
|
||||
) -> Result<usize, std::io::Error> {
|
||||
let mut len = 0;
|
||||
$(len += self.$field.consensus_encode(&mut s)?;)+
|
||||
Ok(len)
|
||||
}
|
||||
}
|
||||
|
||||
impl Decodable for $thing {
|
||||
#[inline]
|
||||
fn consensus_decode<D: ::std::io::Read>(
|
||||
mut d: D,
|
||||
) -> Result<$thing, bitcoin::consensus::encode::Error> {
|
||||
Ok($thing {
|
||||
$($field: Decodable::consensus_decode(&mut d)?),+
|
||||
})
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
hash_newtype!(
|
||||
ScriptHash,
|
||||
sha256::Hash,
|
||||
32,
|
||||
doc = "https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-basics.html#script-hashes",
|
||||
true
|
||||
);
|
||||
|
||||
impl ScriptHash {
|
||||
pub fn new(script: &Script) -> Self {
|
||||
ScriptHash::hash(&script[..])
|
||||
}
|
||||
|
||||
fn prefix(&self) -> ScriptHashPrefix {
|
||||
let mut prefix = [0u8; HASH_PREFIX_LEN];
|
||||
prefix.copy_from_slice(&self.0[..HASH_PREFIX_LEN]);
|
||||
ScriptHashPrefix { prefix }
|
||||
}
|
||||
}
|
||||
|
||||
const HASH_PREFIX_LEN: usize = 8;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
struct ScriptHashPrefix {
|
||||
prefix: [u8; HASH_PREFIX_LEN],
|
||||
}
|
||||
|
||||
impl_consensus_encoding!(ScriptHashPrefix, prefix);
|
||||
|
||||
type Height = u32;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
pub(crate) struct ScriptHashRow {
|
||||
prefix: ScriptHashPrefix,
|
||||
height: Height, // transaction confirmed height
|
||||
}
|
||||
|
||||
impl_consensus_encoding!(ScriptHashRow, prefix, height);
|
||||
|
||||
impl ScriptHashRow {
|
||||
pub(crate) fn scan_prefix(scripthash: ScriptHash) -> Box<[u8]> {
|
||||
scripthash.0[..HASH_PREFIX_LEN].to_vec().into_boxed_slice()
|
||||
}
|
||||
|
||||
pub(crate) fn new(scripthash: ScriptHash, height: usize) -> Self {
|
||||
Self {
|
||||
prefix: scripthash.prefix(),
|
||||
height: Height::try_from(height).expect("invalid height"),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn to_db_row(&self) -> db::Row {
|
||||
serialize(self).into_boxed_slice()
|
||||
}
|
||||
|
||||
pub(crate) fn from_db_row(row: &[u8]) -> Self {
|
||||
deserialize(row).expect("bad ScriptHashRow")
|
||||
}
|
||||
|
||||
pub(crate) fn height(&self) -> usize {
|
||||
usize::try_from(self.height).expect("invalid height")
|
||||
}
|
||||
}
|
||||
|
||||
// ***************************************************************************
|
||||
|
||||
hash_newtype!(
|
||||
StatusHash,
|
||||
sha256::Hash,
|
||||
32,
|
||||
doc = "https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-basics.html#status",
|
||||
false
|
||||
);
|
||||
|
||||
// ***************************************************************************
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
struct SpendingPrefix {
|
||||
prefix: [u8; HASH_PREFIX_LEN],
|
||||
}
|
||||
|
||||
impl_consensus_encoding!(SpendingPrefix, prefix);
|
||||
|
||||
fn spending_prefix(prev: OutPoint) -> SpendingPrefix {
|
||||
let txid_prefix = <[u8; HASH_PREFIX_LEN]>::try_from(&prev.txid[..HASH_PREFIX_LEN]).unwrap();
|
||||
let value = u64::from_be_bytes(txid_prefix);
|
||||
let value = value.wrapping_add(prev.vout.into());
|
||||
SpendingPrefix {
|
||||
prefix: value.to_be_bytes(),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
pub(crate) struct SpendingPrefixRow {
|
||||
prefix: SpendingPrefix,
|
||||
height: Height, // transaction confirmed height
|
||||
}
|
||||
|
||||
impl_consensus_encoding!(SpendingPrefixRow, prefix, height);
|
||||
|
||||
impl SpendingPrefixRow {
|
||||
pub(crate) fn scan_prefix(outpoint: OutPoint) -> Box<[u8]> {
|
||||
Box::new(spending_prefix(outpoint).prefix)
|
||||
}
|
||||
|
||||
pub(crate) fn new(outpoint: OutPoint, height: usize) -> Self {
|
||||
Self {
|
||||
prefix: spending_prefix(outpoint),
|
||||
height: Height::try_from(height).expect("invalid height"),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn to_db_row(&self) -> db::Row {
|
||||
serialize(self).into_boxed_slice()
|
||||
}
|
||||
|
||||
pub(crate) fn from_db_row(row: &[u8]) -> Self {
|
||||
deserialize(row).expect("bad SpendingPrefixRow")
|
||||
}
|
||||
|
||||
pub(crate) fn height(&self) -> usize {
|
||||
usize::try_from(self.height).expect("invalid height")
|
||||
}
|
||||
}
|
||||
|
||||
// ***************************************************************************
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
struct TxidPrefix {
|
||||
prefix: [u8; HASH_PREFIX_LEN],
|
||||
}
|
||||
|
||||
impl_consensus_encoding!(TxidPrefix, prefix);
|
||||
|
||||
fn txid_prefix(txid: &Txid) -> TxidPrefix {
|
||||
let mut prefix = [0u8; HASH_PREFIX_LEN];
|
||||
prefix.copy_from_slice(&txid[..HASH_PREFIX_LEN]);
|
||||
TxidPrefix { prefix }
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
pub(crate) struct TxidRow {
|
||||
prefix: TxidPrefix,
|
||||
height: Height, // transaction confirmed height
|
||||
}
|
||||
|
||||
impl_consensus_encoding!(TxidRow, prefix, height);
|
||||
|
||||
impl TxidRow {
|
||||
pub(crate) fn scan_prefix(txid: Txid) -> Box<[u8]> {
|
||||
Box::new(txid_prefix(&txid).prefix)
|
||||
}
|
||||
|
||||
pub(crate) fn new(txid: Txid, height: usize) -> Self {
|
||||
Self {
|
||||
prefix: txid_prefix(&txid),
|
||||
height: Height::try_from(height).expect("invalid height"),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn to_db_row(&self) -> db::Row {
|
||||
serialize(self).into_boxed_slice()
|
||||
}
|
||||
|
||||
pub(crate) fn from_db_row(row: &[u8]) -> Self {
|
||||
deserialize(row).expect("bad TxidRow")
|
||||
}
|
||||
|
||||
pub(crate) fn height(&self) -> usize {
|
||||
usize::try_from(self.height).expect("invalid height")
|
||||
}
|
||||
}
|
||||
|
||||
// ***************************************************************************
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub(crate) struct HeaderRow {
|
||||
pub(crate) header: BlockHeader,
|
||||
}
|
||||
|
||||
impl_consensus_encoding!(HeaderRow, header);
|
||||
|
||||
impl HeaderRow {
|
||||
pub(crate) fn new(header: BlockHeader) -> Self {
|
||||
Self { header }
|
||||
}
|
||||
|
||||
pub(crate) fn to_db_row(&self) -> db::Row {
|
||||
serialize(self).into_boxed_slice()
|
||||
}
|
||||
|
||||
pub(crate) fn from_db_row(row: &[u8]) -> Self {
|
||||
deserialize(row).expect("bad HeaderRow")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::types::{spending_prefix, ScriptHash, ScriptHashRow, SpendingPrefix, TxidRow};
|
||||
use bitcoin::{hashes::hex::ToHex, Address, OutPoint, Txid};
|
||||
use serde_json::{from_str, json};
|
||||
|
||||
use std::str::FromStr;
|
||||
|
||||
#[test]
|
||||
fn test_scripthash_serde() {
|
||||
let hex = "\"4b3d912c1523ece4615e91bf0d27381ca72169dbf6b1c2ffcc9f92381d4984a3\"";
|
||||
let scripthash: ScriptHash = from_str(&hex).unwrap();
|
||||
assert_eq!(format!("\"{}\"", scripthash), hex);
|
||||
assert_eq!(json!(scripthash).to_string(), hex);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scripthash_row() {
|
||||
let hex = "\"4b3d912c1523ece4615e91bf0d27381ca72169dbf6b1c2ffcc9f92381d4984a3\"";
|
||||
let scripthash: ScriptHash = from_str(&hex).unwrap();
|
||||
let row1 = ScriptHashRow::new(scripthash, 123456);
|
||||
let db_row = row1.to_db_row();
|
||||
assert_eq!(db_row[..].to_hex(), "a384491d38929fcc40e20100");
|
||||
let row2 = ScriptHashRow::from_db_row(&db_row);
|
||||
assert_eq!(row1, row2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scripthash() {
|
||||
let addr = Address::from_str("1KVNjD3AAnQ3gTMqoTKcWFeqSFujq9gTBT").unwrap();
|
||||
let scripthash = ScriptHash::new(&addr.script_pubkey());
|
||||
assert_eq!(
|
||||
scripthash.to_hex(),
|
||||
"00dfb264221d07712a144bda338e89237d1abd2db4086057573895ea2659766a"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_txid1_prefix() {
|
||||
// duplicate txids from BIP-30
|
||||
let hex = "d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599";
|
||||
let txid = Txid::from_str(hex).unwrap();
|
||||
|
||||
let row1 = TxidRow::new(txid, 91812);
|
||||
let row2 = TxidRow::new(txid, 91842);
|
||||
|
||||
assert_eq!(row1.to_db_row().to_hex(), "9985d82954e10f22a4660100");
|
||||
assert_eq!(row2.to_db_row().to_hex(), "9985d82954e10f22c2660100");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_txid2_prefix() {
|
||||
// duplicate txids from BIP-30
|
||||
let hex = "e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468";
|
||||
let txid = Txid::from_str(hex).unwrap();
|
||||
|
||||
let row1 = TxidRow::new(txid, 91722);
|
||||
let row2 = TxidRow::new(txid, 91880);
|
||||
|
||||
// little-endian encoding => rows should be sorted according to block height
|
||||
assert_eq!(row1.to_db_row().to_hex(), "68b45f58b674e94e4a660100");
|
||||
assert_eq!(row2.to_db_row().to_hex(), "68b45f58b674e94ee8660100");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_spending_prefix() {
|
||||
let hex = "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f";
|
||||
let txid = Txid::from_str(hex).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
spending_prefix(OutPoint { txid, vout: 0 }),
|
||||
SpendingPrefix {
|
||||
prefix: [31, 30, 29, 28, 27, 26, 25, 24]
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
spending_prefix(OutPoint { txid, vout: 10 }),
|
||||
SpendingPrefix {
|
||||
prefix: [31, 30, 29, 28, 27, 26, 25, 34]
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
spending_prefix(OutPoint { txid, vout: 255 }),
|
||||
SpendingPrefix {
|
||||
prefix: [31, 30, 29, 28, 27, 26, 26, 23]
|
||||
}
|
||||
);
|
||||
assert_eq!(
|
||||
spending_prefix(OutPoint { txid, vout: 256 }),
|
||||
SpendingPrefix {
|
||||
prefix: [31, 30, 29, 28, 27, 26, 26, 24]
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
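A standalone sketch (illustrative names, not part of this diff) of the arithmetic inside spending_prefix() above: the first 8 bytes of the funding txid are read as a big-endian u64 and the output index is added with wrapping arithmetic, which is why the vout=255 case in the test carries into the neighbouring byte.

fn spending_prefix_bytes(txid_prefix: [u8; 8], vout: u32) -> [u8; 8] {
    u64::from_be_bytes(txid_prefix)
        .wrapping_add(u64::from(vout))
        .to_be_bytes()
}

fn main() {
    // mirrors test_spending_prefix: 24 + 255 = 279, so the last byte becomes 23
    // and the carry bumps the previous byte from 25 to 26
    assert_eq!(
        spending_prefix_bytes([31, 30, 29, 28, 27, 26, 25, 24], 255),
        [31, 30, 29, 28, 27, 26, 26, 23]
    );
}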
408
src/util.rs
408
src/util.rs
@ -1,408 +0,0 @@
|
||||
use bitcoin::blockdata::block::BlockHeader;
|
||||
use bitcoin::hash_types::BlockHash;
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryInto;
|
||||
use std::fmt;
|
||||
use std::iter::FromIterator;
|
||||
use std::slice;
|
||||
use std::sync::mpsc::{channel, sync_channel, Receiver, Sender, SyncSender};
|
||||
use std::thread;
|
||||
|
||||
pub type Bytes = Vec<u8>;
|
||||
pub type HeaderMap = HashMap<BlockHash, BlockHeader>;
|
||||
|
||||
// TODO: consolidate serialization/deserialize code for bincode/bitcoin.
|
||||
const HASH_LEN: usize = 32;
|
||||
pub const HASH_PREFIX_LEN: usize = 8;
|
||||
|
||||
pub type FullHash = [u8; HASH_LEN];
|
||||
pub type HashPrefix = [u8; HASH_PREFIX_LEN];
|
||||
|
||||
pub fn hash_prefix(hash: &[u8]) -> HashPrefix {
|
||||
hash[..HASH_PREFIX_LEN]
|
||||
.try_into()
|
||||
.expect("failed to convert into HashPrefix")
|
||||
}
|
||||
|
||||
pub fn full_hash(hash: &[u8]) -> FullHash {
|
||||
hash.try_into().expect("failed to convert into FullHash")
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq, Clone)]
|
||||
pub struct HeaderEntry {
|
||||
height: usize,
|
||||
hash: BlockHash,
|
||||
header: BlockHeader,
|
||||
}
|
||||
|
||||
impl HeaderEntry {
|
||||
pub fn hash(&self) -> &BlockHash {
|
||||
&self.hash
|
||||
}
|
||||
|
||||
pub fn header(&self) -> &BlockHeader {
|
||||
&self.header
|
||||
}
|
||||
|
||||
pub fn height(&self) -> usize {
|
||||
self.height
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for HeaderEntry {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let spec = time::Timespec::new(i64::from(self.header().time), 0);
|
||||
let last_block_time = time::at_utc(spec).rfc3339().to_string();
|
||||
write!(
|
||||
f,
|
||||
"best={} height={} @ {}",
|
||||
self.hash(),
|
||||
self.height(),
|
||||
last_block_time,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
struct HashedHeader {
|
||||
blockhash: BlockHash,
|
||||
header: BlockHeader,
|
||||
}
|
||||
|
||||
fn hash_headers(headers: Vec<BlockHeader>) -> Vec<HashedHeader> {
|
||||
// header[i] -> header[i-1] (i.e. header.last() is the tip)
|
||||
let hashed_headers =
|
||||
Vec::<HashedHeader>::from_iter(headers.into_iter().map(|header| HashedHeader {
|
||||
blockhash: header.block_hash(),
|
||||
header,
|
||||
}));
|
||||
for i in 1..hashed_headers.len() {
|
||||
assert_eq!(
|
||||
hashed_headers[i].header.prev_blockhash,
|
||||
hashed_headers[i - 1].blockhash
|
||||
);
|
||||
}
|
||||
hashed_headers
|
||||
}
|
||||
|
||||
pub struct HeaderList {
|
||||
headers: Vec<HeaderEntry>,
|
||||
heights: HashMap<BlockHash, usize>,
|
||||
}
|
||||
|
||||
impl HeaderList {
|
||||
pub fn empty() -> HeaderList {
|
||||
HeaderList {
|
||||
headers: vec![],
|
||||
heights: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn order(&self, new_headers: Vec<BlockHeader>) -> Vec<HeaderEntry> {
|
||||
// header[i] -> header[i-1] (i.e. header.last() is the tip)
|
||||
let hashed_headers = hash_headers(new_headers);
|
||||
let prev_blockhash = match hashed_headers.first() {
|
||||
Some(h) => h.header.prev_blockhash,
|
||||
None => return vec![], // hashed_headers is empty
|
||||
};
|
||||
let null_hash = BlockHash::default();
|
||||
let new_height: usize = if prev_blockhash == null_hash {
|
||||
0
|
||||
} else {
|
||||
self.header_by_blockhash(&prev_blockhash)
|
||||
.unwrap_or_else(|| panic!("{} is not part of the blockchain", prev_blockhash))
|
||||
.height()
|
||||
+ 1
|
||||
};
|
||||
(new_height..)
|
||||
.zip(hashed_headers.into_iter())
|
||||
.map(|(height, hashed_header)| HeaderEntry {
|
||||
height,
|
||||
hash: hashed_header.blockhash,
|
||||
header: hashed_header.header,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn apply(&mut self, new_headers: Vec<HeaderEntry>, tip: BlockHash) {
|
||||
if tip == BlockHash::default() {
|
||||
assert!(new_headers.is_empty());
|
||||
self.heights.clear();
|
||||
self.headers.clear();
|
||||
return;
|
||||
}
|
||||
// new_headers[i] -> new_headers[i - 1] (i.e. new_headers.last() is the tip)
|
||||
for i in 1..new_headers.len() {
|
||||
assert_eq!(new_headers[i - 1].height() + 1, new_headers[i].height());
|
||||
assert_eq!(
|
||||
*new_headers[i - 1].hash(),
|
||||
new_headers[i].header().prev_blockhash
|
||||
);
|
||||
}
|
||||
let new_height = match new_headers.first() {
|
||||
Some(entry) => {
|
||||
// Make sure tip is consistent (if there are new headers)
|
||||
let expected_tip = new_headers.last().unwrap().hash();
|
||||
assert_eq!(tip, *expected_tip);
|
||||
// Make sure first header connects correctly to existing chain
|
||||
let height = entry.height();
|
||||
let expected_prev_blockhash = if height > 0 {
|
||||
*self.headers[height - 1].hash()
|
||||
} else {
|
||||
BlockHash::default()
|
||||
};
|
||||
assert_eq!(entry.header().prev_blockhash, expected_prev_blockhash);
|
||||
// First new header's height (may override existing headers)
|
||||
height
|
||||
}
|
||||
// No new headers - chain's "tail" may be removed
|
||||
None => {
|
||||
let tip_height = *self
|
||||
.heights
|
||||
.get(&tip)
|
||||
.unwrap_or_else(|| panic!("missing tip: {}", tip));
|
||||
tip_height + 1 // keep the tip, drop the rest
|
||||
}
|
||||
};
|
||||
debug!(
|
||||
"applying {} new headers from height {}",
|
||||
new_headers.len(),
|
||||
new_height
|
||||
);
|
||||
self.headers.truncate(new_height); // keep [0..new_height) entries
|
||||
assert_eq!(new_height, self.headers.len());
|
||||
for new_header in new_headers {
|
||||
assert_eq!(new_header.height(), self.headers.len());
|
||||
assert_eq!(new_header.header().prev_blockhash, self.tip());
|
||||
self.heights.insert(*new_header.hash(), new_header.height());
|
||||
self.headers.push(new_header);
|
||||
}
|
||||
assert_eq!(tip, self.tip());
|
||||
assert!(self.heights.contains_key(&tip));
|
||||
}
|
||||
|
||||
pub fn header_by_blockhash(&self, blockhash: &BlockHash) -> Option<&HeaderEntry> {
|
||||
let height = self.heights.get(blockhash)?;
|
||||
let header = self.headers.get(*height)?;
|
||||
if *blockhash == *header.hash() {
|
||||
Some(header)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn header_by_height(&self, height: usize) -> Option<&HeaderEntry> {
|
||||
self.headers.get(height).map(|entry| {
|
||||
assert_eq!(entry.height(), height);
|
||||
entry
|
||||
})
|
||||
}
|
||||
|
||||
pub fn equals(&self, other: &HeaderList) -> bool {
|
||||
self.headers.last() == other.headers.last()
|
||||
}
|
||||
|
||||
pub fn tip(&self) -> BlockHash {
|
||||
self.headers.last().map(|h| *h.hash()).unwrap_or_default()
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.headers.len()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.headers.is_empty()
|
||||
}
|
||||
|
||||
pub fn iter(&self) -> slice::Iter<HeaderEntry> {
|
||||
self.headers.iter()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SyncChannel<T> {
|
||||
tx: SyncSender<T>,
|
||||
rx: Receiver<T>,
|
||||
}
|
||||
|
||||
impl<T> SyncChannel<T> {
|
||||
pub fn new(size: usize) -> SyncChannel<T> {
|
||||
let (tx, rx) = sync_channel(size);
|
||||
SyncChannel { tx, rx }
|
||||
}
|
||||
|
||||
pub fn sender(&self) -> SyncSender<T> {
|
||||
self.tx.clone()
|
||||
}
|
||||
|
||||
pub fn receiver(&self) -> &Receiver<T> {
|
||||
&self.rx
|
||||
}
|
||||
|
||||
pub fn into_receiver(self) -> Receiver<T> {
|
||||
self.rx
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Channel<T> {
|
||||
tx: Sender<T>,
|
||||
rx: Receiver<T>,
|
||||
}
|
||||
|
||||
impl<T> Channel<T> {
|
||||
pub fn unbounded() -> Self {
|
||||
let (tx, rx) = channel();
|
||||
Channel { tx, rx }
|
||||
}
|
||||
|
||||
pub fn sender(&self) -> Sender<T> {
|
||||
self.tx.clone()
|
||||
}
|
||||
|
||||
pub fn receiver(&self) -> &Receiver<T> {
|
||||
&self.rx
|
||||
}
|
||||
|
||||
pub fn into_receiver(self) -> Receiver<T> {
|
||||
self.rx
|
||||
}
|
||||
}
|
||||
|
||||
pub fn spawn_thread<F, T>(name: &str, f: F) -> thread::JoinHandle<T>
|
||||
where
|
||||
F: FnOnce() -> T,
|
||||
F: Send + 'static,
|
||||
T: Send + 'static,
|
||||
{
|
||||
thread::Builder::new()
|
||||
.name(name.to_owned())
|
||||
.spawn(f)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#[test]
|
||||
fn test_headers() {
|
||||
use bitcoin::blockdata::block::BlockHeader;
|
||||
use bitcoin::hash_types::{BlockHash, TxMerkleNode};
|
||||
use bitcoin::hashes::Hash;
|
||||
|
||||
use super::HeaderList;
|
||||
|
||||
// Test an empty header list
|
||||
let null_hash = BlockHash::default();
|
||||
let mut header_list = HeaderList::empty();
|
||||
assert_eq!(header_list.tip(), null_hash);
|
||||
let ordered = header_list.order(vec![]);
|
||||
assert_eq!(ordered.len(), 0);
|
||||
header_list.apply(vec![], null_hash);
|
||||
|
||||
let merkle_root = TxMerkleNode::hash(&[255]);
|
||||
let mut headers = vec![BlockHeader {
|
||||
version: 1,
|
||||
prev_blockhash: BlockHash::default(),
|
||||
merkle_root,
|
||||
time: 0,
|
||||
bits: 0,
|
||||
nonce: 0,
|
||||
}];
|
||||
for _height in 1..10 {
|
||||
let prev_blockhash = headers.last().unwrap().block_hash();
|
||||
let header = BlockHeader {
|
||||
version: 1,
|
||||
prev_blockhash,
|
||||
merkle_root,
|
||||
time: 0,
|
||||
bits: 0,
|
||||
nonce: 0,
|
||||
};
|
||||
headers.push(header);
|
||||
}
|
||||
|
||||
// Test adding some new headers
|
||||
let ordered = header_list.order(headers[..3].to_vec());
|
||||
assert_eq!(ordered.len(), 3);
|
||||
header_list.apply(ordered.clone(), ordered[2].hash);
|
||||
assert_eq!(header_list.len(), 3);
|
||||
assert_eq!(header_list.tip(), ordered[2].hash);
|
||||
for h in 0..3 {
|
||||
let entry = header_list.header_by_height(h).unwrap();
|
||||
assert_eq!(entry.header, headers[h]);
|
||||
assert_eq!(entry.hash, headers[h].block_hash());
|
||||
assert_eq!(entry.height, h);
|
||||
assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
|
||||
}
|
||||
|
||||
// Test adding some more headers
|
||||
let ordered = header_list.order(headers[3..6].to_vec());
|
||||
assert_eq!(ordered.len(), 3);
|
||||
header_list.apply(ordered.clone(), ordered[2].hash);
|
||||
assert_eq!(header_list.len(), 6);
|
||||
assert_eq!(header_list.tip(), ordered[2].hash);
|
||||
for h in 0..6 {
|
||||
let entry = header_list.header_by_height(h).unwrap();
|
||||
assert_eq!(entry.header, headers[h]);
|
||||
assert_eq!(entry.hash, headers[h].block_hash());
|
||||
assert_eq!(entry.height, h);
|
||||
assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
|
||||
}
|
||||
|
||||
// Test adding some more headers (with an overlap)
|
||||
let ordered = header_list.order(headers[5..].to_vec());
|
||||
assert_eq!(ordered.len(), 5);
|
||||
header_list.apply(ordered.clone(), ordered[4].hash);
|
||||
assert_eq!(header_list.len(), 10);
|
||||
assert_eq!(header_list.tip(), ordered[4].hash);
|
||||
for h in 0..10 {
|
||||
let entry = header_list.header_by_height(h).unwrap();
|
||||
assert_eq!(entry.header, headers[h]);
|
||||
assert_eq!(entry.hash, headers[h].block_hash());
|
||||
assert_eq!(entry.height, h);
|
||||
assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
|
||||
}
|
||||
|
||||
// Reorg the chain and test apply() on it
|
||||
for h in 8..10 {
|
||||
headers[h].nonce += 1;
|
||||
headers[h].prev_blockhash = headers[h - 1].block_hash()
|
||||
}
|
||||
// Test reorging the chain
|
||||
let ordered = header_list.order(headers[8..10].to_vec());
|
||||
assert_eq!(ordered.len(), 2);
|
||||
header_list.apply(ordered.clone(), ordered[1].hash);
|
||||
assert_eq!(header_list.len(), 10);
|
||||
assert_eq!(header_list.tip(), ordered[1].hash);
|
||||
for h in 0..10 {
|
||||
let entry = header_list.header_by_height(h).unwrap();
|
||||
assert_eq!(entry.header, headers[h]);
|
||||
assert_eq!(entry.hash, headers[h].block_hash());
|
||||
assert_eq!(entry.height, h);
|
||||
assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
|
||||
}
|
||||
|
||||
// Test "trimming" the chain
|
||||
header_list.apply(vec![], headers[7].block_hash());
|
||||
assert_eq!(header_list.len(), 8);
|
||||
assert_eq!(header_list.tip(), headers[7].block_hash());
|
||||
for h in 0..8 {
|
||||
let entry = header_list.header_by_height(h).unwrap();
|
||||
assert_eq!(entry.header, headers[h]);
|
||||
assert_eq!(entry.hash, headers[h].block_hash());
|
||||
assert_eq!(entry.height, h);
|
||||
assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
|
||||
}
|
||||
|
||||
// Test "un-trimming" the chain
|
||||
let ordered = header_list.order(headers[8..].to_vec());
|
||||
assert_eq!(ordered.len(), 2);
|
||||
header_list.apply(ordered.clone(), ordered[1].hash);
|
||||
assert_eq!(header_list.len(), 10);
|
||||
assert_eq!(header_list.tip(), ordered[1].hash);
|
||||
for h in 0..10 {
|
||||
let entry = header_list.header_by_height(h).unwrap();
|
||||
assert_eq!(entry.header, headers[h]);
|
||||
assert_eq!(entry.hash, headers[h].block_hash());
|
||||
assert_eq!(entry.height, h);
|
||||
assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
|
||||
}
|
||||
}
|
||||
}
|
15
sync.sh
Executable file
15
sync.sh
Executable file
@ -0,0 +1,15 @@
|
||||
#!/bin/bash
|
||||
set -eux
|
||||
cd `dirname $0`
|
||||
|
||||
cargo fmt --all
|
||||
cargo build --all --release
|
||||
|
||||
NETWORK=$1
|
||||
shift
|
||||
|
||||
CMD="target/release/sync --network $NETWORK --db-dir ./db2 --daemon-dir $HOME/.bitcoin"
|
||||
export RUST_LOG=${RUST_LOG-info}
|
||||
$CMD --ignore-mempool $*
|
||||
|
||||
# use SIGINT to quit
|
107
tests/run.sh
Executable file
107
tests/run.sh
Executable file
@ -0,0 +1,107 @@
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
rm -rf data/
|
||||
mkdir -p data/{bitcoin,electrum,electrs}
|
||||
|
||||
cleanup() {
|
||||
trap - SIGTERM SIGINT
|
||||
set +eo pipefail
|
||||
jobs
|
||||
for j in `jobs -rp`
|
||||
do
|
||||
kill $j
|
||||
wait $j
|
||||
done
|
||||
}
|
||||
trap cleanup SIGINT SIGTERM EXIT
|
||||
|
||||
BTC="bitcoin-cli -regtest -datadir=data/bitcoin"
|
||||
ELECTRUM="electrum --regtest"
|
||||
EL="$ELECTRUM --wallet=data/electrum/wallet"
|
||||
|
||||
tail_log() {
|
||||
tail -n +0 -F $1 || true
|
||||
}
|
||||
|
||||
echo "Starting $(bitcoind -version | head -n1)..."
|
||||
bitcoind -regtest -datadir=data/bitcoin -printtoconsole=0 &
|
||||
BITCOIND_PID=$!
|
||||
|
||||
$BTC -rpcwait getblockcount > /dev/null
|
||||
|
||||
echo "Creating Electrum `electrum version --offline` wallet..."
|
||||
WALLET=`$EL --offline create --seed_type=segwit`
|
||||
MINING_ADDR=`$EL --offline getunusedaddress`
|
||||
|
||||
$BTC generatetoaddress 110 $MINING_ADDR > /dev/null
|
||||
echo `$BTC getblockchaininfo | jq -r '"Generated \(.blocks) regtest blocks (\(.size_on_disk/1e3) kB)"'` to $MINING_ADDR
|
||||
|
||||
TIP=`$BTC getbestblockhash`
|
||||
|
||||
export RUST_LOG=electrs=debug
|
||||
electrs \
|
||||
--db-dir=data/electrs \
|
||||
--daemon-dir=data/bitcoin \
|
||||
--network=regtest \
|
||||
2> data/electrs/regtest-debug.log &
|
||||
ELECTRS_PID=$!
|
||||
tail_log data/electrs/regtest-debug.log | grep -m1 "serving Electrum RPC"
|
||||
curl localhost:24224 -o metrics.txt
|
||||
|
||||
$ELECTRUM daemon --server localhost:60401:t -1 -vDEBUG 2> data/electrum/regtest-debug.log &
|
||||
ELECTRUM_PID=$!
|
||||
tail_log data/electrum/regtest-debug.log | grep -m1 "connection established"
|
||||
$EL getinfo | jq .
|
||||
|
||||
echo "Loading Electrum wallet..."
|
||||
test `$EL load_wallet` == "true"
|
||||
|
||||
echo "Running integration tests:"
|
||||
|
||||
echo " * getbalance"
|
||||
test "`$EL getbalance | jq -c .`" == '{"confirmed":"550","unmatured":"4950"}'
|
||||
|
||||
echo " * getunusedaddress"
|
||||
NEW_ADDR=`$EL getunusedaddress`
|
||||
|
||||
echo " * payto & broadcast"
|
||||
TXID=$($EL broadcast $($EL payto $NEW_ADDR 123 --fee 0.001))
|
||||
|
||||
echo " * get_tx_status"
|
||||
test "`$EL get_tx_status $TXID | jq -c .`" == '{"confirmations":0}'
|
||||
|
||||
echo " * getaddresshistory"
|
||||
test "`$EL getaddresshistory $NEW_ADDR | jq -c .`" == "[{\"fee\":100000,\"height\":0,\"tx_hash\":\"$TXID\"}]"
|
||||
|
||||
echo " * getbalance"
|
||||
test "`$EL getbalance | jq -c .`" == '{"confirmed":"550","unconfirmed":"-0.001","unmatured":"4950"}'
|
||||
|
||||
echo "Generating bitcoin block..."
|
||||
$BTC generatetoaddress 1 $MINING_ADDR > /dev/null
|
||||
$BTC getblockcount > /dev/null
|
||||
|
||||
echo " * wait for new block"
|
||||
kill -USR1 $ELECTRS_PID # notify server to index new block
|
||||
tail_log data/electrum/regtest-debug.log | grep -m1 "verified $TXID" > /dev/null
|
||||
|
||||
echo " * get_tx_status"
|
||||
test "`$EL get_tx_status $TXID | jq -c .`" == '{"confirmations":1}'
|
||||
|
||||
echo " * getaddresshistory"
|
||||
test "`$EL getaddresshistory $NEW_ADDR | jq -c .`" == "[{\"height\":111,\"tx_hash\":\"$TXID\"}]"
|
||||
|
||||
echo " * getbalance"
|
||||
test "`$EL getbalance | jq -c .`" == '{"confirmed":"599.999","unmatured":"4950.001"}'
|
||||
|
||||
echo "Electrum `$EL stop`" # disconnect wallet
|
||||
wait $ELECTRUM_PID
|
||||
|
||||
kill -INT $ELECTRS_PID # close server
|
||||
tail_log data/electrs/regtest-debug.log | grep -m1 "stopping Electrum RPC server"
|
||||
wait $ELECTRS_PID
|
||||
|
||||
$BTC stop # stop bitcoind
|
||||
wait $BITCOIND_PID
|
||||
|
||||
echo "=== PASSED ==="
|