
Refactor and sync using p2p protocol

Roman Zeyde 2021-03-26 11:05:58 +03:00
parent 3c4cf72960
commit 3780d7d48b
54 changed files with 3818 additions and 5104 deletions


@ -1,3 +1,11 @@
target/
.git/
_*/
.*
_*
contrib
db*
dist
doc
Dockerfile
examples
scripts
target
tests


@ -1,22 +1,15 @@
name: Rust
name: electrs
on: [push, pull_request]
jobs:
electrs:
name: Electrum Integration Test
runs-on: ubuntu-latest
steps:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.toolchain }}
profile: minimal
override: true
- uses: actions/checkout@v1
- name: Check
run: cargo check
- name: Checkout
uses: actions/checkout@v2
- name: Build
run: cargo build
- name: Run tests
run: cargo test
run: docker build . --rm -t electrs:tests
- name: Test
run: docker run -v $PWD/contrib/:/contrib -v $PWD/tests/:/tests --rm electrs:tests bash /tests/run.sh

.gitignore vendored

@ -1,5 +1,5 @@
target
*db/
/db*/
_*/
*.log
*.sublime*
@ -8,3 +8,6 @@ _*/
.env
*.dat
electrs.toml
data/
tests/bitcoin-*
tests/bin

Cargo.lock generated

File diff suppressed because it is too large

@ -1,6 +1,6 @@
[package]
name = "electrs"
version = "0.8.5"
version = "0.9.0"
authors = ["Roman Zeyde <me@romanzey.de>"]
description = "An efficient re-implementation of Electrum Server in Rust"
license = "MIT"
@ -12,40 +12,30 @@ readme = "README.md"
edition = "2018"
build = "build.rs"
[package.metadata.configure_me]
spec = "config_spec.toml"
[features]
default = ["rocksdb/zstd"]
[profile.release]
lto = true
[package.metadata.configure_me]
spec = "internal/config_specification.toml"
[dependencies]
base64 = "0.10"
bincode = "1.0"
bitcoin = { version = "0.24", features = ["use-serde"] }
configure_me = "0.3.4"
configure_me_codegen = "0.3.14"
crossbeam-channel = "0.3"
dirs = "1.0"
error-chain = "0.12"
glob = "0.3"
hex = "0.3"
libc = "0.2"
anyhow = "1.0"
bitcoin = { version = "0.26", features = ["use-serde", "rand"] }
bitcoincore-rpc = "0.13"
configure_me = "0.4"
crossbeam-channel = "0.5"
dirs-next = "2.0"
env_logger = "0.7"
hyper = "0.10"
log = "0.4"
lru = "0.1"
num_cpus = "1.0"
page_size = "0.4"
prometheus = "0.5"
protobuf = "= 2.14.0" # https://github.com/stepancheg/rust-protobuf/blob/master/CHANGELOG.md#2150---2020-06-21
rocksdb = "= 0.12.2" # due to https://github.com/romanz/electrs/issues/193
prometheus = { version = "0.11", features = ["process"] }
rayon = "1.5"
rocksdb = { git = "https://github.com/romanz/rust-rocksdb", rev = "379c2d7f2a15fe31d2ec2726f4b6179de5c8c287", default-features = false } # to support building with Rust 1.41.1
rust-crypto = "0.2"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
signal-hook = "0.1"
stderrlog = "0.4.1"
sysconf = ">=0.3.4"
time = "0.1"
tiny_http = "0.6"
signal-hook = "0.3"
[build-dependencies]
configure_me_codegen = "0.3.12"
configure_me_codegen = "0.4"


@ -1,25 +1,42 @@
FROM rust:1.44.1-slim-buster
### Electrum Rust Server ###
FROM rust:1.48.0-slim as electrs-build
RUN apt-get update
RUN apt-get install -qq -y clang cmake
RUN rustup component add rustfmt
RUN apt-get update \
&& apt-get install -y --no-install-recommends clang=1:7.* cmake=3.* \
libsnappy-dev=1.* \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Build, test and install electrs
WORKDIR /build/electrs
COPY . .
RUN cargo fmt -- --check
RUN cargo build --locked --release --all
RUN cargo test --locked --release --all
RUN cargo install --locked --path .
RUN adduser --disabled-login --system --shell /bin/false --uid 1000 user
FROM debian:buster-slim as updated
RUN apt-get update -qqy
WORKDIR /home/user
COPY ./ /home/user
RUN chown -R user .
### Bitcoin Core ###
FROM updated as bitcoin-build
# Download
RUN apt-get install -qqy wget
WORKDIR /build/bitcoin
RUN wget -q https://bitcoincore.org/bin/bitcoin-core-0.21.0/bitcoin-0.21.0-x86_64-linux-gnu.tar.gz
RUN tar xvf bitcoin-0.21.0-x86_64-linux-gnu.tar.gz
RUN mv -v bitcoin-0.21.0/bin/bitcoind .
RUN mv -v bitcoin-0.21.0/bin/bitcoin-cli .
USER user
FROM updated as result
# Copy the binaries
COPY --from=electrs-build /usr/local/cargo/bin/electrs /usr/bin/electrs
COPY --from=bitcoin-build /build/bitcoin/bitcoind /build/bitcoin/bitcoin-cli /usr/bin/
RUN bitcoind -version && bitcoin-cli -version
RUN cargo install --path .
### Electrum ###
# Clone latest Electrum wallet and a few test tools
WORKDIR /build/
RUN apt-get install -qqy git libsecp256k1-0 python3-cryptography python3-setuptools python3-pip jq
RUN git clone --recurse-submodules https://github.com/spesmilo/electrum/ && cd electrum/ && git log -1
RUN python3 -m pip install -e electrum/
# Electrum RPC
EXPOSE 50001
# Prometheus monitoring
EXPOSE 4224
STOPSIGNAL SIGINT
RUN electrum version --offline
WORKDIR /


@ -22,9 +22,13 @@ def main():
for addr in args.address:
script = network.parse.address(addr).script()
script_hash = hashlib.sha256(script).digest()[::-1].hex()
reply = conn.call('blockchain.scripthash.get_balance', script_hash)
reply = conn.call('blockchain.scripthash.subscribe', script_hash)
print(f'{reply}')
reply = conn.call('blockchain.scripthash.get_history', script_hash)
result = reply['result']
print('{} has {} satoshis'.format(addr, result))
print(f'{addr} has {len(result)} transactions:')
for tx in result:
print(f'* {tx["tx_hash"]}')
if __name__ == '__main__':


@ -12,6 +12,7 @@ class Client:
'id': self.id,
'method': method,
'params': list(args),
'jsonrpc': '2.0',
}
msg = json.dumps(req) + '\n'
self.s.sendall(msg.encode('ascii'))

contrib/get_tip.py Executable file

@ -0,0 +1,16 @@
#!/usr/bin/env python3
import argparse
import client
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument("host")
parser.add_argument("port", type=int)
args = parser.parse_args()
conn = client.Client((args.host, args.port))
print(json.dumps(conn.call("blockchain.headers.subscribe")["result"]))
if __name__ == '__main__':
main()

contrib/get_tx.py Executable file

@ -0,0 +1,16 @@
#!/usr/bin/env python3
import argparse
import client
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument("txid")
args = parser.parse_args()
conn = client.Client(("localhost", 50001))
tx = conn.call("blockchain.transaction.get", args.txid, True)["result"]
print(json.dumps(tx))
if __name__ == "__main__":
main()

contrib/health_check.py Executable file

@ -0,0 +1,16 @@
#!/usr/bin/env python3
import argparse
import client
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument("host")
parser.add_argument("port", type=int)
args = parser.parse_args()
conn = client.Client((args.host, args.port))
print(json.dumps(conn.call("server.version", "health_check", "1.4")["result"]))
if __name__ == '__main__':
main()


@ -1,33 +1,23 @@
#!/usr/bin/env python3
import argparse
import daemon
import client
def main():
parser = argparse.ArgumentParser()
parser.add_argument('txid')
parser.add_argument("txid")
args = parser.parse_args()
d = daemon.Daemon(port=8332, cookie_dir='~/.bitcoin')
txid = args.txid
conn = client.Client(("localhost", 50001))
tx = conn.call("blockchain.transaction.get", args.txid, True)["result"]
fee = 0
for vin in tx["vin"]:
prev_txid = vin["txid"]
prev_tx = conn.call("blockchain.transaction.get", prev_txid, True)["result"]
txo = prev_tx["vout"][vin["vout"]]
fee += txo["value"]
fee -= sum(vout["value"] for vout in tx["vout"])
txn, = d.request('getrawtransaction', [[txid, True]])
vin = txn['vin']
print(f'vSize = {tx["vsize"]}, Fee = {1e3 * fee:.2f} mBTC = {1e8 * fee / tx["vsize"]:.2f} sat/vB')
fee = 0.0
for txi in txn['vin']:
prev_txid = txi['txid']
prev_tx, = d.request('getrawtransaction', [[prev_txid, True]])
index = txi['vout']
prev_txo = prev_tx['vout'][index]
print(f"{prev_txid}:{index:<5} {prev_txo['value']:+20.8f}")
fee += prev_txo['value']
for i, txo in enumerate(txn['vout']):
print(f"{txid}:{i:<5} {-txo['value']:+20.8f}")
fee -= txo['value']
print(f"Fee = {1e6 * fee:.2f} uBTC = {1e8 * fee / txn['vsize']:.2f} sat/vB")
if __name__ == '__main__':
if __name__ == "__main__":
main()


@ -1,31 +1,31 @@
# Index Schema
The index is stored at a single RocksDB database using the following schema:
The index is stored in a single RocksDB database using the following column families:
## Transaction outputs' index
## Transaction outputs' index (`funding`)
Allows efficiently finding all funding transactions for a specific address:
| Code | Script Hash Prefix | Funding TxID Prefix | |
| ------ | -------------------- | --------------------- | - |
| `b'O'` | `SHA256(script)[:8]` | `txid[:8]` | |
| Script Hash Prefix | Confirmed Block Height |
| -------------------- | ---------------------- |
| `SHA256(script)[:8]` | `height as u32` |
## Transaction inputs' index
## Transaction inputs' index (`spending`)
Allows efficiently finding spending transaction of a specific output:
| Code | Funding TxID Prefix | Funding Output Index | Spending TxID Prefix | |
| ------ | -------------------- | --------------------- | --------------------- | - |
| `b'I'` | `txid[:8]` | `uint16` | `txid[:8]` | |
| Previous Outpoint Prefix | Confirmed Block Height |
| ------------------------ | ---------------------- |
| `txid[:8] as u64 + vout` | `height as u32` |
## Full Transaction IDs
## Transaction ID index (`txid`)
In order to save storage space, we store the full transaction IDs once, and use their 8-byte prefixes for the indexes above.
In order to save storage space, we map the 8-byte transaction ID prefix to its confirmed block height:
| Code | Transaction ID | | Confirmed height |
| ------ | ----------------- | - | ------------------ |
| `b'T'` | `txid` (32 bytes) | | `uint32` |
| Txid Prefix | Confirmed height |
| ----------- | ---------------- |
| `txid[:8]` | `height as u32` |
Note that this mapping allows us to use the `getrawtransaction` RPC to retrieve actual transaction data from bitcoind without `-txindex` enabled
(by explicitly specifying the [blockhash](https://github.com/bitcoin/bitcoin/commit/497d0e014cc79d46531d570e74e4aeae72db602d)).
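To make the key layout concrete, here is a small sketch (not part of this commit) that packs rows according to the column families described above; the concatenation order and little-endian encoding are assumptions for illustration only.

```python
import hashlib
import struct

def funding_row(script_pubkey: bytes, height: int) -> bytes:
    # `funding`: 8-byte script-hash prefix followed by the confirmed height as u32
    prefix = hashlib.sha256(script_pubkey).digest()[:8]
    return prefix + struct.pack("<I", height)

def spending_row(prev_txid: bytes, vout: int, height: int) -> bytes:
    # `spending`: prefix derived from the spent outpoint (txid[:8] as u64 + vout),
    # followed by the confirmed height as u32
    outpoint = (struct.unpack("<Q", prev_txid[:8])[0] + vout) & 0xFFFFFFFFFFFFFFFF
    return struct.pack("<Q", outpoint) + struct.pack("<I", height)

def txid_row(txid: bytes, height: int) -> bytes:
    # `txid`: 8-byte txid prefix mapped to the confirmed height as u32
    return txid[:8] + struct.pack("<I", height)
```

A lookup by script hash can then scan `funding` rows by their 8-byte prefix, fetch the candidate blocks by height, and (presumably) discard prefix collisions by inspecting the actual transactions.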


@ -4,66 +4,201 @@
### Build dependencies
Install [recent Rust](https://rustup.rs/) (1.41.1+, `apt install cargo` is preferred for Debian 10),
[latest Bitcoin Core](https://bitcoincore.org/en/download/) (0.16+)
and [latest Electrum wallet](https://electrum.org/#download) (3.3+).
Note for Raspberry Pi 4 owners: the old versions of OS/toolchains produce broken binaries.
Make sure to use latest OS! (see #226)
Also, install the following packages (on Debian):
Install [recent Rust](https://rustup.rs/) (1.48.0+, `apt install cargo` is preferred for Debian 10),
[latest Bitcoin Core](https://bitcoincore.org/en/download/) (0.21+)
and [latest Electrum wallet](https://electrum.org/#download) (4.0+).
Also, install the following packages (on Debian or Ubuntu):
```bash
$ sudo apt update
$ sudo apt install clang cmake # for building 'rust-rocksdb'
$ sudo apt install clang cmake build-essential # for building 'rust-rocksdb'
```
Note for Raspberry Pi 4 owners: the old versions of OS/toolchains produce broken binaries. Make sure to use latest OS! (see #226)
There are two ways to compile `electrs`: by statically linking to `librocksdb` or dynamically linking.
Optionally, you may install [`cfg_me`](https://github.com/Kixunil/cfg_me) tool for generating the manual page. The easiest way is to run `cargo install cfg_me`.
The advantages of static linking:
### Build
* The binary is self-contained and doesn't need other dependencies; it can be transferred to another machine without worrying
* The binary should work with pretty much every common distro
* Different library installed elsewhere doesn't affect the behavior of `electrs`
The advantages of dynamic linking:
* If a (security) bug is found in the library, you only need to upgrade/recompile the library to fix it, no need to recompile `electrs`
* Updating rocksdb can be as simple as `apt upgrade`
* The build is significantly faster (if you already have the binary version of the library from packages)
* The build is deterministic
* Cross compilation is more reliable
* If another application is also using `rocksdb`, you don't store it on disk and in RAM twice
If you decided to use dynamic linking, you will also need to install the library.
On Debian:
```bash
$ sudo apt install librocksdb-dev
```
#### Preparing for cross compilation
Cross compilation can save you some time since you can compile `electrs` for a slower computer (like Raspberry Pi) on a faster machine
even one with a different CPU architecture.
Skip this section if that's not your case.
If you want to cross-compile, you need to install some additional packages.
These cross compilation instructions use `aarch64`/`arm64` + Linux as an example.
(The resulting binary should work on RPi 4 with aarch64-enabled OS).
Change to your desired architecture/OS.
If you use Debian (or a derived distribution) you need to enable the target architecture:
```
$ sudo dpkg --add-architecture arm64
$ sudo apt update
```
If you use `cargo` from the repository:
```bash
$ sudo apt install gcc-aarch64-linux-gnu libc6-dev:arm64 libstd-rust-dev:arm64
```
If you use Rustup:
```bash
$ sudo apt install gcc-aarch64-linux-gnu libc6-dev:arm64
$ rustup target add aarch64-unknown-linux-gnu
```
If you decided to use the system rocksdb (recommended if the target OS supports it), you need the version from the other architecture:
```bash
$ sudo apt install librocksdb-dev:arm64
```
#### Preparing man page generation (optional)
Optionally, you may install [`cfg_me`](https://github.com/Kixunil/cfg_me) tool for generating the manual page.
The easiest way is to run `cargo install cfg_me`.
#### Download electrs
First build should take ~20 minutes:
```bash
$ git clone https://github.com/romanz/electrs
$ cd electrs
$ cargo build --release
```
### Build
Note: you need to have enough free RAM to build `electrs`.
The build will fail otherwise.
Close those 100 old tabs in the browser. ;)
#### Static linking
First build should take ~20 minutes:
```bash
$ cargo build --locked --release
```
#### Dynamic linking
```
$ ROCKSDB_INCLUDE_DIR=/usr/include ROCKSDB_LIB_DIR=/usr/lib cargo build --locked --no-default-features --release
```
(Don't worry about `--no-default-features`, it's only related to rocksdb linking.)
#### Cross compilation
Run one of the commands above (depending on the linking type) with the argument `--target aarch64-unknown-linux-gnu`, prepending these env vars: `BINDGEN_EXTRA_CLANG_ARGS="-target gcc-aarch64-linux-gnu" RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc"`
E.g. for dynamic linking case:
```
$ ROCKSDB_INCLUDE_DIR=/usr/include ROCKSDB_LIB_DIR=/usr/lib BINDGEN_EXTRA_CLANG_ARGS="-target gcc-aarch64-linux-gnu" RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc" cargo build --locked --release --target aarch64-unknown-linux-gnu
```
It's a bit long but sufficient! You will find the resulting binary in `target/aarch64-unknown-linux-gnu/release/electrs` - copy it to your target machine.
#### Generating man pages
If you installed `cfg_me` to generate man page, you can run `cfg_me man` to see it right away or `cfg_me -o electrs.1 man` to save it into a file (`electrs.1`).
## Docker-based installation from source
Note: currently Docker installation links statically
Note: health check only works if Prometheus is running on port 4224 inside container
```bash
$ docker build -t electrs-app .
$ mkdir db
$ docker run --network host \
--volume $HOME/.bitcoin:/home/user/.bitcoin:ro \
--volume $PWD:/home/user \
--rm -i -t electrs-app \
electrs -vvvv --timestamp --db-dir /home/user/db
--volume $PWD/db:/home/user/db \
--env ELECTRS_VERBOSE=4 \
--env ELECTRS_TIMESTAMP=true \
--env ELECTRS_DB_DIR=/home/user/db \
--rm -i -t electrs-app
```
If not using the host network, you probably want to expose the ports for electrs and Prometheus like so:
```bash
$ docker run --volume $HOME/.bitcoin:/home/user/.bitcoin:ro \
--volume $PWD/db:/home/user/db \
--env ELECTRS_VERBOSE=4 \
--env ELECTRS_TIMESTAMP=true \
--env ELECTRS_DB_DIR=/home/user/db \
--env ELECTRS_ELECTRUM_RPC_ADDR=0.0.0.0:50001 \
--env ELECTRS_MONITORING_ADDR=0.0.0.0:4224 \
--rm -i -t electrs-app
```
To access the server from outside Docker, add `-p 50001:50001 -p 4224:4224` but be aware of the security risks. Good practice is to group containers that need access to the server inside the same Docker network and not expose the ports to the outside world.
## Native OS packages
There are currently no official/stable binary pckages.
There are currently no official/stable binary packages.
However, there's an [**experimental** repository for Debian 10](https://deb.ln-ask.me) (should work on recent Ubuntu, but not tested well-enough). The repository provides several significant advantages:
However, there's a [*beta* repository for Debian 10](https://deb.ln-ask.me) (should work on recent Ubuntu, but not tested well-enough)
The repository provides several significant advantages:
* Everything is completely automatic - after installing `electrs` via `apt`, it's running and will automatically run on reboot, restart after crash... It also connects to bitcoind out-of-the-box, no messing with config files or anything else. It just works.
* Prebuilt binaries save you a lot of time. The binary installation of all the components is under 3 minutes on common hardware. Building from source is much longer.
* Everything is completely automatic - after installing `electrs` via `apt`, it's running and will automatically run on reboot, restart after a crash...
It also connects to bitcoind out-of-the-box, no messing with config files or anything else.
It just works.
* Prebuilt binaries save you a lot of time.
The binary installation of all the components is under 3 minutes on common hardware.
Building from source is much longer.
* The repository contains some security hardening out-of-the-box - separate users for services, use of [btc-rpc-proxy](https://github.com/Kixunil/btc-rpc-proxy), etc.
And two significant disadvantages:
And two disadvantages:
* It's currently impossible to independently verify the built packages, so you have to trust the author of the repository. This will hopefully change in the future.
* The repository is considered experimental and not well tested yet. The author of the repository is also a contributor to `electrs` and appreciates [bug reports](https://github.com/Kixunil/cryptoanarchy-deb-repo-builder/issues), [test reports](https://github.com/Kixunil/cryptoanarchy-deb-repo-builder/issues/61), and other contributions.
* It's currently not trivial to independently verify the built packages, so you may need to trust the author of the repository.
The build is now deterministic but nobody verified it independently yet.
* The repository is considered beta.
`electrs` seems to work well so far but was not tested heavily.
The author of the repository is also a contributor to `electrs` and appreciates [bug reports](https://github.com/Kixunil/cryptoanarchy-deb-repo-builder/issues),
[test reports](https://github.com/Kixunil/cryptoanarchy-deb-repo-builder/issues/61), and other contributions.
## Manual configuration
This applies only if you do **not** use some other automated systems such as Debian packages. If you use automated systems, refer to their documentation first!
This applies only if you do **not** use some other automated systems such as Debian packages.
If you use automated systems, refer to their documentation first!
### Bitcoind configuration
Pruning must be turned **off** for `electrs` to work. `txindex` is allowed but unnecessary for `electrs`. However, you might still need it if you run other services (e.g.`eclair`)
Pruning must be turned **off** for `electrs` to work.
`txindex` is allowed but unnecessary for `electrs`.
However, you might still need it if you run other services (e.g. `eclair`).
The highly recommended way of authenticating `electrs` is using cookie file. It's the most secure and robust method. Set `rpccookiefile` option of `bitcoind` to a file within an existing directory which it can access. You can skip it if you're running both daemons under the same user and with the default directories.
The highly recommended way of authenticating `electrs` is using cookie file.
It's the most [secure](https://github.com/Kixunil/security_writings/blob/master/cookie_files.md) and robust method.
Set `rpccookiefile` option of `bitcoind` to a file within an existing directory which it can access.
You can skip it if you're running both daemons under the same user and with the default directories.
`electrs` will wait for `bitcoind` to sync; however, you will be unable to use it until the syncing is done.
@ -74,26 +209,52 @@ $ bitcoind -server=1 -txindex=0 -prune=0
```
### Electrs configuration
Electrs can be configured using command line, environment variables and configuration files (or their combination). It is highly recommended to use configuration files for any non-trivial setups since it's easier to manage. If you're setting password manually instead of cookie files, configuration file is the only way to set it due to security reasons.
Electrs can be configured using command line, environment variables and configuration files (or their combination).
It is highly recommended to use configuration files for any non-trivial setups since it's easier to manage.
If you're setting the password manually instead of using cookie files, a configuration file is the only way to set it, for security reasons.
### Configuration files and priorities
The config files must be in the Toml format. These config files are (from lowest priority to highest): `/etc/electrs/config.toml`, `~/.electrs/config.toml`, `./electrs.toml`.
The Toml-formatted config files ([an example here](config_example.toml)) are (from lowest priority to highest): `/etc/electrs/config.toml`, `~/.electrs/config.toml`, `./electrs.toml`.
The options in highest-priority config files override options set in lowest-priority config files. Environment variables override options in config files and finally arguments override everythig else. There are two special arguments `--conf` which reads the specified file and `--conf-dir`, which read all the files in the specified directory. The options in those files override **everything that was set previously, including arguments that were passed before these arguments**. In general, later arguments override previous ones. It is a good practice to use these special arguments at the beginning of the command line in order to avoid confusion.
The options in highest-priority config files override options set in lowest-priority config files.
For each command line argument an environment variable of the same name with `ELECTRS_` prefix, upper case letters and underscores instead of hypens exists (e.g. you can use `ELECTRS_ELECTRUM_RPC_ADDR` instead of `--electrum-rpc-addr`). Similarly, for each such argument an option in config file exists with underscores instead of hypens (e.g. `electrum_rpc_addr`). In addition, config files support `cookie` option to specify cookie - this is not available using command line or environment variables for security reasons (other applications could read it otherwise). Note that this is different from using `cookie_path`, which points to a file containing the cookie instead of being the cookie itself.
**Environment variables** override options in config files and finally **arguments** override everything else.
Finally, you need to use a number in config file if you want to increase verbosity (e.g. `verbose = 3` is equivalent to `-vvv`) and `true` value in case of flags (e.g. `timestamp = true`)
There are two special arguments: `--conf`, which reads the specified file, and `--conf-dir`, which reads all the files in the specified directory.
If you are using `-rpcuser=USER` and `-rpcpassword=PASSWORD` of `bitcoind` for authentication, please use `cookie="USER:PASSWORD"` option in one of the [config files](https://github.com/romanz/electrs/blob/master/doc/usage.md#configuration-files-and-priorities).
Otherwise, [`~/.bitcoin/.cookie`](https://github.com/bitcoin/bitcoin/blob/0212187fc624ea4a02fc99bc57ebd413499a9ee1/contrib/debian/examples/bitcoin.conf#L70-L72) will be used as the default cookie file, allowing this server to use bitcoind JSONRPC interface.
The options in those files override **everything** that was set previously, **including arguments** that were passed before these two special arguments.
In general, later arguments override previous ones.
It is a good practice to use these special arguments at the beginning of the command line in order to avoid confusion.
**Naming convention**
For each command line argument an **environment variable** of the same name exists, with an `ELECTRS_` prefix, upper case letters and underscores instead of hyphens
(e.g. you can use `ELECTRS_ELECTRUM_RPC_ADDR` instead of `--electrum-rpc-addr`).
Similarly, for each such argument an option in the config file exists with underscores instead of hyphens (e.g. `electrum_rpc_addr`).
You need to use a number in the config file if you want to increase verbosity (e.g. `verbose = 3` is equivalent to `-vvv`) and a `true` value in case of flags (e.g. `timestamp = true`).
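As a quick illustration of this naming convention, here is a hedged sketch (not part of this repository) that derives the environment-variable and config-file names from a command-line flag:

```python
def to_env_var(flag: str) -> str:
    # --electrum-rpc-addr -> ELECTRS_ELECTRUM_RPC_ADDR
    return "ELECTRS_" + flag.lstrip("-").replace("-", "_").upper()

def to_config_key(flag: str) -> str:
    # --electrum-rpc-addr -> electrum_rpc_addr
    return flag.lstrip("-").replace("-", "_")

assert to_env_var("--electrum-rpc-addr") == "ELECTRS_ELECTRUM_RPC_ADDR"
assert to_config_key("--electrum-rpc-addr") == "electrum_rpc_addr"
```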
**Authentication**
In addition, config files support `auth` option to specify username and password.
This is not available using command line or environment variables for security reasons (other applications could read it otherwise).
**Important note**: `auth` is different from `cookie_file`, which points to a file containing the cookie instead of being the cookie itself!
If you are using `-rpcuser=USER` and `-rpcpassword=PASSWORD` of `bitcoind` for authentication, please use `auth="USER:PASSWORD"` option in one of the [config files](https://github.com/romanz/electrs/blob/master/doc/usage.md#configuration-files-and-priorities).
Otherwise, [`~/.bitcoin/.cookie`](https://github.com/bitcoin/bitcoin/blob/0212187fc624ea4a02fc99bc57ebd413499a9ee1/contrib/debian/examples/bitcoin.conf#L70-L72) will be used as the default cookie file,
allowing this server to use bitcoind JSONRPC interface.
Note: there was a `cookie` option in version 0.8.7 and below; it's now deprecated - do **not** use it, as it will be removed.
Please read upgrade notes if you're upgrading to a newer version.
### Electrs usage
First index sync should take ~1.5 hours (on a dual core Intel CPU @ 3.3 GHz, 8 GB RAM, 1TB WD Blue HDD):
```bash
$ cargo run --release -- -vvv --timestamp --db-dir ./db --electrum-rpc-addr="127.0.0.1:50001"
$ ./target/release/electrs -vvv --timestamp --db-dir ./db --electrum-rpc-addr="127.0.0.1:50001"
2018-08-17T18:27:42 - INFO - NetworkInfo { version: 179900, subversion: "/Satoshi:0.17.99/" }
2018-08-17T18:27:42 - INFO - BlockchainInfo { chain: "main", blocks: 537204, headers: 537204, bestblockhash: "0000000000000000002956768ca9421a8ddf4e53b1d81e429bd0125a383e3636", pruned: false, initialblockdownload: false }
2018-08-17T18:27:42 - DEBUG - opening DB at "./db/mainnet"
@ -116,14 +277,16 @@ $ cargo run --release -- -vvv --timestamp --db-dir ./db --electrum-rpc-addr="127
2018-08-17T19:58:28 - DEBUG - applying 14 new headers from height 537205
2018-08-17T19:58:29 - INFO - RPC server running on 127.0.0.1:50001
```
You can specify options via command-line parameters, environment variables or using config files. See the documentation above.
You can specify options via command-line parameters, environment variables or using config files.
See the documentation above.
Note that the final DB size should be ~20% of the `blk*.dat` files, but it may increase to ~35% at the end of the initial sync (just before the [full compaction is invoked](https://github.com/facebook/rocksdb/wiki/Manual-Compaction)).
If initial sync fails due to `memory allocation of xxxxxxxx bytes failedAborted` errors, as may happen on devices with limited RAM, try the following arguments when starting `electrs`. It should take roughly 18 hours to sync and compact the index on an ODROID-HC1 with 8 CPU cores @ 2GHz, 2GB RAM, and an SSD using the following command:
If initial sync fails due to `memory allocation of xxxxxxxx bytes failedAborted` errors, as may happen on devices with limited RAM, try the following arguments when starting `electrs`.
It should take roughly 18 hours to sync and compact the index on an ODROID-HC1 with 8 CPU cores @ 2GHz, 2GB RAM, and an SSD using the following command:
```bash
$ cargo run --release -- -vvvv --index-batch-size=10 --jsonrpc-import --db-dir ./db --electrum-rpc-addr="127.0.0.1:50001"
$ ./target/release/electrs -vvvv --index-batch-size=10 --jsonrpc-import --db-dir ./db --electrum-rpc-addr="127.0.0.1:50001"
```
The index database is stored here:
@ -136,7 +299,8 @@ See below for [extra configuration suggestions](https://github.com/romanz/electr
## Electrum client
If you happen to use the Electrum client from [the **experimental** Debian repository](https://github.com/romanz/electrs/blob/master/doc/usage.md#cnative-os-packages), it's pre-configured out-of-the-box already. Read below otherwise.
If you happen to use the Electrum client from [the *beta* Debian repository](https://github.com/romanz/electrs/blob/master/doc/usage.md#cnative-os-packages), it's pre-configured out-of-the-box already.
Read below otherwise.
There's a prepared script for launching `electrum` in such way to connect only to the local `electrs` instance to protect your privacy.
@ -160,7 +324,8 @@ $ electrum # will connect only to the local server
### SSL connection
In order to use a secure connection, you can also use [NGINX as an SSL endpoint](https://docs.nginx.com/nginx/admin-guide/security-controls/terminating-ssl-tcp/#) by placing the following block in `nginx.conf`.
In order to use a secure connection, you can also use [NGINX as an SSL endpoint](https://docs.nginx.com/nginx/admin-guide/security-controls/terminating-ssl-tcp/#)
by placing the following block in `nginx.conf`.
```nginx
stream {
@ -211,7 +376,8 @@ HiddenServiceVersion 3
HiddenServicePort 50001 127.0.0.1:50001
```
If you use [the **experimental** Debian repository](https://github.com/romanz/electrs/blob/master/doc/usage.md#cnative-os-packages), it is cleaner to install `tor-hs-patch-config` using `apt` and then placing the configuration into a file inside `/etc/tor/hidden-services.d`.
If you use [the *beta* Debian repository](https://github.com/romanz/electrs/blob/master/doc/usage.md#cnative-os-packages),
it is cleaner to install `tor-hs-patch-config` using `apt` and then placing the configuration into a file inside `/etc/tor/hidden-services.d`.
Restart the service:
```
@ -233,9 +399,11 @@ For more details, see http://docs.electrum.org/en/latest/tor.html.
### Sample Systemd Unit File
If you use [the **experimental** Debian repository](https://github.com/romanz/electrs/blob/master/doc/usage.md#cnative-os-packages), you should skip this section, as the appropriate systemd unit file is installed automatically.
If you use [the *beta* Debian repository](https://github.com/romanz/electrs/blob/master/doc/usage.md#cnative-os-packages), you should skip this section,
as the appropriate systemd unit file is installed automatically.
You may wish to have systemd manage electrs so that it's "always on." Here is a sample unit file (which assumes that the bitcoind unit file is `bitcoind.service`):
You may wish to have systemd manage electrs so that it's "always on".
Here is a sample unit file (which assumes that the bitcoind unit file is `bitcoind.service`):
```
[Unit]
@ -278,8 +446,8 @@ $ firefox 'http://localhost:9090/graph?g0.range_input=1h&g0.expr=index_height&g0
You can invoke any supported RPC using `netcat`, for example:
```
$ echo '{"jsonrpc": "2.0", "method": "server.version", "id": 0}' | netcat 127.0.0.1 50001
{"id":0,"jsonrpc":"2.0","result":["electrs 0.8.5","1.4"]}
$ echo '{"jsonrpc": "2.0", "method": "server.version", "params": ["", "1.4"], "id": 0}' | netcat 127.0.0.1 50001
{"id":0,"jsonrpc":"2.0","result":["electrs 0.8.6","1.4"]}
```
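The same call can also be made from Python; below is a minimal sketch using the bundled `contrib/client.py` helper (assuming electrs listens on the default mainnet port):

```python
import json
import client  # contrib/client.py

conn = client.Client(("127.0.0.1", 50001))
reply = conn.call("server.version", "example-client", "1.4")
print(json.dumps(reply["result"]))  # e.g. ["electrs 0.8.6", "1.4"]
```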
For more complex tasks, you may need to convert addresses to
@ -290,3 +458,50 @@ For more complex tasks, you may need to convert addresses to
$ ./contrib/addr.py 144STc7gcb9XCp6t4hvrcUEKg9KemivsCR # sample address from block #640699
144STc7gcb9XCp6t4hvrcUEKg9KemivsCR has {'confirmed': 12652436, 'unconfirmed': 0} satoshis
```
## Upgrading
> **If you're upgrading from version 0.8.7 to a higher version and used `cookie` option you should change your configuration!**
> The `cookie` option was deprecated and **will be removed eventually**!
> If you had an actual cookie (from the `~/.bitcoin/.cookie` file) specified in the `cookie` option, this was wrong, as it wouldn't get updated when needed.
> It's strongly recommended to use proper cookie authentication using `cookie_file`.
> If you really have to use fixed username and password, explicitly specified in `bitcoind` config, use `auth` option instead.
> Users of `btc-rpc-proxy` using `public:public` need to use `auth` too.
> You can read [a detailed explanation of cookie deprecation with motivation explained](cookie_deprecation.md).
As with any other application, you need to remember how you installed `electrs` to upgrade it.
If you don't, here's a little help: run `which electrs` and compare the output.
* If you got an error, you didn't install `electrs` into your system in any way; it's probably sitting in the `target/release` directory of the source
* If the path starts with `/bin/` then either you have used packaging system or you made a mistake the first time (non-packaged binaries must go to `/usr/local/bin`)
* If the path starts with `/usr/local/bin` you most likely copied electrs there after building
* If the path starts with `/home/YOUR_USERNAME/.cargo/bin` you most likely ran `cargo install`
### Upgrading distribution package
If you used Debian packaging system you only need this:
```
sudo apt update
sudo apt upgrade
```
Similarly for other distributions - use their respective commands.
If a new version of `electrs` is not yet in the package system, try waiting a few days, or contact the package maintainers if it's been a long time.
### Upgrading manual installation
1. Enter your `electrs` source directory, usually in `~/` but some people like to put it in something like `~/sources`.
If you've deleted it, you need to `git clone` again.
2. `git checkout master`
3. `git pull`
4. Strongly recommended: `git verify-tag v0.8.6` (fix the version number if we've forgotten to update these docs ;)) should show "Good signature from 15C8 C357 4AE4 F1E2 5F3F 35C5 87CA E5FA 4691 7CBB"
5. `git checkout v0.8.6`
6. If you used static linking: `cargo build --locked --release`.
If you used dynamic linking `ROCKSDB_INCLUDE_DIR=/usr/include ROCKSDB_LIB_DIR=/usr/lib cargo build --locked --no-default-features --release`.
If you don't remember which linking you used, you probably used static.
This step will take a few tens of minutes (but dynamic linking is a bit faster), go grab a coffee.
Also remember that you need enough free RAM; the build will fail otherwise.
7. If you've previously copied `electrs` into `/usr/local/bin`, run: `sudo cp target/release/electrs /usr/local/bin`
If you've previously installed `electrs` using `cargo install`: `cargo install --locked --path . -f`
8. If you've manually configured systemd service: `sudo systemctl restart electrs`


@ -1,29 +0,0 @@
/// Benchmark full compaction.
extern crate electrs;
#[macro_use]
extern crate log;
extern crate error_chain;
use electrs::{config::Config, errors::*, store::DBStore};
use error_chain::ChainedError;
fn run(config: Config) -> Result<()> {
if !config.db_path.exists() {
panic!(
"DB {:?} must exist when running this benchmark!",
config.db_path
);
}
let store = DBStore::open(&config.db_path, /*low_memory=*/ true);
store.compact();
Ok(())
}
fn main() {
if let Err(e) = run(Config::from_args()) {
error!("{}", e.display_chain());
}
}


@ -1,42 +0,0 @@
/// Benchmark regular indexing flow (using JSONRPC), don't persist the resulting index.
extern crate electrs;
extern crate error_chain;
#[macro_use]
extern crate log;
use electrs::{
cache::BlockTxIDsCache, config::Config, daemon::Daemon, errors::*, fake::FakeStore,
index::Index, metrics::Metrics, signal::Waiter,
};
use error_chain::ChainedError;
use std::sync::Arc;
fn run() -> Result<()> {
let signal = Waiter::start();
let config = Config::from_args();
let metrics = Metrics::new(config.monitoring_addr);
metrics.start();
let cache = Arc::new(BlockTxIDsCache::new(0, &metrics));
let daemon = Daemon::new(
&config.daemon_dir,
&config.blocks_dir,
config.daemon_rpc_addr,
config.cookie_getter(),
config.network_type,
signal.clone(),
cache,
&metrics,
)?;
let fake_store = FakeStore {};
let index = Index::load(&fake_store, &daemon, &metrics, config.index_batch_size)?;
index.update(&fake_store, &signal)?;
Ok(())
}
fn main() {
if let Err(e) = run() {
error!("{}", e.display_chain());
}
}


@ -1,49 +0,0 @@
extern crate electrs;
extern crate hex;
extern crate log;
use electrs::{config::Config, store::DBStore};
fn max_collision(store: DBStore, prefix: &[u8]) {
let prefix_len = prefix.len();
let mut prev: Option<Vec<u8>> = None;
let mut collision_max = 0;
for row in store.iter_scan(prefix) {
assert!(row.key.starts_with(prefix));
if let Some(prev) = prev {
let collision_len = prev
.iter()
.zip(row.key.iter())
.take_while(|(a, b)| a == b)
.count();
if collision_len > collision_max {
eprintln!(
"{} bytes collision found:\n{:?}\n{:?}\n",
collision_len - prefix_len,
revhex(&prev[prefix_len..]),
revhex(&row.key[prefix_len..]),
);
collision_max = collision_len;
}
}
prev = Some(row.key.to_vec());
}
}
fn revhex(value: &[u8]) -> String {
hex::encode(&value.iter().cloned().rev().collect::<Vec<u8>>())
}
fn run(config: Config) {
if !config.db_path.exists() {
panic!("DB {:?} must exist when running this tool!", config.db_path);
}
let store = DBStore::open(&config.db_path, /*low_memory=*/ false);
max_collision(store, b"T");
}
fn main() {
run(Config::from_args());
}

internal/README.md Normal file

@ -0,0 +1,3 @@
# electrs-internal files
**Nothing for users here, just for developers. ;)**


@ -37,7 +37,15 @@ doc = "Analogous to bitcoind's -blocksdir option, this specifies the directory c
[[param]]
name = "cookie"
type = "String"
doc = "JSONRPC authentication cookie ('USER:PASSWORD', default: read from cookie file)"
doc = "DEPRECATED: use cookie_file or auth instead!"
# Force the user to use config file in order to avoid password leaks
argument = false
env_var = false
[[param]]
name = "auth"
type = "String"
doc = "JSONRPC authentication ('USER:PASSWORD', default: use cookie file)"
# Force the user to use config file in order to avoid password leaks
argument = false
env_var = false
@ -52,23 +60,27 @@ doc = "JSONRPC authentication cookie file (default: ~/.bitcoin/.cookie)"
name = "network"
type = "crate::config::BitcoinNetwork"
convert_into = "::bitcoin::network::constants::Network"
doc = "Select Bitcoin network type ('bitcoin', 'testnet' or 'regtest')"
doc = "Select Bitcoin network type ('bitcoin', 'testnet', 'regtest' or 'signet')"
default = "Default::default()"
[[param]]
name = "electrum_rpc_addr"
type = "crate::config::ResolvAddr"
doc = "Electrum server JSONRPC 'addr:port' to listen on (default: '127.0.0.1:50001' for mainnet, '127.0.0.1:60001' for testnet and '127.0.0.1:60401' for regtest)"
doc = "Electrum server JSONRPC 'addr:port' to listen on (default: '127.0.0.1:50001' for mainnet, '127.0.0.1:60001' for testnet, '127.0.0.1:60401' for regtest and '127.0.0.1:60601' for signet)"
[[param]]
name = "daemon_rpc_addr"
type = "crate::config::ResolvAddr"
doc = "Bitcoin daemon JSONRPC 'addr:port' to connect (default: 127.0.0.1:8332 for mainnet, 127.0.0.1:18332 for testnet and 127.0.0.1:18443 for regtest)"
doc = "Bitcoin daemon JSONRPC 'addr:port' to connect (default: 127.0.0.1:8332 for mainnet, 127.0.0.1:18332 for testnet, 127.0.0.1:18443 for regtest and 127.0.0.1:18554 for signet)"
[[param]]
name = "daemon_p2p_addr"
type = "crate::config::ResolvAddr"
doc = "Bitcoin daemon p2p 'addr:port' to connect (default: 127.0.0.1:8333 for mainnet, 127.0.0.1:18333 for testnet, 127.0.0.1:18444 for regtest and 127.0.0.1:38333 for signet)"
[[param]]
name = "monitoring_addr"
type = "crate::config::ResolvAddr"
doc = "Prometheus monitoring 'addr:port' to listen on (default: 127.0.0.1:4224 for mainnet, 127.0.0.1:14224 for testnet and 127.0.0.1:24224 for regtest)"
doc = "Prometheus monitoring 'addr:port' to listen on (default: 127.0.0.1:4224 for mainnet, 127.0.0.1:14224 for testnet, 127.0.0.1:24224 for regtest and 127.0.0.1:34224 for regtest)"
[[switch]]
name = "jsonrpc_import"
@ -78,19 +90,13 @@ doc = "Use JSONRPC instead of directly importing blk*.dat files. Useful for remo
name = "wait_duration_secs"
type = "u64"
doc = "Duration to wait between bitcoind polling"
default = "5"
default = "10"
[[param]]
name = "index_batch_size"
type = "usize"
doc = "Number of blocks to get in one JSONRPC request from bitcoind"
default = "100"
[[param]]
name = "bulk_index_threads"
type = "usize"
doc = "Number of threads used for bulk indexing (default: use the # of CPUs)"
default = "0"
default = "10"
[[param]]
name = "tx_cache_size_mb"

query.sh Executable file

@ -0,0 +1,15 @@
#!/bin/bash
set -eux
cd `dirname $0`
cargo fmt --all
cargo build --all --release
NETWORK=$1
shift
CMD="target/release/query --network $NETWORK --db-dir ./db2 --daemon-dir $HOME/.bitcoin"
export RUST_LOG=${RUST_LOG-info}
$CMD $*
# use SIGINT to quit

server.sh Executable file

@ -0,0 +1,16 @@
#!/bin/bash
set -eux
cd `dirname $0`
cargo fmt --all
cargo build --all --release
NETWORK=$1
shift
DB=./db2 # $HOME/tmp/electrs_db/mainnet_zstd
CMD="target/release/electrs --network $NETWORK --db-dir $DB --daemon-dir $HOME/.bitcoin"
export RUST_LOG=${RUST_LOG-info}
$CMD $*
# use SIGINT to quit


@ -1,60 +0,0 @@
use bitcoin::hash_types::BlockHash;
use std::sync::{Arc, Mutex};
use crate::{config::Config, daemon, errors::*, index, signal::Waiter, store};
pub struct App {
store: store::DBStore,
index: index::Index,
daemon: daemon::Daemon,
banner: String,
tip: Mutex<BlockHash>,
}
impl App {
pub fn new(
store: store::DBStore,
index: index::Index,
daemon: daemon::Daemon,
config: &Config,
) -> Result<Arc<App>> {
Ok(Arc::new(App {
store,
index,
daemon: daemon.reconnect()?,
banner: config.server_banner.clone(),
tip: Mutex::new(BlockHash::default()),
}))
}
fn write_store(&self) -> &impl store::WriteStore {
&self.store
}
// TODO: use index for queries.
pub fn read_store(&self) -> &dyn store::ReadStore {
&self.store
}
pub fn index(&self) -> &index::Index {
&self.index
}
pub fn daemon(&self) -> &daemon::Daemon {
&self.daemon
}
pub fn update(&self, signal: &Waiter) -> Result<bool> {
let mut tip = self.tip.lock().expect("failed to lock tip");
let new_block = *tip != self.daemon().getbestblockhash()?;
if new_block {
*tip = self.index().update(self.write_store(), &signal)?;
}
Ok(new_block)
}
pub fn get_banner(&self) -> Result<String> {
Ok(format!(
"{}\n{}",
self.banner,
self.daemon.get_subversion()?
))
}
}


@ -1,89 +1,15 @@
extern crate electrs;
#![recursion_limit = "256"]
extern crate error_chain;
#[macro_use]
extern crate log;
use anyhow::{Context, Result};
use electrs::{server, Config, Daemon, Rpc, Tracker};
use error_chain::ChainedError;
use std::process;
use std::sync::Arc;
use electrs::{
app::App,
bulk,
cache::{BlockTxIDsCache, TransactionCache},
config::Config,
daemon::Daemon,
errors::*,
index::Index,
metrics::Metrics,
query::Query,
rpc::RPC,
signal::Waiter,
store::{full_compaction, is_fully_compacted, DBStore},
};
fn run_server(config: &Config) -> Result<()> {
let signal = Waiter::start();
let metrics = Metrics::new(config.monitoring_addr);
metrics.start();
let blocktxids_cache = Arc::new(BlockTxIDsCache::new(config.blocktxids_cache_size, &metrics));
let daemon = Daemon::new(
&config.daemon_dir,
&config.blocks_dir,
config.daemon_rpc_addr,
config.cookie_getter(),
config.network_type,
signal.clone(),
blocktxids_cache,
&metrics,
)?;
// Perform initial indexing from local blk*.dat block files.
let store = DBStore::open(&config.db_path, /*low_memory=*/ config.jsonrpc_import);
let index = Index::load(&store, &daemon, &metrics, config.index_batch_size)?;
let store = if is_fully_compacted(&store) {
store // initial import and full compaction are over
} else if config.jsonrpc_import {
index.update(&store, &signal)?; // slower: uses JSONRPC for fetching blocks
full_compaction(store)
} else {
// faster, but uses more memory
let store =
bulk::index_blk_files(&daemon, config.bulk_index_threads, &metrics, &signal, store)?;
let store = full_compaction(store);
index.reload(&store); // make sure the block header index is up-to-date
store
}
.enable_compaction(); // enable auto compactions before starting incremental index updates.
let app = App::new(store, index, daemon, &config)?;
let tx_cache = TransactionCache::new(config.tx_cache_size, &metrics);
let query = Query::new(app.clone(), &metrics, tx_cache, config.txid_limit);
let relayfee = query.get_relayfee()?;
debug!("relayfee: {} BTC", relayfee);
let mut server = None; // Electrum RPC server
loop {
app.update(&signal)?;
query.update_mempool()?;
server
.get_or_insert_with(|| {
RPC::start(config.electrum_rpc_addr, query.clone(), &metrics, relayfee)
})
.notify(); // update subscribed clients
if let Err(err) = signal.wait(config.wait_duration) {
info!("stopping server: {}", err);
break;
}
}
Ok(())
}
fn main() {
fn main() -> Result<()> {
let config = Config::from_args();
if let Err(e) = run_server(&config) {
error!("server failed: {}", e.display_chain());
process::exit(1);
}
let mut tracker = Tracker::new(&config)?;
tracker
.sync(&Daemon::connect(&config)?)
.context("initial sync failed")?;
// re-connect after initial sync (due to possible timeout during compaction)
server::run(&config, Rpc::new(&config, tracker)?).context("server failed")
}

src/bin/query.rs Normal file

@ -0,0 +1,43 @@
#[macro_use]
extern crate log;
use anyhow::Result;
use bitcoin::{Address, Amount};
use std::collections::BTreeMap;
use std::str::FromStr;
use electrs::{Cache, Config, Daemon, ScriptHash, Status, Tracker};
fn main() -> Result<()> {
let config = Config::from_args();
let addresses = config
.args
.iter()
.map(|a| Address::from_str(a).expect("invalid address"));
let cache = Cache::new();
let daemon = Daemon::connect(&config)?;
let mut tracker = Tracker::new(&config)?;
let mut map: BTreeMap<Address, Status> = addresses
.map(|addr| {
let status = Status::new(ScriptHash::new(&addr.script_pubkey()));
(addr, status)
})
.collect();
loop {
tracker.sync(&daemon)?;
let mut total = Amount::ZERO;
for (addr, status) in map.iter_mut() {
tracker.update_status(status, &daemon, &cache)?;
let balance = tracker.get_balance(status, &cache);
if balance > Amount::ZERO {
info!("{} has {}", addr, balance);
}
total += balance;
}
info!("total: {}", total);
std::thread::sleep(config.wait_duration);
}
}

src/bin/sync.rs Normal file

@ -0,0 +1,9 @@
use anyhow::Result;
use electrs::{Config, Daemon, Tracker};
fn main() -> Result<()> {
let config = Config::from_args();
let daemon = Daemon::connect(&config)?;
Tracker::new(&config)?.sync(&daemon)
}


@ -1,289 +0,0 @@
use bitcoin::blockdata::block::Block;
use bitcoin::consensus::encode::{deserialize, Decodable};
use bitcoin::hash_types::BlockHash;
use std::collections::HashSet;
use std::fs;
use std::io::Cursor;
use std::path::{Path, PathBuf};
use std::sync::{
mpsc::{Receiver, SyncSender},
Arc, Mutex,
};
use std::thread;
use crate::daemon::Daemon;
use crate::errors::*;
use crate::index::{index_block, last_indexed_block, read_indexed_blockhashes};
use crate::metrics::{CounterVec, Histogram, HistogramOpts, HistogramVec, MetricOpts, Metrics};
use crate::signal::Waiter;
use crate::store::{DBStore, Row, WriteStore};
use crate::util::{spawn_thread, HeaderList, SyncChannel};
struct Parser {
magic: u32,
current_headers: HeaderList,
indexed_blockhashes: Mutex<HashSet<BlockHash>>,
// metrics
duration: HistogramVec,
block_count: CounterVec,
bytes_read: Histogram,
}
impl Parser {
fn new(
daemon: &Daemon,
metrics: &Metrics,
indexed_blockhashes: HashSet<BlockHash>,
) -> Result<Arc<Parser>> {
Ok(Arc::new(Parser {
magic: daemon.magic(),
current_headers: load_headers(daemon)?,
indexed_blockhashes: Mutex::new(indexed_blockhashes),
duration: metrics.histogram_vec(
HistogramOpts::new(
"electrs_parse_duration",
"blk*.dat parsing duration (in seconds)",
),
&["step"],
),
block_count: metrics.counter_vec(
MetricOpts::new("electrs_parse_blocks", "# of block parsed (from blk*.dat)"),
&["type"],
),
bytes_read: metrics.histogram(HistogramOpts::new(
"electrs_parse_bytes_read",
"# of bytes read (from blk*.dat)",
)),
}))
}
fn last_indexed_row(&self) -> Row {
// TODO: use JSONRPC for missing blocks, and don't use 'L' row at all.
let indexed_blockhashes = self.indexed_blockhashes.lock().unwrap();
let last_header = self
.current_headers
.iter()
.take_while(|h| indexed_blockhashes.contains(h.hash()))
.last()
.expect("no indexed header found");
debug!("last indexed block: {:?}", last_header);
last_indexed_block(last_header.hash())
}
fn read_blkfile(&self, path: &Path) -> Result<Vec<u8>> {
let timer = self.duration.with_label_values(&["read"]).start_timer();
let blob = fs::read(&path).chain_err(|| format!("failed to read {:?}", path))?;
timer.observe_duration();
self.bytes_read.observe(blob.len() as f64);
Ok(blob)
}
fn index_blkfile(&self, blob: Vec<u8>) -> Result<Vec<Row>> {
let timer = self.duration.with_label_values(&["parse"]).start_timer();
let blocks = parse_blocks(blob, self.magic)?;
timer.observe_duration();
let mut rows = Vec::<Row>::new();
let timer = self.duration.with_label_values(&["index"]).start_timer();
for block in blocks {
let blockhash = block.block_hash();
if let Some(header) = self.current_headers.header_by_blockhash(&blockhash) {
if self
.indexed_blockhashes
.lock()
.expect("indexed_blockhashes")
.insert(blockhash)
{
rows.extend(index_block(&block, header.height()));
self.block_count.with_label_values(&["indexed"]).inc();
} else {
self.block_count.with_label_values(&["duplicate"]).inc();
}
} else {
// will be indexed later (after bulk load is over) if not an orphan block
self.block_count.with_label_values(&["skipped"]).inc();
}
}
timer.observe_duration();
let timer = self.duration.with_label_values(&["sort"]).start_timer();
rows.sort_unstable_by(|a, b| a.key.cmp(&b.key));
timer.observe_duration();
Ok(rows)
}
}
fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<Block>> {
let mut cursor = Cursor::new(&blob);
let mut blocks = vec![];
let max_pos = blob.len() as u64;
while cursor.position() < max_pos {
let offset = cursor.position();
match u32::consensus_decode(&mut cursor) {
Ok(value) => {
if magic != value {
cursor.set_position(offset + 1);
continue;
}
}
Err(_) => break, // EOF
};
let block_size = u32::consensus_decode(&mut cursor).chain_err(|| "no block size")?;
let start = cursor.position();
let end = start + block_size as u64;
// If Core's WriteBlockToDisk ftell fails, only the magic bytes and size will be written
// and the block body won't be written to the blk*.dat file.
// Since the first 4 bytes should contain the block's version, we can skip such blocks
// by peeking the cursor (and skipping previous `magic` and `block_size`).
match u32::consensus_decode(&mut cursor) {
Ok(value) => {
if magic == value {
cursor.set_position(start);
continue;
}
}
Err(_) => break, // EOF
}
let block: Block = deserialize(&blob[start as usize..end as usize])
.chain_err(|| format!("failed to parse block at {}..{}", start, end))?;
blocks.push(block);
cursor.set_position(end as u64);
}
Ok(blocks)
}
fn load_headers(daemon: &Daemon) -> Result<HeaderList> {
let tip = daemon.getbestblockhash()?;
let mut headers = HeaderList::empty();
let new_headers = headers.order(daemon.get_new_headers(&headers, &tip)?);
headers.apply(new_headers, tip);
Ok(headers)
}
fn set_open_files_limit(limit: libc::rlim_t) {
let resource = libc::RLIMIT_NOFILE;
let mut rlim = libc::rlimit {
rlim_cur: 0,
rlim_max: 0,
};
let result = unsafe { libc::getrlimit(resource, &mut rlim) };
if result < 0 {
panic!("getrlimit() failed: {}", result);
}
rlim.rlim_cur = limit; // set softs limit only.
let result = unsafe { libc::setrlimit(resource, &rlim) };
if result < 0 {
panic!("setrlimit() failed: {}", result);
}
}
type JoinHandle = thread::JoinHandle<Result<()>>;
type BlobReceiver = Arc<Mutex<Receiver<(Vec<u8>, PathBuf)>>>;
fn start_reader(blk_files: Vec<PathBuf>, parser: Arc<Parser>) -> (BlobReceiver, JoinHandle) {
let chan = SyncChannel::new(0);
let blobs = chan.sender();
let handle = spawn_thread("bulk_read", move || -> Result<()> {
for path in blk_files {
blobs
.send((parser.read_blkfile(&path)?, path))
.expect("failed to send blk*.dat contents");
}
Ok(())
});
(Arc::new(Mutex::new(chan.into_receiver())), handle)
}
fn start_indexer(
blobs: BlobReceiver,
parser: Arc<Parser>,
writer: SyncSender<(Vec<Row>, PathBuf)>,
) -> JoinHandle {
spawn_thread("bulk_index", move || -> Result<()> {
loop {
let msg = blobs.lock().unwrap().recv();
if let Ok((blob, path)) = msg {
let rows = parser
.index_blkfile(blob)
.chain_err(|| format!("failed to index {:?}", path))?;
writer
.send((rows, path))
.expect("failed to send indexed rows")
} else {
debug!("no more blocks to index");
break;
}
}
Ok(())
})
}
pub fn index_blk_files(
daemon: &Daemon,
index_threads: usize,
metrics: &Metrics,
signal: &Waiter,
store: DBStore,
) -> Result<DBStore> {
set_open_files_limit(2048); // twice the default `ulimit -n` value
let blk_files = daemon.list_blk_files()?;
info!("indexing {} blk*.dat files", blk_files.len());
let indexed_blockhashes = read_indexed_blockhashes(&store);
debug!("found {} indexed blocks", indexed_blockhashes.len());
let parser = Parser::new(daemon, metrics, indexed_blockhashes)?;
let (blobs, reader) = start_reader(blk_files, parser.clone());
let rows_chan = SyncChannel::new(0);
let indexers: Vec<JoinHandle> = (0..index_threads)
.map(|_| start_indexer(blobs.clone(), parser.clone(), rows_chan.sender()))
.collect();
for (rows, path) in rows_chan.into_receiver() {
trace!("indexed {:?}: {} rows", path, rows.len());
store.write(rows);
signal
.poll()
.chain_err(|| "stopping bulk indexing due to signal")?;
}
reader
.join()
.expect("reader panicked")
.expect("reader failed");
indexers.into_iter().for_each(|i| {
i.join()
.expect("indexer panicked")
.expect("indexing failed")
});
store.write(vec![parser.last_indexed_row()]);
Ok(store)
}
#[cfg(test)]
mod tests {
use super::*;
use bitcoin::hashes::Hash;
use hex::decode as hex_decode;
#[test]
fn test_incomplete_block_parsing() {
let magic = 0x0709110b;
let raw_blocks = hex_decode(fixture("incomplete_block.hex")).unwrap();
let blocks = parse_blocks(raw_blocks, magic).unwrap();
assert_eq!(blocks.len(), 2);
assert_eq!(
blocks[1].block_hash().into_inner().to_vec(),
hex_decode("d55acd552414cc44a761e8d6b64a4d555975e208397281d115336fc500000000").unwrap()
);
}
pub fn fixture(filename: &str) -> String {
let path = Path::new("src")
.join("tests")
.join("fixtures")
.join(filename);
fs::read_to_string(path).unwrap()
}
}


@ -1,273 +1,48 @@
use crate::errors::*;
use crate::metrics::{CounterVec, MetricOpts, Metrics};
use bitcoin::{BlockHash, Transaction, Txid};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::consensus::encode::deserialize;
use bitcoin::hash_types::{BlockHash, Txid};
use lru::LruCache;
use prometheus::IntGauge;
use std::hash::Hash;
use std::sync::Mutex;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
struct SizedLruCache<K, V> {
map: LruCache<K, (V, usize)>,
bytes_usage: usize,
bytes_capacity: usize,
lookups: CounterVec,
usage: IntGauge,
use crate::merkle::Proof;
pub struct Cache {
txs: Arc<RwLock<HashMap<Txid, Transaction>>>,
proofs: Arc<RwLock<HashMap<(BlockHash, Txid), Proof>>>,
}
impl<K: Hash + Eq, V> SizedLruCache<K, V> {
fn new(bytes_capacity: usize, lookups: CounterVec, usage: IntGauge) -> SizedLruCache<K, V> {
SizedLruCache {
map: LruCache::unbounded(),
bytes_usage: 0,
bytes_capacity,
lookups,
usage,
}
impl Cache {
pub fn new() -> Self {
let txs = Arc::new(RwLock::new(HashMap::new()));
let proofs = Arc::new(RwLock::new(HashMap::new()));
Self { txs, proofs }
}
fn get(&mut self, key: &K) -> Option<&V> {
match self.map.get(key) {
None => {
self.lookups.with_label_values(&["miss"]).inc();
None
}
Some((value, _)) => {
self.lookups.with_label_values(&["hit"]).inc();
Some(value)
}
}
pub(crate) fn add_tx(&self, txid: Txid, f: impl FnOnce() -> Transaction) {
self.txs.write().unwrap().entry(txid).or_insert_with(f);
}
fn put(&mut self, key: K, value: V, byte_size: usize) {
if byte_size > self.bytes_capacity {
return;
}
if let Some((_, popped_size)) = self.map.put(key, (value, byte_size)) {
self.bytes_usage -= popped_size
}
self.bytes_usage += byte_size;
while self.bytes_usage > self.bytes_capacity {
match self.map.pop_lru() {
Some((_, (_, popped_size))) => self.bytes_usage -= popped_size,
None => break,
}
}
self.usage.set(self.bytes_usage as i64);
}
}
pub struct BlockTxIDsCache {
map: Mutex<SizedLruCache<BlockHash, Vec<Txid>>>,
}
impl BlockTxIDsCache {
pub fn new(bytes_capacity: usize, metrics: &Metrics) -> BlockTxIDsCache {
let lookups = metrics.counter_vec(
MetricOpts::new(
"electrs_blocktxids_cache",
"# of cache lookups for list of transactions in a block",
),
&["type"],
);
let usage = metrics.gauge_int(MetricOpts::new(
"electrs_blocktxids_cache_size",
"Cache usage for list of transactions in a block (bytes)",
));
BlockTxIDsCache {
map: Mutex::new(SizedLruCache::new(bytes_capacity, lookups, usage)),
}
}
pub fn get_or_else<F>(&self, blockhash: &BlockHash, load_txids_func: F) -> Result<Vec<Txid>>
pub(crate) fn get_tx<F, T>(&self, txid: &Txid, f: F) -> Option<T>
where
F: FnOnce() -> Result<Vec<Txid>>,
F: FnOnce(&Transaction) -> T,
{
if let Some(txids) = self.map.lock().unwrap().get(blockhash) {
return Ok(txids.clone());
self.txs.read().unwrap().get(txid).map(f)
}
let txids = load_txids_func()?;
let byte_size = 32 /* hash size */ * (1 /* key */ + txids.len() /* values */);
self.map
.lock()
.unwrap()
.put(*blockhash, txids.clone(), byte_size);
Ok(txids)
}
}
pub struct TransactionCache {
// Store serialized transaction (should use less RAM).
map: Mutex<SizedLruCache<Txid, Vec<u8>>>,
}
impl TransactionCache {
pub fn new(bytes_capacity: usize, metrics: &Metrics) -> TransactionCache {
let lookups = metrics.counter_vec(
MetricOpts::new(
"electrs_transactions_cache",
"# of cache lookups for transactions",
),
&["type"],
);
let usage = metrics.gauge_int(MetricOpts::new(
"electrs_transactions_cache_size",
"Cache usage for list of transactions (bytes)",
));
TransactionCache {
map: Mutex::new(SizedLruCache::new(bytes_capacity, lookups, usage)),
}
}
pub fn get_or_else<F>(&self, txid: &Txid, load_txn_func: F) -> Result<Transaction>
pub(crate) fn add_proof<F>(&self, blockhash: BlockHash, txid: Txid, f: F)
where
F: FnOnce() -> Result<Vec<u8>>,
F: FnOnce() -> Proof,
{
if let Some(serialized_txn) = self.map.lock().unwrap().get(txid) {
return Ok(deserialize(&serialized_txn).chain_err(|| "failed to parse cached tx")?);
}
let serialized_txn = load_txn_func()?;
let txn = deserialize(&serialized_txn).chain_err(|| "failed to parse serialized tx")?;
let byte_size = 32 /* key (hash size) */ + serialized_txn.len();
self.map
.lock()
self.proofs
.write()
.unwrap()
.put(*txid, serialized_txn, byte_size);
Ok(txn)
}
.entry((blockhash, txid))
.or_insert_with(f);
}
#[cfg(test)]
mod tests {
use super::*;
use bitcoin::hashes::Hash;
#[test]
fn test_sized_lru_cache_hit_and_miss() {
let counter = CounterVec::new(prometheus::Opts::new("name", "help"), &["type"]).unwrap();
let usage = IntGauge::new("usage", "help").unwrap();
let mut cache = SizedLruCache::<i8, i32>::new(100, counter.clone(), usage.clone());
assert_eq!(counter.with_label_values(&["miss"]).get(), 0);
assert_eq!(counter.with_label_values(&["hit"]).get(), 0);
assert_eq!(usage.get(), 0);
assert_eq!(cache.get(&1), None); // no such key
assert_eq!(counter.with_label_values(&["miss"]).get(), 1);
assert_eq!(counter.with_label_values(&["hit"]).get(), 0);
assert_eq!(usage.get(), 0);
cache.put(1, 10, 50); // add new key-value
assert_eq!(cache.get(&1), Some(&10));
assert_eq!(counter.with_label_values(&["miss"]).get(), 1);
assert_eq!(counter.with_label_values(&["hit"]).get(), 1);
assert_eq!(usage.get(), 50);
cache.put(3, 30, 50); // drop oldest key (1)
cache.put(2, 20, 50);
assert_eq!(cache.get(&1), None);
assert_eq!(cache.get(&2), Some(&20));
assert_eq!(cache.get(&3), Some(&30));
assert_eq!(counter.with_label_values(&["miss"]).get(), 2);
assert_eq!(counter.with_label_values(&["hit"]).get(), 3);
assert_eq!(usage.get(), 100);
cache.put(3, 33, 50); // replace existing value
assert_eq!(cache.get(&1), None);
assert_eq!(cache.get(&2), Some(&20));
assert_eq!(cache.get(&3), Some(&33));
assert_eq!(counter.with_label_values(&["miss"]).get(), 3);
assert_eq!(counter.with_label_values(&["hit"]).get(), 5);
assert_eq!(usage.get(), 100);
cache.put(9, 90, 9999); // larger than cache capacity, don't drop the cache
assert_eq!(cache.get(&1), None);
assert_eq!(cache.get(&2), Some(&20));
assert_eq!(cache.get(&3), Some(&33));
assert_eq!(cache.get(&9), None);
assert_eq!(counter.with_label_values(&["miss"]).get(), 5);
assert_eq!(counter.with_label_values(&["hit"]).get(), 7);
assert_eq!(usage.get(), 100);
}
fn gen_hash<T: Hash>(seed: u8) -> T {
let bytes: Vec<u8> = (seed..seed + 32).collect();
<T as Hash>::hash(&bytes[..])
}
#[test]
fn test_blocktxids_cache_hit_and_miss() {
let block1: BlockHash = gen_hash(1);
let block2: BlockHash = gen_hash(2);
let block3: BlockHash = gen_hash(3);
let txids: Vec<Txid> = vec![gen_hash(4), gen_hash(5)];
let misses: Mutex<usize> = Mutex::new(0);
let miss_func = || {
*misses.lock().unwrap() += 1;
Ok(txids.clone())
};
let dummy_metrics = Metrics::new("127.0.0.1:60000".parse().unwrap());
// 200 bytes ~ 32 (bytes/hash) * (1 key hash + 2 value hashes) * 2 txns
let cache = BlockTxIDsCache::new(200, &dummy_metrics);
// cache miss
let result = cache.get_or_else(&block1, &miss_func).unwrap();
assert_eq!(1, *misses.lock().unwrap());
assert_eq!(txids, result);
// cache hit
let result = cache.get_or_else(&block1, &miss_func).unwrap();
assert_eq!(1, *misses.lock().unwrap());
assert_eq!(txids, result);
// cache size is 200, test that blockhash1 falls out of cache
cache.get_or_else(&block2, &miss_func).unwrap();
assert_eq!(2, *misses.lock().unwrap());
cache.get_or_else(&block3, &miss_func).unwrap();
assert_eq!(3, *misses.lock().unwrap());
cache.get_or_else(&block1, &miss_func).unwrap();
assert_eq!(4, *misses.lock().unwrap());
// cache hits
cache.get_or_else(&block3, &miss_func).unwrap();
cache.get_or_else(&block1, &miss_func).unwrap();
assert_eq!(4, *misses.lock().unwrap());
}
#[test]
fn test_txn_cache() {
use hex;
let dummy_metrics = Metrics::new("127.0.0.1:60000".parse().unwrap());
let cache = TransactionCache::new(1024, &dummy_metrics);
let tx_bytes = hex::decode("0100000001a15d57094aa7a21a28cb20b59aab8fc7d1149a3bdbcddba9c622e4f5f6a99ece010000006c493046022100f93bb0e7d8db7bd46e40132d1f8242026e045f03a0efe71bbb8e3f475e970d790221009337cd7f1f929f00cc6ff01f03729b069a7c21b59b1736ddfee5db5946c5da8c0121033b9b137ee87d5a812d6f506efdd37f0affa7ffc310711c06c7f3e097c9447c52ffffffff0100e1f505000000001976a9140389035a9225b3839e2bbf32d826a1e222031fd888ac00000000").unwrap();
let tx: Transaction = deserialize(&tx_bytes).unwrap();
let txid = tx.txid();
let mut misses = 0;
assert_eq!(
cache
.get_or_else(&txid, || {
misses += 1;
Ok(tx_bytes.clone())
})
.unwrap(),
tx
);
assert_eq!(misses, 1);
assert_eq!(
cache
.get_or_else(&txid, || panic!("should not be called"))
.unwrap(),
tx
);
assert_eq!(misses, 1);
pub(crate) fn get_proof<F, T>(&self, blockhash: BlockHash, txid: Txid, f: F) -> Option<T>
where
F: FnOnce(&Proof) -> T,
{
self.proofs.read().unwrap().get(&(blockhash, txid)).map(f)
}
}
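A minimal usage sketch (not part of this diff) for the new Cache type above; the closure passed to add_tx is only evaluated on a cache miss, and get_tx maps over the cached value if one is present:
// Hedged sketch: populate and read the shared transaction cache.
fn cache_roundtrip(cache: &Cache, txid: Txid, tx: Transaction) -> Option<Txid> {
    cache.add_tx(txid, || tx);          // compute-once insert behind an RwLock
    cache.get_tx(&txid, |t| t.txid())   // returns None if the txid was never cached
}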

src/chain.rs Normal file

@ -0,0 +1,130 @@
use std::collections::HashMap;
use bitcoin::consensus::deserialize;
use bitcoin::hashes::hex::FromHex;
use bitcoin::network::constants;
use bitcoin::{BlockHash, BlockHeader};
pub(crate) struct NewHeader {
header: BlockHeader,
hash: BlockHash,
height: usize,
}
impl NewHeader {
pub(crate) fn from((header, height): (BlockHeader, usize)) -> Self {
Self {
header,
hash: header.block_hash(),
height,
}
}
pub(crate) fn height(&self) -> usize {
self.height
}
pub(crate) fn hash(&self) -> BlockHash {
self.hash
}
}
/// Current blockchain headers' list
pub struct Chain {
headers: Vec<(BlockHash, BlockHeader)>,
heights: HashMap<BlockHash, usize>,
}
impl Chain {
pub fn new(network: constants::Network) -> Self {
let genesis_header_hex = match network {
constants::Network::Bitcoin => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c",
constants::Network::Testnet => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff001d1aa4ae18",
constants::Network::Regtest => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4adae5494dffff7f2002000000",
constants::Network::Signet => "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a008f4d5fae77031e8ad22203",
};
let genesis_header_bytes = Vec::from_hex(genesis_header_hex).unwrap();
let genesis: BlockHeader = deserialize(&genesis_header_bytes).unwrap();
assert_eq!(genesis.prev_blockhash, BlockHash::default());
Self {
headers: vec![(genesis.block_hash(), genesis)],
heights: std::iter::once((genesis.block_hash(), 0)).collect(),
}
}
pub(crate) fn load(&mut self, headers: Vec<BlockHeader>, tip: BlockHash) {
let genesis_hash = self.headers[0].0;
let mut header_map: HashMap<BlockHash, BlockHeader> =
headers.into_iter().map(|h| (h.block_hash(), h)).collect();
let mut blockhash = tip;
let mut new_headers = vec![];
while blockhash != genesis_hash {
let header = match header_map.remove(&blockhash) {
Some(header) => header,
None => panic!("missing header {} while loading from DB", blockhash),
};
blockhash = header.prev_blockhash;
new_headers.push(header);
}
info!("loading {} headers, tip={}", new_headers.len(), tip);
let new_headers = new_headers.into_iter().rev(); // order by height
self.update(new_headers.zip(1..).map(NewHeader::from).collect())
}
pub(crate) fn get_block_hash(&self, height: usize) -> Option<BlockHash> {
self.headers.get(height).map(|(hash, _header)| *hash)
}
pub(crate) fn get_block_header(&self, height: usize) -> Option<&BlockHeader> {
self.headers.get(height).map(|(_hash, header)| header)
}
pub(crate) fn get_block_height(&self, blockhash: &BlockHash) -> Option<usize> {
self.heights.get(blockhash).copied()
}
pub(crate) fn update(&mut self, headers: Vec<NewHeader>) {
if let Some(first_height) = headers.first().map(|h| h.height) {
for (hash, _header) in self.headers.drain(first_height..) {
assert!(self.heights.remove(&hash).is_some());
}
for (h, height) in headers.into_iter().zip(first_height..) {
assert_eq!(h.height, height);
assert_eq!(h.hash, h.header.block_hash());
assert!(self.heights.insert(h.hash, h.height).is_none());
self.headers.push((h.hash, h.header));
}
info!(
"chain updated: tip={}, height={}",
self.headers.last().unwrap().0,
self.headers.len() - 1
);
}
}
pub(crate) fn tip(&self) -> BlockHash {
self.headers.last().expect("empty chain").0
}
pub(crate) fn height(&self) -> usize {
self.headers.len() - 1
}
pub(crate) fn locator(&self) -> Vec<BlockHash> {
let mut result = vec![];
let mut index = self.headers.len() - 1;
let mut step = 1;
loop {
if result.len() >= 10 {
step *= 2;
}
result.push(self.headers[index].0);
if index == 0 {
break;
}
index = index.saturating_sub(step);
}
result
}
}
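As a rough sketch (not part of this diff), the Chain above is driven by appending NewHeader values and asking for a locator when requesting more headers; the locator starts at the tip and gets sparser towards genesis:
// Hedged sketch: extend the in-memory chain and build a locator for the next getheaders request.
fn extend_chain(chain: &mut Chain, new_headers: Vec<NewHeader>) -> Vec<BlockHash> {
    chain.update(new_headers);                  // truncates from the first new height, then appends
    let locator = chain.locator();
    assert_eq!(locator.first(), Some(&chain.tip()));
    locator
}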


@ -1,18 +1,14 @@
use bitcoin::network::constants::Network;
use dirs::home_dir;
use std::convert::TryInto;
use dirs_next::home_dir;
use std::ffi::{OsStr, OsString};
use std::fmt;
use std::fs;
use std::net::SocketAddr;
use std::net::ToSocketAddrs;
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use crate::daemon::CookieGetter;
use crate::errors::*;
use std::time::Duration;
const DEFAULT_SERVER_ADDRESS: [u8; 4] = [127, 0, 0, 1]; // by default, serve on IPv4 localhost
@ -109,7 +105,7 @@ impl FromStr for BitcoinNetwork {
impl ::configure_me::parse_arg::ParseArgFromStr for BitcoinNetwork {
fn describe_type<W: fmt::Write>(mut writer: W) -> std::fmt::Result {
write!(writer, "either 'bitcoin', 'testnet' or 'regtest'")
write!(writer, "either 'bitcoin', 'testnet', 'regtest' or 'signet'")
}
}
@ -120,25 +116,21 @@ impl Into<Network> for BitcoinNetwork {
}
/// Parsed and post-processed configuration
#[derive(Debug)]
pub struct Config {
// See below for the documentation of each field:
pub log: stderrlog::StdErrLog,
pub network_type: Network,
pub network: Network,
pub db_path: PathBuf,
pub daemon_dir: PathBuf,
pub blocks_dir: PathBuf,
pub daemon_cookie_file: PathBuf,
pub daemon_rpc_addr: SocketAddr,
pub daemon_p2p_addr: SocketAddr,
pub electrum_rpc_addr: SocketAddr,
pub monitoring_addr: SocketAddr,
pub jsonrpc_import: bool,
pub wait_duration: Duration,
pub index_batch_size: usize,
pub bulk_index_threads: usize,
pub tx_cache_size: usize,
pub txid_limit: usize,
pub server_banner: String,
pub blocktxids_cache_size: usize,
pub cookie_getter: Arc<dyn CookieGetter>,
pub args: Vec<String>,
}
/// Returns default daemon directory
@ -151,26 +143,6 @@ fn default_daemon_dir() -> PathBuf {
home
}
fn default_blocks_dir(daemon_dir: &Path) -> PathBuf {
daemon_dir.join("blocks")
}
fn create_cookie_getter(
cookie: Option<String>,
cookie_file: Option<PathBuf>,
daemon_dir: &Path,
) -> Arc<dyn CookieGetter> {
match (cookie, cookie_file) {
(None, None) => Arc::new(CookieFile::from_daemon_dir(daemon_dir)),
(None, Some(file)) => Arc::new(CookieFile::from_file(file)),
(Some(cookie), None) => Arc::new(StaticCookie::from_string(cookie)),
(Some(_), Some(_)) => {
eprintln!("Error: ambigous configuration - cookie and cookie_file can't be specified at the same time");
std::process::exit(1);
}
}
}
impl Config {
/// Parses args, env vars, config files and post-processes them
pub fn from_args() -> Config {
@ -186,36 +158,49 @@ impl Config {
.chain(home_config.as_ref().map(AsRef::as_ref))
.chain(std::iter::once(system_config));
let (mut config, _) =
let (mut config, args) =
internal::Config::including_optional_config_files(configs).unwrap_or_exit();
let db_subdir = match config.network {
// We must keep the name "mainnet" due to backwards compatibility
Network::Bitcoin => "mainnet",
Network::Bitcoin => "bitcoin",
Network::Testnet => "testnet",
Network::Regtest => "regtest",
Network::Signet => "signet",
};
config.db_dir.push(db_subdir);
let default_daemon_port = match config.network {
let default_daemon_rpc_port = match config.network {
Network::Bitcoin => 8332,
Network::Testnet => 18332,
Network::Regtest => 18443,
Network::Signet => 38332,
};
let default_daemon_p2p_port = match config.network {
Network::Bitcoin => 8333,
Network::Testnet => 18333,
Network::Regtest => 18444,
Network::Signet => 38333,
};
let default_electrum_port = match config.network {
Network::Bitcoin => 50001,
Network::Testnet => 60001,
Network::Regtest => 60401,
Network::Signet => 60601,
};
let default_monitoring_port = match config.network {
Network::Bitcoin => 4224,
Network::Testnet => 14224,
Network::Regtest => 24224,
Network::Signet => 34224,
};
let daemon_rpc_addr: SocketAddr = config.daemon_rpc_addr.map_or(
(DEFAULT_SERVER_ADDRESS, default_daemon_port).into(),
(DEFAULT_SERVER_ADDRESS, default_daemon_rpc_port).into(),
ResolvAddr::resolve_or_exit,
);
let daemon_p2p_addr: SocketAddr = config.daemon_p2p_addr.map_or(
(DEFAULT_SERVER_ADDRESS, default_daemon_p2p_port).into(),
ResolvAddr::resolve_or_exit,
);
let electrum_rpc_addr: SocketAddr = config.electrum_rpc_addr.map_or(
@ -231,140 +216,31 @@ impl Config {
Network::Bitcoin => (),
Network::Testnet => config.daemon_dir.push("testnet3"),
Network::Regtest => config.daemon_dir.push("regtest"),
Network::Signet => config.daemon_dir.push("signet"),
}
let daemon_dir = &config.daemon_dir;
let blocks_dir = config
.blocks_dir
.unwrap_or_else(|| default_blocks_dir(daemon_dir));
let daemon_cookie_file = daemon_dir.join(".cookie");
let cookie_getter = create_cookie_getter(config.cookie, config.cookie_file, daemon_dir);
let mut log = stderrlog::new();
log.verbosity(
config
.verbose
.try_into()
.expect("Overflow: Running electrs on less than 32 bit devices is unsupported"),
);
log.timestamp(if config.timestamp {
stderrlog::Timestamp::Millisecond
} else {
stderrlog::Timestamp::Off
});
log.init().unwrap_or_else(|err| {
eprintln!("Error: logging initialization failed: {}", err);
std::process::exit(1)
});
// Could have been default, but it's useful to allow the user to specify 0 when overriding
// configs.
if config.bulk_index_threads == 0 {
config.bulk_index_threads = num_cpus::get();
}
const MB: f32 = (1 << 20) as f32;
let config = Config {
log,
network_type: config.network,
network: config.network,
db_path: config.db_dir,
daemon_dir: config.daemon_dir,
blocks_dir,
daemon_cookie_file,
daemon_rpc_addr,
daemon_p2p_addr,
electrum_rpc_addr,
monitoring_addr,
jsonrpc_import: config.jsonrpc_import,
wait_duration: Duration::from_secs(config.wait_duration_secs),
index_batch_size: config.index_batch_size,
bulk_index_threads: config.bulk_index_threads,
tx_cache_size: (config.tx_cache_size_mb * MB) as usize,
blocktxids_cache_size: (config.blocktxids_cache_size_mb * MB) as usize,
txid_limit: config.txid_limit,
server_banner: config.server_banner,
cookie_getter,
args: args.map(|a| a.into_string().unwrap()).collect(),
};
eprintln!("{:?}", config);
env_logger::Builder::from_default_env()
.default_format()
.format_timestamp_millis()
.init();
config
}
pub fn cookie_getter(&self) -> Arc<dyn CookieGetter> {
Arc::clone(&self.cookie_getter)
}
}
// CookieGetter + Debug isn't implemented in Rust, so we have to skip cookie_getter
macro_rules! debug_struct {
($name:ty, $($field:ident,)*) => {
impl fmt::Debug for $name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct(stringify!($name))
$(
.field(stringify!($field), &self.$field)
)*
.finish()
}
}
}
}
debug_struct! { Config,
log,
network_type,
db_path,
daemon_dir,
blocks_dir,
daemon_rpc_addr,
electrum_rpc_addr,
monitoring_addr,
jsonrpc_import,
index_batch_size,
bulk_index_threads,
tx_cache_size,
txid_limit,
server_banner,
blocktxids_cache_size,
}
struct StaticCookie {
value: Vec<u8>,
}
impl StaticCookie {
fn from_string(value: String) -> Self {
StaticCookie {
value: value.into(),
}
}
}
impl CookieGetter for StaticCookie {
fn get(&self) -> Result<Vec<u8>> {
Ok(self.value.clone())
}
}
struct CookieFile {
cookie_file: PathBuf,
}
impl CookieFile {
fn from_daemon_dir(daemon_dir: &Path) -> Self {
CookieFile {
cookie_file: daemon_dir.join(".cookie"),
}
}
fn from_file(cookie_file: PathBuf) -> Self {
CookieFile { cookie_file }
}
}
impl CookieGetter for CookieFile {
fn get(&self) -> Result<Vec<u8>> {
let contents = fs::read(&self.cookie_file).chain_err(|| {
ErrorKind::Connection(format!(
"failed to read cookie from {}",
self.cookie_file.display()
))
})?;
Ok(contents)
}
}


@ -1,666 +1,298 @@
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::consensus::encode::{deserialize, serialize};
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::network::constants::Network;
use bitcoin::hashes::hex::{FromHex, ToHex};
use bitcoin::hashes::Hash;
use serde_json::{from_str, from_value, Map, Value};
use std::collections::{HashMap, HashSet};
use std::io::{BufRead, BufReader, Lines, Write};
use std::net::{SocketAddr, TcpStream};
use std::path::PathBuf;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use anyhow::{Context, Result};
use crate::cache::BlockTxIDsCache;
use crate::errors::*;
use crate::metrics::{HistogramOpts, HistogramVec, Metrics};
use crate::signal::Waiter;
use crate::util::HeaderList;
use std::io::Write;
use std::iter::FromIterator;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream};
use std::sync::Mutex;
use std::time::{SystemTime, UNIX_EPOCH};
fn parse_hash<T: Hash>(value: &Value) -> Result<T> {
Ok(T::from_hex(
value
.as_str()
.chain_err(|| format!("non-string value: {}", value))?,
)
.chain_err(|| format!("non-hex value: {}", value))?)
}
use bitcoin::consensus::encode;
use bitcoin::network::stream_reader::StreamReader;
use bitcoin::network::{
address, constants,
message::{self, NetworkMessage},
message_blockdata::{GetHeadersMessage, Inventory},
message_network,
};
use bitcoin::secp256k1;
use bitcoin::secp256k1::rand::Rng;
use bitcoin::{Amount, Block, BlockHash, Network, Transaction, Txid};
use bitcoincore_rpc::{self, json, RpcApi};
fn header_from_value(value: Value) -> Result<BlockHeader> {
let header_hex = value
.as_str()
.chain_err(|| format!("non-string header: {}", value))?;
let header_bytes = hex::decode(header_hex).chain_err(|| "non-hex header")?;
Ok(
deserialize(&header_bytes)
.chain_err(|| format!("failed to parse header {}", header_hex))?,
)
}
fn block_from_value(value: Value) -> Result<Block> {
let block_hex = value.as_str().chain_err(|| "non-string block")?;
let block_bytes = hex::decode(block_hex).chain_err(|| "non-hex block")?;
Ok(deserialize(&block_bytes).chain_err(|| format!("failed to parse block {}", block_hex))?)
}
fn tx_from_value(value: Value) -> Result<Transaction> {
let tx_hex = value.as_str().chain_err(|| "non-string tx")?;
let tx_bytes = hex::decode(tx_hex).chain_err(|| "non-hex tx")?;
Ok(deserialize(&tx_bytes).chain_err(|| format!("failed to parse tx {}", tx_hex))?)
}
/// Parse JSONRPC error code, if exists.
fn parse_error_code(err: &Value) -> Option<i64> {
if err.is_null() {
return None;
}
err.as_object()?.get("code")?.as_i64()
}
fn check_error_code(reply_obj: &Map<String, Value>, method: &str) -> Result<()> {
if let Some(err) = reply_obj.get("error") {
if let Some(code) = parse_error_code(&err) {
match code {
// RPC_IN_WARMUP -> retry by later reconnection
-28 => bail!(ErrorKind::Connection(err.to_string())),
_ => bail!("{} RPC error: {}", method, err),
}
}
}
Ok(())
}
fn parse_jsonrpc_reply(mut reply: Value, method: &str, expected_id: u64) -> Result<Value> {
if let Some(reply_obj) = reply.as_object_mut() {
check_error_code(reply_obj, method)?;
let id = reply_obj
.get("id")
.chain_err(|| format!("no id in reply: {:?}", reply_obj))?
.clone();
if id != expected_id {
bail!(
"wrong {} response id {}, expected {}",
method,
id,
expected_id
);
}
if let Some(result) = reply_obj.get_mut("result") {
return Ok(result.take());
}
bail!("no result in reply: {:?}", reply_obj);
}
bail!("non-object reply: {:?}", reply);
}
#[derive(Serialize, Deserialize, Debug)]
struct BlockchainInfo {
chain: String,
blocks: u32,
headers: u32,
verificationprogress: f64,
bestblockhash: String,
pruned: bool,
initialblockdownload: bool,
}
#[derive(Serialize, Deserialize, Debug)]
struct NetworkInfo {
version: u64,
subversion: String,
relayfee: f64, // in BTC
}
pub struct MempoolEntry {
fee: u64, // in satoshis
vsize: u32, // in virtual bytes (= weight/4)
fee_per_vbyte: f32,
}
impl MempoolEntry {
fn new(fee: u64, vsize: u32) -> MempoolEntry {
MempoolEntry {
fee,
vsize,
fee_per_vbyte: fee as f32 / vsize as f32,
}
}
pub fn fee_per_vbyte(&self) -> f32 {
self.fee_per_vbyte
}
pub fn fee(&self) -> u64 {
self.fee
}
pub fn vsize(&self) -> u32 {
self.vsize
}
}
pub trait CookieGetter: Send + Sync {
fn get(&self) -> Result<Vec<u8>>;
}
use crate::{
chain::{Chain, NewHeader},
config::Config,
};
struct Connection {
tx: TcpStream,
rx: Lines<BufReader<TcpStream>>,
cookie_getter: Arc<dyn CookieGetter>,
addr: SocketAddr,
signal: Waiter,
}
fn tcp_connect(addr: SocketAddr, signal: &Waiter) -> Result<TcpStream> {
loop {
match TcpStream::connect(addr) {
Ok(conn) => return Ok(conn),
Err(err) => {
warn!("failed to connect daemon at {}: {}", addr, err);
signal.wait(Duration::from_secs(3))?;
continue;
}
}
}
stream: TcpStream,
reader: StreamReader<TcpStream>,
network: Network,
}
impl Connection {
fn new(
addr: SocketAddr,
cookie_getter: Arc<dyn CookieGetter>,
signal: Waiter,
) -> Result<Connection> {
let conn = tcp_connect(addr, &signal)?;
let reader = BufReader::new(
conn.try_clone()
.chain_err(|| format!("failed to clone {:?}", conn))?,
pub fn connect(network: Network, address: SocketAddr) -> Result<Self> {
let stream = TcpStream::connect(address)
.with_context(|| format!("{} p2p failed to connect: {:?}", network, address))?;
let reader = StreamReader::new(
stream.try_clone().context("stream failed to clone")?,
/*buffer_size*/ Some(1 << 20),
);
Ok(Connection {
tx: conn,
rx: reader.lines(),
cookie_getter,
addr,
signal,
})
let mut conn = Self {
stream,
reader,
network,
};
conn.send(build_version_message())?;
if let NetworkMessage::GetHeaders(_) = conn.recv()? {
conn.send(NetworkMessage::Headers(vec![]))?;
}
Ok(conn)
}
fn reconnect(&self) -> Result<Connection> {
Connection::new(self.addr, self.cookie_getter.clone(), self.signal.clone())
fn send(&mut self, msg: NetworkMessage) -> Result<()> {
trace!("send: {:?}", msg);
let raw_msg = message::RawNetworkMessage {
magic: self.network.magic(),
payload: msg,
};
self.stream
.write_all(encode::serialize(&raw_msg).as_slice())
.context("p2p failed to send")
}
fn send(&mut self, request: &str) -> Result<()> {
let cookie = &self.cookie_getter.get()?;
let msg = format!(
"POST / HTTP/1.1\nAuthorization: Basic {}\nContent-Length: {}\n\n{}",
base64::encode(cookie),
request.len(),
request,
fn recv(&mut self) -> Result<NetworkMessage> {
loop {
let raw_msg: message::RawNetworkMessage =
self.reader.read_next().context("p2p failed to recv")?;
trace!("recv: {:?}", raw_msg.payload);
match raw_msg.payload {
NetworkMessage::Version(version) => {
trace!("peer version: {:?}", version);
self.send(NetworkMessage::Verack)?;
}
NetworkMessage::Ping(nonce) => {
self.send(NetworkMessage::Pong(nonce))?;
}
NetworkMessage::Verack
| NetworkMessage::Alert(_)
| NetworkMessage::Addr(_)
| NetworkMessage::Inv(_) => {}
payload => return Ok(payload),
};
}
}
}
pub(crate) fn rpc_connect(config: &Config) -> Result<bitcoincore_rpc::Client> {
let rpc_url = format!("http://{}", config.daemon_rpc_addr);
if !config.daemon_cookie_file.exists() {
bail!("{:?} is missing", config.daemon_cookie_file);
}
let rpc_auth = bitcoincore_rpc::Auth::CookieFile(config.daemon_cookie_file.clone());
let rpc = bitcoincore_rpc::Client::new(rpc_url, rpc_auth)
.with_context(|| format!("failed to connect to RPC: {}", config.daemon_rpc_addr))?;
use bitcoincore_rpc::{
jsonrpc::error::Error::Rpc as ServerError, Error::JsonRpc as JsonRpcError,
};
loop {
match rpc.get_blockchain_info() {
Ok(info) => {
if info.blocks < info.headers {
info!(
"waiting for {} blocks to download",
info.headers - info.blocks
);
self.tx.write_all(msg.as_bytes()).chain_err(|| {
ErrorKind::Connection("disconnected from daemon while sending".to_owned())
})
}
fn recv(&mut self) -> Result<String> {
// TODO: use proper HTTP parser.
let mut in_header = true;
let mut contents: Option<String> = None;
let iter = self.rx.by_ref();
let status = iter
.next()
.chain_err(|| {
ErrorKind::Connection("disconnected from daemon while receiving".to_owned())
})?
.chain_err(|| "failed to read status")?;
let mut headers = HashMap::new();
for line in iter {
let line = line.chain_err(|| ErrorKind::Connection("failed to read".to_owned()))?;
if line.is_empty() {
in_header = false; // next line should contain the actual response.
} else if in_header {
let parts: Vec<&str> = line.splitn(2, ": ").collect();
if parts.len() == 2 {
headers.insert(parts[0].to_owned(), parts[1].to_owned());
} else {
warn!("invalid header: {:?}", line);
}
} else {
contents = Some(line);
break;
std::thread::sleep(std::time::Duration::from_secs(1));
continue;
}
}
let contents =
contents.chain_err(|| ErrorKind::Connection("no reply from daemon".to_owned()))?;
let contents_length: &str = headers
.get("Content-Length")
.chain_err(|| format!("Content-Length is missing: {:?}", headers))?;
let contents_length: usize = contents_length
.parse()
.chain_err(|| format!("invalid Content-Length: {:?}", contents_length))?;
let expected_length = contents_length - 1; // trailing EOL is skipped
if expected_length != contents.len() {
bail!(ErrorKind::Connection(format!(
"expected {} bytes, got {}",
expected_length,
contents.len()
)));
}
Ok(if status == "HTTP/1.1 200 OK" {
contents
} else if status == "HTTP/1.1 500 Internal Server Error" {
warn!("HTTP status: {}", status);
contents // the contents should have a JSONRPC error field
} else {
bail!(
"request failed {:?}: {:?} = {:?}",
status,
headers,
contents
);
})
Err(err) => {
if let JsonRpcError(ServerError(ref e)) = err {
if e.code == -28 {
info!("waiting for RPC warmup: {}", e.message);
std::thread::sleep(std::time::Duration::from_secs(1));
continue;
}
}
struct Counter {
value: AtomicU64,
return Err(err).context("daemon not available");
}
impl Counter {
fn new() -> Self {
Counter { value: 0.into() }
}
fn next(&self) -> u64 {
// fetch_add() returns previous value, we want current one
self.value.fetch_add(1, Ordering::Relaxed) + 1
return Ok(rpc);
}
}
pub struct Daemon {
daemon_dir: PathBuf,
blocks_dir: PathBuf,
network: Network,
conn: Mutex<Connection>,
message_id: Counter, // for monotonic JSONRPC 'id'
signal: Waiter,
blocktxids_cache: Arc<BlockTxIDsCache>,
// monitoring
latency: HistogramVec,
size: HistogramVec,
p2p: Mutex<Connection>,
rpc: bitcoincore_rpc::Client,
}
impl Daemon {
pub fn new(
daemon_dir: &PathBuf,
blocks_dir: &PathBuf,
daemon_rpc_addr: SocketAddr,
cookie_getter: Arc<dyn CookieGetter>,
network: Network,
signal: Waiter,
blocktxids_cache: Arc<BlockTxIDsCache>,
metrics: &Metrics,
) -> Result<Daemon> {
let daemon = Daemon {
daemon_dir: daemon_dir.clone(),
blocks_dir: blocks_dir.clone(),
network,
conn: Mutex::new(Connection::new(
daemon_rpc_addr,
cookie_getter,
signal.clone(),
)?),
message_id: Counter::new(),
blocktxids_cache,
signal: signal.clone(),
latency: metrics.histogram_vec(
HistogramOpts::new("electrs_daemon_rpc", "Bitcoind RPC latency (in seconds)"),
&["method"],
),
// TODO: use better buckets (e.g. 1 byte to 10MB).
size: metrics.histogram_vec(
HistogramOpts::new("electrs_daemon_bytes", "Bitcoind RPC size (in bytes)"),
&["method", "dir"],
),
};
let network_info = daemon.getnetworkinfo()?;
info!("{:?}", network_info);
if network_info.version < 16_00_00 {
bail!(
"{} is not supported - please use bitcoind 0.16+",
network_info.subversion,
)
pub fn connect(config: &Config) -> Result<Self> {
let rpc = rpc_connect(config)?;
let network_info = rpc.get_network_info()?;
if network_info.version < 21_00_00 {
bail!("electrs requires bitcoind 0.21+");
}
let blockchain_info = daemon.getblockchaininfo()?;
info!("{:?}", blockchain_info);
if !network_info.network_active {
bail!("electrs requires active bitcoind p2p network");
}
let blockchain_info = rpc.get_blockchain_info()?;
if blockchain_info.pruned {
bail!("pruned node is not supported (use '-prune=0' bitcoind flag)".to_owned())
bail!("electrs requires non-pruned bitcoind node");
}
loop {
let info = daemon.getblockchaininfo()?;
if !info.initialblockdownload {
break;
}
if network == Network::Regtest && info.headers == info.blocks {
break;
}
warn!(
"wait until IBD is over: headers={} blocks={} progress={}",
info.headers, info.blocks, info.verificationprogress
);
signal.wait(Duration::from_secs(3))?;
}
Ok(daemon)
let p2p = Mutex::new(Connection::connect(config.network, config.daemon_p2p_addr)?);
Ok(Self { p2p, rpc })
}
pub fn reconnect(&self) -> Result<Daemon> {
Ok(Daemon {
daemon_dir: self.daemon_dir.clone(),
blocks_dir: self.blocks_dir.clone(),
network: self.network,
conn: Mutex::new(self.conn.lock().unwrap().reconnect()?),
message_id: Counter::new(),
signal: self.signal.clone(),
blocktxids_cache: Arc::clone(&self.blocktxids_cache),
latency: self.latency.clone(),
size: self.size.clone(),
})
pub(crate) fn estimate_fee(&self, nblocks: u16) -> Result<Option<Amount>> {
Ok(self
.rpc
.estimate_smart_fee(nblocks, None)
.context("failed to estimate fee")?
.fee_rate)
}
pub fn list_blk_files(&self) -> Result<Vec<PathBuf>> {
let path = self.blocks_dir.join("blk*.dat");
info!("listing block files at {:?}", path);
let mut paths: Vec<PathBuf> = glob::glob(path.to_str().unwrap())
.chain_err(|| "failed to list blk*.dat files")?
.map(std::result::Result::unwrap)
.collect();
paths.sort();
Ok(paths)
pub(crate) fn get_relay_fee(&self) -> Result<Amount> {
Ok(self
.rpc
.get_network_info()
.context("failed to get relay fee")?
.relay_fee)
}
pub fn magic(&self) -> u32 {
self.network.magic()
pub(crate) fn broadcast(&self, tx: &Transaction) -> Result<Txid> {
self.rpc
.send_raw_transaction(tx)
.context("failed to broadcast transaction")
}
fn call_jsonrpc(&self, method: &str, request: &Value) -> Result<Value> {
let mut conn = self.conn.lock().unwrap();
let timer = self.latency.with_label_values(&[method]).start_timer();
let request = request.to_string();
conn.send(&request)?;
self.size
.with_label_values(&[method, "send"])
.observe(request.len() as f64);
let response = conn.recv()?;
let result: Value = from_str(&response).chain_err(|| "invalid JSON")?;
timer.observe_duration();
self.size
.with_label_values(&[method, "recv"])
.observe(response.len() as f64);
Ok(result)
}
fn handle_request_batch(&self, method: &str, params_list: &[Value]) -> Result<Vec<Value>> {
let id = self.message_id.next();
let reqs = params_list
.iter()
.map(|params| json!({"method": method, "params": params, "id": id}))
.collect();
let mut results = vec![];
let mut replies = self.call_jsonrpc(method, &reqs)?;
if let Some(replies_vec) = replies.as_array_mut() {
for reply in replies_vec {
results.push(parse_jsonrpc_reply(reply.take(), method, id)?)
}
return Ok(results);
}
bail!("non-array replies: {:?}", replies);
}
fn retry_request_batch(&self, method: &str, params_list: &[Value]) -> Result<Vec<Value>> {
loop {
match self.handle_request_batch(method, params_list) {
Err(Error(ErrorKind::Connection(msg), _)) => {
warn!("reconnecting to bitcoind: {}", msg);
self.signal.wait(Duration::from_secs(3))?;
let mut conn = self.conn.lock().unwrap();
*conn = conn.reconnect()?;
continue;
}
result => return result,
}
}
}
fn request(&self, method: &str, params: Value) -> Result<Value> {
let mut values = self.retry_request_batch(method, &[params])?;
assert_eq!(values.len(), 1);
Ok(values.remove(0))
}
fn requests(&self, method: &str, params_list: &[Value]) -> Result<Vec<Value>> {
self.retry_request_batch(method, params_list)
}
// bitcoind JSONRPC API:
fn getblockchaininfo(&self) -> Result<BlockchainInfo> {
let info: Value = self.request("getblockchaininfo", json!([]))?;
Ok(from_value(info).chain_err(|| "invalid blockchain info")?)
}
fn getnetworkinfo(&self) -> Result<NetworkInfo> {
let info: Value = self.request("getnetworkinfo", json!([]))?;
Ok(from_value(info).chain_err(|| "invalid network info")?)
}
pub fn get_subversion(&self) -> Result<String> {
Ok(self.getnetworkinfo()?.subversion)
}
pub fn get_relayfee(&self) -> Result<f64> {
Ok(self.getnetworkinfo()?.relayfee)
}
pub fn getbestblockhash(&self) -> Result<BlockHash> {
parse_hash(&self.request("getbestblockhash", json!([]))?).chain_err(|| "invalid blockhash")
}
pub fn getblockheader(&self, blockhash: &BlockHash) -> Result<BlockHeader> {
header_from_value(self.request(
"getblockheader",
json!([blockhash.to_hex(), /*verbose=*/ false]),
)?)
}
pub fn getblockheaders(&self, heights: &[usize]) -> Result<Vec<BlockHeader>> {
let heights: Vec<Value> = heights.iter().map(|height| json!([height])).collect();
let params_list: Vec<Value> = self
.requests("getblockhash", &heights)?
.into_iter()
.map(|hash| json!([hash, /*verbose=*/ false]))
.collect();
let mut result = vec![];
for h in self.requests("getblockheader", &params_list)? {
result.push(header_from_value(h)?);
}
Ok(result)
}
pub fn getblock(&self, blockhash: &BlockHash) -> Result<Block> {
let block = block_from_value(
self.request("getblock", json!([blockhash.to_hex(), /*verbose=*/ false]))?,
)?;
assert_eq!(block.block_hash(), *blockhash);
Ok(block)
}
fn load_blocktxids(&self, blockhash: &BlockHash) -> Result<Vec<Txid>> {
self.request("getblock", json!([blockhash.to_hex(), /*verbose=*/ 1]))?
.get("tx")
.chain_err(|| "block missing txids")?
.as_array()
.chain_err(|| "invalid block txids")?
.iter()
.map(parse_hash)
.collect::<Result<Vec<Txid>>>()
}
pub fn getblocktxids(&self, blockhash: &BlockHash) -> Result<Vec<Txid>> {
self.blocktxids_cache
.get_or_else(&blockhash, || self.load_blocktxids(blockhash))
}
pub fn getblocks(&self, blockhashes: &[BlockHash]) -> Result<Vec<Block>> {
let params_list: Vec<Value> = blockhashes
.iter()
.map(|hash| json!([hash.to_hex(), /*verbose=*/ false]))
.collect();
let values = self.requests("getblock", &params_list)?;
let mut blocks = vec![];
for value in values {
blocks.push(block_from_value(value)?);
}
Ok(blocks)
}
pub fn gettransaction(
pub(crate) fn get_transaction_info(
&self,
txhash: &Txid,
txid: &Txid,
blockhash: Option<BlockHash>,
) -> Result<json::GetRawTransactionResult> {
self.rpc
.get_raw_transaction_info(txid, blockhash.as_ref())
.context("failed to get transaction info")
}
pub(crate) fn get_transaction_hex(
&self,
txid: &Txid,
blockhash: Option<BlockHash>,
) -> Result<json::GetRawTransactionResult> {
self.rpc
.get_raw_transaction_info(txid, blockhash.as_ref())
.context("failed to get transaction info")
}
pub(crate) fn get_transaction(
&self,
txid: &Txid,
blockhash: Option<BlockHash>,
) -> Result<Transaction> {
let mut args = json!([txhash.to_hex(), /*verbose=*/ false]);
if let Some(blockhash) = blockhash {
args.as_array_mut().unwrap().push(json!(blockhash.to_hex()));
}
tx_from_value(self.request("getrawtransaction", args)?)
self.rpc
.get_raw_transaction(txid, blockhash.as_ref())
.context("failed to get transaction")
}
pub fn gettransaction_raw(
&self,
txhash: &Txid,
blockhash: Option<BlockHash>,
verbose: bool,
) -> Result<Value> {
let mut args = json!([txhash.to_hex(), verbose]);
if let Some(blockhash) = blockhash {
args.as_array_mut().unwrap().push(json!(blockhash.to_hex()));
}
Ok(self.request("getrawtransaction", args)?)
pub(crate) fn get_block_txids(&self, blockhash: BlockHash) -> Result<Vec<Txid>> {
Ok(self
.rpc
.get_block_info(&blockhash)
.context("failed to get block txids")?
.tx)
}
pub fn gettransactions(&self, txhashes: &[&Txid]) -> Result<Vec<Transaction>> {
let params_list: Vec<Value> = txhashes
pub(crate) fn get_mempool_txids(&self) -> Result<Vec<Txid>> {
self.rpc
.get_raw_mempool()
.context("failed to get mempool txids")
}
pub(crate) fn get_mempool_entry(&self, txid: &Txid) -> Result<json::GetMempoolEntryResult> {
self.rpc
.get_mempool_entry(txid)
.context("failed to get mempool entry")
}
pub(crate) fn get_new_headers(&self, chain: &Chain) -> Result<Vec<NewHeader>> {
let mut conn = self.p2p.lock().unwrap();
let msg = GetHeadersMessage::new(chain.locator(), BlockHash::default());
conn.send(NetworkMessage::GetHeaders(msg))?;
let headers = match conn.recv()? {
NetworkMessage::Headers(headers) => headers,
msg => bail!("unexpected {:?}", msg),
};
debug!("got {} new headers", headers.len());
let prev_blockhash = match headers.first().map(|h| h.prev_blockhash) {
None => return Ok(vec![]),
Some(prev_blockhash) => prev_blockhash,
};
let new_heights = match chain.get_block_height(&prev_blockhash) {
Some(last_height) => (last_height + 1)..,
None => bail!("missing prev_blockhash: {}", prev_blockhash),
};
Ok(headers
.into_iter()
.zip(new_heights)
.map(NewHeader::from)
.collect())
}
pub(crate) fn for_blocks<B, F>(&self, blockhashes: B, mut func: F) -> Result<()>
where
B: IntoIterator<Item = BlockHash>,
F: FnMut(BlockHash, Block),
{
let mut conn = self.p2p.lock().unwrap();
let blockhashes = Vec::from_iter(blockhashes);
if blockhashes.is_empty() {
return Ok(());
}
let inv = blockhashes
.iter()
.map(|txhash| json!([txhash.to_hex(), /*verbose=*/ false]))
.map(|h| Inventory::WitnessBlock(*h))
.collect();
let values = self.requests("getrawtransaction", &params_list)?;
let mut txs = vec![];
for value in values {
txs.push(tx_from_value(value)?);
debug!("loading {} blocks", blockhashes.len());
conn.send(NetworkMessage::GetData(inv))?;
for hash in blockhashes {
match conn.recv()? {
NetworkMessage::Block(block) => {
assert_eq!(block.block_hash(), hash, "got unexpected block");
func(hash, block);
}
msg => bail!("unexpected {:?}", msg),
};
}
Ok(())
}
assert_eq!(txhashes.len(), txs.len());
Ok(txs)
}
pub fn getmempooltxids(&self) -> Result<HashSet<Txid>> {
let txids: Value = self.request("getrawmempool", json!([/*verbose=*/ false]))?;
let mut result = HashSet::new();
for value in txids.as_array().chain_err(|| "non-array result")? {
result.insert(parse_hash(&value).chain_err(|| "invalid txid")?);
}
Ok(result)
}
fn build_version_message() -> NetworkMessage {
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Time error")
.as_secs() as i64;
pub fn getmempoolentry(&self, txid: &Txid) -> Result<MempoolEntry> {
let entry = self.request("getmempoolentry", json!([txid.to_hex()]))?;
let fee = (entry
.get("fee")
.chain_err(|| "missing fee")?
.as_f64()
.chain_err(|| "non-float fee")?
* 100_000_000f64) as u64;
let vsize = entry
.get("size")
.or_else(|| entry.get("vsize")) // (https://github.com/bitcoin/bitcoin/pull/15637)
.chain_err(|| "missing vsize")?
.as_u64()
.chain_err(|| "non-integer vsize")? as u32;
Ok(MempoolEntry::new(fee, vsize))
}
let services = constants::ServiceFlags::NETWORK | constants::ServiceFlags::WITNESS;
pub fn broadcast(&self, tx: &Transaction) -> Result<Txid> {
let tx = hex::encode(serialize(tx));
let txid = self.request("sendrawtransaction", json!([tx]))?;
Ok(
Txid::from_hex(txid.as_str().chain_err(|| "non-string txid")?)
.chain_err(|| "failed to parse txid")?,
)
}
fn get_all_headers(&self, tip: &BlockHash) -> Result<Vec<BlockHeader>> {
let info: Value = self.request("getblockheader", json!([tip.to_hex()]))?;
let tip_height = info
.get("height")
.expect("missing height")
.as_u64()
.expect("non-numeric height") as usize;
let all_heights: Vec<usize> = (0..=tip_height).collect();
let chunk_size = 100_000;
let mut result = vec![];
let null_hash = BlockHash::default();
for heights in all_heights.chunks(chunk_size) {
trace!("downloading {} block headers", heights.len());
let mut headers = self.getblockheaders(&heights)?;
assert!(headers.len() == heights.len());
result.append(&mut headers);
}
let mut blockhash = null_hash;
for header in &result {
assert_eq!(header.prev_blockhash, blockhash);
blockhash = header.block_hash();
}
assert_eq!(blockhash, *tip);
Ok(result)
}
// Returns a list of BlockHeaders in ascending height (i.e. the tip is last).
pub fn get_new_headers(
&self,
indexed_headers: &HeaderList,
bestblockhash: &BlockHash,
) -> Result<Vec<BlockHeader>> {
// Iterate back over headers until a known blockhash is found:
if indexed_headers.is_empty() {
return self.get_all_headers(bestblockhash);
}
debug!(
"downloading new block headers ({} already indexed) from {}",
indexed_headers.len(),
bestblockhash,
);
let mut new_headers = vec![];
let null_hash = BlockHash::default();
let mut blockhash = *bestblockhash;
while blockhash != null_hash {
if indexed_headers.header_by_blockhash(&blockhash).is_some() {
break;
}
let header = self
.getblockheader(&blockhash)
.chain_err(|| format!("failed to get {} header", blockhash))?;
new_headers.push(header);
blockhash = header.prev_blockhash;
}
trace!("downloaded {} block headers", new_headers.len());
new_headers.reverse(); // so the tip is the last vector entry
Ok(new_headers)
}
NetworkMessage::Version(message_network::VersionMessage {
version: constants::PROTOCOL_VERSION,
services,
timestamp,
receiver: address::Address::new(&addr, services),
sender: address::Address::new(&addr, services),
nonce: secp256k1::rand::thread_rng().gen(),
user_agent: String::from("electrs"),
start_height: 0,
relay: false,
})
}
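A hedged end-to-end sketch (assuming a parsed Config and a Chain instance) of how this Daemon is meant to be driven: connect over RPC and p2p, fetch new headers, then stream the corresponding blocks:
// Hedged sketch: header/block sync loop built only from the methods defined above.
fn sync_once(config: &Config, chain: &mut Chain) -> Result<()> {
    let daemon = Daemon::connect(config)?;              // RPC checks + p2p version handshake
    let new_headers = daemon.get_new_headers(chain)?;   // getheaders starting from chain.locator()
    let hashes: Vec<BlockHash> = new_headers.iter().map(|h| h.hash()).collect();
    chain.update(new_headers);
    daemon.for_blocks(hashes, |_hash, _block| {
        // index each block here; blocks arrive in the order they were requested
    })
}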

src/db.rs Normal file

@ -0,0 +1,276 @@
use anyhow::{Context, Result};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
pub(crate) type Row = Box<[u8]>;
#[derive(Default)]
pub(crate) struct WriteBatch {
pub(crate) tip_row: Row,
pub(crate) header_rows: Vec<Row>,
pub(crate) funding_rows: Vec<Row>,
pub(crate) spending_rows: Vec<Row>,
pub(crate) txid_rows: Vec<Row>,
}
impl WriteBatch {
pub(crate) fn sort(&mut self) {
self.header_rows.sort_unstable();
self.funding_rows.sort_unstable();
self.spending_rows.sort_unstable();
self.txid_rows.sort_unstable();
}
}
#[derive(Debug)]
struct Options {
path: PathBuf,
}
/// RocksDB wrapper for index storage
pub struct DBStore {
db: rocksdb::DB,
path: PathBuf,
bulk_import: AtomicBool,
cfs: Vec<&'static str>,
}
const CONFIG_CF: &str = "config";
const HEADERS_CF: &str = "headers";
const TXID_CF: &str = "txid";
const FUNDING_CF: &str = "funding";
const SPENDING_CF: &str = "spending";
const CONFIG_KEY: &str = "C";
const TIP_KEY: &[u8] = b"T";
#[derive(Debug, Deserialize, Serialize)]
struct Config {
compacted: bool,
format: u64,
}
const CURRENT_FORMAT: u64 = 0;
fn default_opts() -> rocksdb::Options {
let mut opts = rocksdb::Options::default();
opts.set_keep_log_file_num(10);
opts.set_max_open_files(16);
opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
opts.set_compression_type(rocksdb::DBCompressionType::Zstd);
opts.set_target_file_size_base(256 << 20);
opts.set_write_buffer_size(256 << 20);
opts.set_disable_auto_compactions(true); // for initial bulk load
opts.set_advise_random_on_open(false); // bulk load uses sequential I/O
opts.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix(8));
opts
}
impl DBStore {
/// Opens a new RocksDB at the specified location.
pub fn open(path: &Path) -> Result<Self> {
let cfs = vec![CONFIG_CF, HEADERS_CF, TXID_CF, FUNDING_CF, SPENDING_CF];
let cf_descriptors: Vec<rocksdb::ColumnFamilyDescriptor> = cfs
.iter()
.map(|&name| rocksdb::ColumnFamilyDescriptor::new(name, default_opts()))
.collect();
let mut db_opts = default_opts();
db_opts.create_if_missing(true);
db_opts.create_missing_column_families(true);
let db = rocksdb::DB::open_cf_descriptors(&db_opts, path, cf_descriptors)
.with_context(|| format!("failed to open DB: {:?}", path))?;
let live_files = db.live_files()?;
info!(
"{:?}: {} SST files, {} GB, {} Grows",
path,
live_files.len(),
live_files.iter().map(|f| f.size).sum::<usize>() as f64 / 1e9,
live_files.iter().map(|f| f.num_entries).sum::<u64>() as f64 / 1e9
);
let store = DBStore {
db,
path: path.to_path_buf(),
cfs,
bulk_import: AtomicBool::new(true),
};
let config = store.get_config();
debug!("DB {:?}", config);
if config.format != CURRENT_FORMAT {
bail!("unsupported DB format {}, re-index required", config.format);
}
if config.compacted {
store.start_compactions();
}
store.set_config(config);
Ok(store)
}
fn config_cf(&self) -> &rocksdb::ColumnFamily {
self.db.cf_handle(CONFIG_CF).expect("missing CONFIG_CF")
}
fn funding_cf(&self) -> &rocksdb::ColumnFamily {
self.db.cf_handle(FUNDING_CF).expect("missing FUNDING_CF")
}
fn spending_cf(&self) -> &rocksdb::ColumnFamily {
self.db.cf_handle(SPENDING_CF).expect("missing SPENDING_CF")
}
fn txid_cf(&self) -> &rocksdb::ColumnFamily {
self.db.cf_handle(TXID_CF).expect("missing TXID_CF")
}
fn headers_cf(&self) -> &rocksdb::ColumnFamily {
self.db.cf_handle(HEADERS_CF).expect("missing HEADERS_CF")
}
pub(crate) fn iter_funding(&self, prefix: Row) -> ScanIterator {
self.iter_prefix_cf(self.funding_cf(), prefix)
}
pub(crate) fn iter_spending(&self, prefix: Row) -> ScanIterator {
self.iter_prefix_cf(self.spending_cf(), prefix)
}
pub(crate) fn iter_txid(&self, prefix: Row) -> ScanIterator {
self.iter_prefix_cf(self.txid_cf(), prefix)
}
fn iter_prefix_cf<'a>(&'a self, cf: &rocksdb::ColumnFamily, prefix: Row) -> ScanIterator<'a> {
let mode = rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward);
let iter = self.db.iterator_cf(cf, mode);
ScanIterator {
prefix,
iter,
done: false,
}
}
pub(crate) fn read_headers(&self) -> Vec<Row> {
let mut opts = rocksdb::ReadOptions::default();
opts.fill_cache(false);
self.db
.iterator_cf_opt(self.headers_cf(), opts, rocksdb::IteratorMode::Start)
.map(|(key, _)| key)
.filter(|key| &key[..] != TIP_KEY) // headers' rows are longer than TIP_KEY
.collect()
}
pub(crate) fn get_tip(&self) -> Option<Vec<u8>> {
self.db
.get_cf(self.headers_cf(), TIP_KEY)
.expect("get_tip failed")
}
pub(crate) fn write(&self, batch: WriteBatch) -> usize {
let mut db_batch = rocksdb::WriteBatch::default();
let mut total_rows_count = 0;
for key in batch.funding_rows {
db_batch.put_cf(self.funding_cf(), key, b"");
total_rows_count += 1;
}
for key in batch.spending_rows {
db_batch.put_cf(self.spending_cf(), key, b"");
total_rows_count += 1;
}
for key in batch.txid_rows {
db_batch.put_cf(self.txid_cf(), key, b"");
total_rows_count += 1;
}
for key in batch.header_rows {
db_batch.put_cf(self.headers_cf(), key, b"");
total_rows_count += 1;
}
db_batch.put_cf(self.headers_cf(), TIP_KEY, batch.tip_row);
let mut opts = rocksdb::WriteOptions::new();
let bulk_import = self.bulk_import.load(Ordering::Relaxed);
opts.set_sync(!bulk_import);
opts.disable_wal(bulk_import);
self.db.write_opt(db_batch, &opts).unwrap();
total_rows_count
}
pub(crate) fn flush(&self) {
let mut config = self.get_config();
for name in &self.cfs {
let cf = self.db.cf_handle(name).expect("missing CF");
self.db.flush_cf(cf).expect("CF flush failed");
}
if !config.compacted {
for name in &self.cfs {
info!("starting {} compaction", name);
let cf = self.db.cf_handle(name).expect("missing CF");
self.db.compact_range_cf(cf, None::<&[u8]>, None::<&[u8]>);
}
config.compacted = true;
self.set_config(config);
info!("finished full compaction");
self.start_compactions();
}
}
fn start_compactions(&self) {
self.bulk_import.store(false, Ordering::Relaxed);
for name in &self.cfs {
let cf = self.db.cf_handle(name).expect("missing CF");
self.db
.set_options_cf(cf, &[("disable_auto_compactions", "false")])
.expect("failed to start auto-compactions");
}
debug!("auto-compactions enabled");
}
fn set_config(&self, config: Config) {
let mut opts = rocksdb::WriteOptions::default();
opts.set_sync(true);
opts.disable_wal(false);
let value = serde_json::to_vec(&config).expect("failed to serialize config");
self.db
.put_cf_opt(self.config_cf(), CONFIG_KEY, value, &opts)
.expect("DB::put failed");
}
fn get_config(&self) -> Config {
self.db
.get_cf(self.config_cf(), CONFIG_KEY)
.expect("DB::get failed")
.map(|value| serde_json::from_slice(&value).expect("failed to deserialize Config"))
.unwrap_or_else(|| Config {
compacted: false,
format: CURRENT_FORMAT,
})
}
}
pub(crate) struct ScanIterator<'a> {
prefix: Row,
iter: rocksdb::DBIterator<'a>,
done: bool,
}
impl<'a> Iterator for ScanIterator<'a> {
type Item = Row;
fn next(&mut self) -> Option<Row> {
if self.done {
return None;
}
let (key, _) = self.iter.next()?;
if !key.starts_with(&self.prefix) {
self.done = true;
return None;
}
Some(key)
}
}
impl Drop for DBStore {
fn drop(&mut self) {
info!("closing DB at {:?}", self.path);
}
}
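A short hedged sketch of how the prefix iterators above behave: ScanIterator stops at the first key that no longer starts with the given prefix, so counting matches is a plain iterator chain:
// Hedged sketch: count index rows sharing a key prefix (Row = Box<[u8]>).
fn count_funding_rows(store: &DBStore, prefix: Row) -> usize {
    store.iter_funding(prefix).count()
}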

src/electrum.rs Normal file

@ -0,0 +1,321 @@
use anyhow::{bail, Context, Result};
use bitcoin::{
consensus::{deserialize, serialize},
hashes::hex::{FromHex, ToHex},
BlockHash, Txid,
};
use rayon::prelude::*;
use serde_derive::{Deserialize, Serialize};
use serde_json::{from_value, json, Value};
use std::collections::HashMap;
use std::iter::FromIterator;
use crate::{
cache::Cache, config::Config, daemon::Daemon, merkle::Proof, metrics::Histogram,
status::Status, tracker::Tracker, types::ScriptHash,
};
const ELECTRS_VERSION: &str = env!("CARGO_PKG_VERSION");
const PROTOCOL_VERSION: &str = "1.4";
const UNKNOWN_FEE: isize = -1; // (allowed by Electrum protocol)
/// Per-client Electrum protocol state
#[derive(Default)]
pub struct Client {
tip: Option<BlockHash>,
status: HashMap<ScriptHash, Status>,
}
#[derive(Debug, Deserialize, Serialize)]
struct Request {
id: Value,
jsonrpc: String,
method: String,
#[serde(default)]
params: Value,
}
#[derive(Deserialize, Debug, PartialEq, Eq)]
#[serde(untagged)]
enum Version {
Single(String),
Range(String, String),
}
#[derive(Deserialize)]
#[serde(untagged)]
enum TxGetArgs {
Txid((Txid,)),
TxidVerbose(Txid, bool),
}
impl From<TxGetArgs> for (Txid, bool) {
fn from(args: TxGetArgs) -> Self {
match args {
TxGetArgs::Txid((txid,)) => (txid, false),
TxGetArgs::TxidVerbose(txid, verbose) => (txid, verbose),
}
}
}
/// Electrum RPC handler
pub struct Rpc {
tracker: Tracker,
cache: Cache,
rpc_duration: Histogram,
daemon: Daemon,
banner: String,
}
impl Rpc {
pub fn new(config: &Config, tracker: Tracker) -> Result<Self> {
let rpc_duration = tracker.metrics().histogram_vec(
"rpc_duration",
"RPC duration (in seconds)",
&["method"],
);
Ok(Self {
tracker,
cache: Cache::new(),
rpc_duration,
daemon: Daemon::connect(&config)?,
banner: config.server_banner.clone(),
})
}
pub fn sync(&mut self) -> Result<()> {
self.tracker.sync(&self.daemon)
}
pub fn update_client(&self, client: &mut Client) -> Result<Vec<Value>> {
let chain = self.tracker.chain();
let mut notifications = client
.status
.par_iter_mut()
.filter_map(|(scripthash, status)| -> Option<Result<Value>> {
match self
.tracker
.update_status(status, &self.daemon, &self.cache)
{
Ok(true) => Some(Ok(notification(
"blockchain.scripthash.subscribe",
&[json!(scripthash), json!(status.statushash())],
))),
Ok(false) => None, // statushash is the same
Err(e) => Some(Err(e)),
}
})
.collect::<Result<Vec<Value>>>()
.context("failed to update status")?;
if let Some(old_tip) = client.tip {
let new_tip = self.tracker.chain().tip();
if old_tip != new_tip {
client.tip = Some(new_tip);
let height = chain.height();
let header = chain.get_block_header(height).unwrap();
notifications.push(notification(
"blockchain.headers.subscribe",
&[json!({"hex": serialize(&header).to_hex(), "height": height})],
));
}
}
Ok(notifications)
}
pub fn handle_request(&self, client: &mut Client, value: Value) -> Result<Value> {
let Request {
id,
jsonrpc,
method,
params,
} = from_value(value).context("invalid request")?;
self.rpc_duration.observe_duration(&method, || {
let result = match method.as_str() {
"blockchain.scripthash.get_history" => {
self.scripthash_get_history(client, from_value(params)?)
}
"blockchain.scripthash.subscribe" => {
self.scripthash_subscribe(client, from_value(params)?)
}
"blockchain.transaction.broadcast" => {
self.transaction_broadcast(from_value(params)?)
}
"blockchain.transaction.get" => self.transaction_get(from_value(params)?),
"blockchain.transaction.get_merkle" => {
self.transaction_get_merkle(from_value(params)?)
}
"server.banner" => Ok(json!(self.banner)),
"server.donation_address" => Ok(Value::Null),
"server.peers.subscribe" => Ok(json!([])),
"blockchain.block.header" => self.block_header(from_value(params)?),
"blockchain.block.headers" => self.block_headers(from_value(params)?),
"blockchain.estimatefee" => self.estimate_fee(from_value(params)?),
"blockchain.headers.subscribe" => self.headers_subscribe(client),
"blockchain.relayfee" => self.relayfee(),
"mempool.get_fee_histogram" => self.get_fee_histogram(),
"server.ping" => Ok(Value::Null),
"server.version" => self.version(from_value(params)?),
&_ => bail!("unknown method '{}' with {}", method, params,),
};
Ok(match result {
Ok(value) => json!({"jsonrpc": jsonrpc, "id": id, "result": value}),
Err(err) => {
let msg = format!("RPC failed: {:#}", err);
warn!("{}", msg);
let error = json!({"code": 1, "message": msg});
json!({"jsonrpc": jsonrpc, "id": id, "error": error})
}
})
})
}
fn headers_subscribe(&self, client: &mut Client) -> Result<Value> {
let chain = self.tracker.chain();
client.tip = Some(chain.tip());
let height = chain.height();
let header = chain.get_block_header(height).unwrap();
Ok(json!({"hex": serialize(header).to_hex(), "height": height}))
}
fn block_header(&self, (height,): (usize,)) -> Result<Value> {
let chain = self.tracker.chain();
let header = match chain.get_block_header(height) {
None => bail!("no header at {}", height),
Some(header) => header,
};
Ok(json!(serialize(header).to_hex()))
}
fn block_headers(&self, (start_height, count): (usize, usize)) -> Result<Value> {
let chain = self.tracker.chain();
let max_count = 2016usize;
let count = std::cmp::min(
std::cmp::min(count, max_count),
chain.height() - start_height + 1,
);
let heights = start_height..(start_height + count);
let hex_headers = String::from_iter(
heights.map(|height| serialize(chain.get_block_header(height).unwrap()).to_hex()),
);
Ok(json!({"count": count, "hex": hex_headers, "max": max_count}))
}
fn estimate_fee(&self, (nblocks,): (u16,)) -> Result<Value> {
Ok(self
.daemon
.estimate_fee(nblocks)?
.map(|fee_rate| json!(fee_rate.as_btc()))
.unwrap_or_else(|| json!(UNKNOWN_FEE)))
}
fn relayfee(&self) -> Result<Value> {
Ok(json!(self.daemon.get_relay_fee()?.as_btc())) // [BTC/kB]
}
fn scripthash_get_history(
&self,
client: &Client,
(scripthash,): (ScriptHash,),
) -> Result<Value> {
let status = client
.status
.get(&scripthash)
.context("no subscription for scripthash")?;
Ok(json!(self
.tracker
.get_history(status)
.collect::<Vec<Value>>()))
}
fn scripthash_subscribe(
&self,
client: &mut Client,
(scripthash,): (ScriptHash,),
) -> Result<Value> {
let mut status = Status::new(scripthash);
self.tracker
.update_status(&mut status, &self.daemon, &self.cache)?;
let statushash = status.statushash();
client.status.insert(scripthash, status); // skip if already exists
Ok(json!(statushash))
}
fn transaction_broadcast(&self, (tx_hex,): (String,)) -> Result<Value> {
let tx_bytes = Vec::from_hex(&tx_hex).context("non-hex transaction")?;
let tx = deserialize(&tx_bytes).context("invalid transaction")?;
let txid = self.daemon.broadcast(&tx)?;
Ok(json!(txid))
}
fn transaction_get(&self, args: TxGetArgs) -> Result<Value> {
let (txid, verbose) = args.into();
if verbose {
let blockhash = self.tracker.get_blockhash_by_txid(txid);
return Ok(json!(self.daemon.get_transaction_info(&txid, blockhash)?));
}
let cached = self.cache.get_tx(&txid, |tx| serialize(tx).to_hex());
Ok(match cached {
Some(tx_hex) => json!(tx_hex),
None => {
debug!("tx cache miss: {}", txid);
let blockhash = self.tracker.get_blockhash_by_txid(txid);
json!(self.daemon.get_transaction_hex(&txid, blockhash)?)
}
})
}
fn transaction_get_merkle(&self, (txid, height): (Txid, usize)) -> Result<Value> {
let chain = self.tracker.chain();
let blockhash = match chain.get_block_hash(height) {
None => bail!("missing block at {}", height),
Some(blockhash) => blockhash,
};
let proof_to_value = |proof: &Proof| {
json!({
"block_height": height,
"pos": proof.position(),
"merkle": proof.to_hex(),
})
};
if let Some(result) = self.cache.get_proof(blockhash, txid, proof_to_value) {
return Ok(result);
}
debug!("txids cache miss: {}", blockhash);
let txids = self.daemon.get_block_txids(blockhash)?;
match txids.iter().position(|current_txid| *current_txid == txid) {
None => bail!("missing tx {} for merkle proof", txid),
Some(position) => Ok(proof_to_value(&Proof::create(&txids, position))),
}
}
fn get_fee_histogram(&self) -> Result<Value> {
Ok(json!(self.tracker.fees_histogram()))
}
fn version(&self, (client_id, client_version): (String, Version)) -> Result<Value> {
match client_version {
Version::Single(v) if v == PROTOCOL_VERSION => (),
_ => {
bail!(
"{} requested {:?}, server supports {}",
client_id,
client_version,
PROTOCOL_VERSION
);
}
};
let server_id = format!("electrs/{}", ELECTRS_VERSION);
Ok(json!([server_id, PROTOCOL_VERSION]))
}
}
fn notification(method: &str, params: &[Value]) -> Value {
json!({"jsonrpc": "2.0", "method": method, "params": params})
}

View File

@ -1,17 +0,0 @@
error_chain! {
types {
Error, ErrorKind, ResultExt, Result;
}
errors {
Connection(msg: String) {
description("Connection error")
display("Connection error: {}", msg)
}
Interrupt(sig: i32) {
description("Interruption by external signal")
display("Interrupted by signal {}", sig)
}
}
}

View File

@ -1,37 +0,0 @@
use crate::store::{ReadStore, Row, WriteStore};
use crate::util::Bytes;
pub struct FakeStore;
impl ReadStore for FakeStore {
fn get(&self, _key: &[u8]) -> Option<Bytes> {
None
}
fn scan(&self, _prefix: &[u8]) -> Vec<Row> {
vec![]
}
}
impl WriteStore for FakeStore {
fn write<I: IntoIterator<Item = Row>>(&self, _rows: I) {}
fn flush(&self) {}
}
#[cfg(test)]
mod tests {
#[test]
fn test_fakestore() {
use crate::fake;
use crate::store::{ReadStore, Row, WriteStore};
let store = fake::FakeStore {};
store.write(vec![Row {
key: b"k".to_vec(),
value: b"v".to_vec(),
}]);
store.flush();
// nothing was actually written
assert!(store.get(b"").is_none());
assert!(store.scan(b"").is_empty());
}
}

View File

@ -1,436 +1,213 @@
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut};
use bitcoin::consensus::encode::{deserialize, serialize};
use bitcoin::hash_types::{BlockHash, Txid};
use crypto::digest::Digest;
use crypto::sha2::Sha256;
use std::collections::{HashMap, HashSet};
use std::iter::FromIterator;
use std::sync::RwLock;
use anyhow::Result;
use bitcoin::consensus::{deserialize, serialize};
use bitcoin::{Block, BlockHash, OutPoint, Txid};
use crate::daemon::Daemon;
use crate::errors::*;
use crate::metrics::{
Counter, Gauge, HistogramOpts, HistogramTimer, HistogramVec, MetricOpts, Metrics,
};
use crate::signal::Waiter;
use crate::store::{ReadStore, Row, WriteStore};
use crate::util::{
full_hash, hash_prefix, spawn_thread, Bytes, FullHash, HashPrefix, HeaderEntry, HeaderList,
HeaderMap, SyncChannel, HASH_PREFIX_LEN,
use std::collections::HashMap;
use crate::{
chain::Chain,
daemon::Daemon,
db,
metrics::{Histogram, Metrics},
types::{HeaderRow, ScriptHash, ScriptHashRow, SpendingPrefixRow, TxidRow},
};
#[derive(Serialize, Deserialize)]
pub struct TxInKey {
pub code: u8,
pub prev_hash_prefix: HashPrefix,
pub prev_index: u16,
}
#[derive(Serialize, Deserialize)]
pub struct TxInRow {
key: TxInKey,
pub txid_prefix: HashPrefix,
}
impl TxInRow {
pub fn new(txid: &Txid, input: &TxIn) -> TxInRow {
TxInRow {
key: TxInKey {
code: b'I',
prev_hash_prefix: hash_prefix(&input.previous_output.txid[..]),
prev_index: input.previous_output.vout as u16,
},
txid_prefix: hash_prefix(&txid[..]),
}
}
pub fn filter(txid: &Txid, output_index: usize) -> Bytes {
bincode::serialize(&TxInKey {
code: b'I',
prev_hash_prefix: hash_prefix(&txid[..]),
prev_index: output_index as u16,
})
.unwrap()
}
pub fn to_row(&self) -> Row {
Row {
key: bincode::serialize(&self).unwrap(),
value: vec![],
}
}
pub fn from_row(row: &Row) -> TxInRow {
bincode::deserialize(&row.key).expect("failed to parse TxInRow")
}
}
#[derive(Serialize, Deserialize)]
pub struct TxOutKey {
code: u8,
script_hash_prefix: HashPrefix,
}
#[derive(Serialize, Deserialize)]
pub struct TxOutRow {
key: TxOutKey,
pub txid_prefix: HashPrefix,
}
impl TxOutRow {
pub fn new(txid: &Txid, output: &TxOut) -> TxOutRow {
TxOutRow {
key: TxOutKey {
code: b'O',
script_hash_prefix: hash_prefix(&compute_script_hash(&output.script_pubkey[..])),
},
txid_prefix: hash_prefix(&txid[..]),
}
}
pub fn filter(script_hash: &[u8]) -> Bytes {
bincode::serialize(&TxOutKey {
code: b'O',
script_hash_prefix: hash_prefix(&script_hash[..HASH_PREFIX_LEN]),
})
.unwrap()
}
pub fn to_row(&self) -> Row {
Row {
key: bincode::serialize(&self).unwrap(),
value: vec![],
}
}
pub fn from_row(row: &Row) -> TxOutRow {
bincode::deserialize(&row.key).expect("failed to parse TxOutRow")
}
}
#[derive(Serialize, Deserialize)]
pub struct TxKey {
code: u8,
pub txid: FullHash,
}
pub struct TxRow {
pub key: TxKey,
pub height: u32, // value
}
impl TxRow {
pub fn new(txid: &Txid, height: u32) -> TxRow {
TxRow {
key: TxKey {
code: b'T',
txid: full_hash(&txid[..]),
},
height,
}
}
pub fn filter_prefix(txid_prefix: HashPrefix) -> Bytes {
[b"T", &txid_prefix[..]].concat()
}
pub fn filter_full(txid: &Txid) -> Bytes {
[b"T", &txid[..]].concat()
}
pub fn to_row(&self) -> Row {
Row {
key: bincode::serialize(&self.key).unwrap(),
value: bincode::serialize(&self.height).unwrap(),
}
}
pub fn from_row(row: &Row) -> TxRow {
TxRow {
key: bincode::deserialize(&row.key).expect("failed to parse TxKey"),
height: bincode::deserialize(&row.value).expect("failed to parse height"),
}
}
}
#[derive(Serialize, Deserialize)]
struct BlockKey {
code: u8,
hash: FullHash,
}
pub fn compute_script_hash(data: &[u8]) -> FullHash {
let mut hash = FullHash::default();
let mut sha2 = Sha256::new();
sha2.input(data);
sha2.result(&mut hash);
hash
}
pub fn index_transaction<'a>(
txn: &'a Transaction,
height: usize,
) -> impl 'a + Iterator<Item = Row> {
let null_hash = Txid::default();
let txid = txn.txid();
let inputs = txn.input.iter().filter_map(move |input| {
if input.previous_output.txid == null_hash {
None
} else {
Some(TxInRow::new(&txid, &input).to_row())
}
});
let outputs = txn
.output
.iter()
.map(move |output| TxOutRow::new(&txid, &output).to_row());
// Persist transaction ID and confirmed height
inputs
.chain(outputs)
.chain(std::iter::once(TxRow::new(&txid, height as u32).to_row()))
}
pub fn index_block<'a>(block: &'a Block, height: usize) -> impl 'a + Iterator<Item = Row> {
let blockhash = block.block_hash();
// Persist block hash and header
let row = Row {
key: bincode::serialize(&BlockKey {
code: b'B',
hash: full_hash(&blockhash[..]),
})
.unwrap(),
value: serialize(&block.header),
};
block
.txdata
.iter()
.flat_map(move |txn| index_transaction(&txn, height))
.chain(std::iter::once(row))
}
pub fn last_indexed_block(blockhash: &BlockHash) -> Row {
// Store last indexed block (i.e. all previous blocks were indexed)
Row {
key: b"L".to_vec(),
value: serialize(blockhash),
}
}
pub fn read_indexed_blockhashes(store: &dyn ReadStore) -> HashSet<BlockHash> {
let mut result = HashSet::new();
for row in store.scan(b"B") {
let key: BlockKey = bincode::deserialize(&row.key).unwrap();
result.insert(deserialize(&key.hash).unwrap());
}
result
}
fn read_indexed_headers(store: &dyn ReadStore) -> HeaderList {
let latest_blockhash: BlockHash = match store.get(b"L") {
// latest blockheader persisted in the DB.
Some(row) => deserialize(&row).unwrap(),
None => BlockHash::default(),
};
trace!("latest indexed blockhash: {}", latest_blockhash);
let mut map = HeaderMap::new();
for row in store.scan(b"B") {
let key: BlockKey = bincode::deserialize(&row.key).unwrap();
let header: BlockHeader = deserialize(&row.value).unwrap();
map.insert(deserialize(&key.hash).unwrap(), header);
}
let mut headers = vec![];
let null_hash = BlockHash::default();
let mut blockhash = latest_blockhash;
while blockhash != null_hash {
let header = map
.remove(&blockhash)
.unwrap_or_else(|| panic!("missing {} header in DB", blockhash));
blockhash = header.prev_blockhash;
headers.push(header);
}
headers.reverse();
assert_eq!(
headers
.first()
.map(|h| h.prev_blockhash)
.unwrap_or(null_hash),
null_hash
);
assert_eq!(
headers
.last()
.map(BlockHeader::block_hash)
.unwrap_or(null_hash),
latest_blockhash
);
let mut result = HeaderList::empty();
let entries = result.order(headers);
result.apply(entries, latest_blockhash);
result
}
#[derive(Clone)]
struct Stats {
blocks: Counter,
txns: Counter,
vsize: Counter,
height: Gauge,
duration: HistogramVec,
update_duration: Histogram,
update_size: Histogram,
lookup_duration: Histogram,
}
impl Stats {
fn new(metrics: &Metrics) -> Stats {
Stats {
blocks: metrics.counter(MetricOpts::new(
"electrs_index_blocks",
"# of indexed blocks",
)),
txns: metrics.counter(MetricOpts::new(
"electrs_index_txns",
"# of indexed transactions",
)),
vsize: metrics.counter(MetricOpts::new(
"electrs_index_vsize",
"# of indexed vbytes",
)),
height: metrics.gauge(MetricOpts::new(
"electrs_index_height",
"Last indexed block's height",
)),
duration: metrics.histogram_vec(
HistogramOpts::new("electrs_index_duration", "indexing duration (in seconds)"),
fn new(metrics: &Metrics) -> Self {
Self {
update_duration: metrics.histogram_vec(
"index_update_duration",
"Index update duration (in seconds)",
&["step"],
),
update_size: metrics.histogram_vec(
"index_update_size",
"Index update size (in bytes)",
&["step"],
),
lookup_duration: metrics.histogram_vec(
"index_lookup_duration",
"Index lookup duration (in seconds)",
&["step"],
),
}
}
fn update(&self, block: &Block, height: usize) {
self.blocks.inc();
self.txns.inc_by(block.txdata.len() as i64);
for tx in &block.txdata {
self.vsize.inc_by(tx.get_weight() as i64 / 4);
}
self.update_height(height);
}
fn update_height(&self, height: usize) {
self.height.set(height as i64);
}
fn start_timer(&self, step: &str) -> HistogramTimer {
self.duration.with_label_values(&[step]).start_timer()
fn report_stats(&self, batch: &db::WriteBatch) {
self.update_size
.observe_size("write_funding_rows", db_rows_size(&batch.funding_rows));
self.update_size
.observe_size("write_spending_rows", db_rows_size(&batch.spending_rows));
self.update_size
.observe_size("write_txid_rows", db_rows_size(&batch.txid_rows));
self.update_size
.observe_size("write_header_rows", db_rows_size(&batch.header_rows));
debug!(
"writing {} funding and {} spending rows from {} transactions, {} blocks",
batch.funding_rows.len(),
batch.spending_rows.len(),
batch.txid_rows.len(),
batch.header_rows.len()
);
}
}
struct IndexResult {
header_row: HeaderRow,
funding_rows: Vec<ScriptHashRow>,
spending_rows: Vec<SpendingPrefixRow>,
txid_rows: Vec<TxidRow>,
}
impl IndexResult {
fn extend(&self, batch: &mut db::WriteBatch) {
let funding_rows = self.funding_rows.iter().map(ScriptHashRow::to_db_row);
let spending_rows = self.spending_rows.iter().map(SpendingPrefixRow::to_db_row);
let txid_rows = self.txid_rows.iter().map(TxidRow::to_db_row);
batch.funding_rows.extend(funding_rows);
batch.spending_rows.extend(spending_rows);
batch.txid_rows.extend(txid_rows);
batch.header_rows.push(self.header_row.to_db_row());
batch.tip_row = serialize(&self.header_row.header.block_hash()).into_boxed_slice();
}
}
/// Confirmed transactions' address index
pub struct Index {
// TODO: store also latest snapshot.
headers: RwLock<HeaderList>,
daemon: Daemon,
store: db::DBStore,
chain: Chain,
stats: Stats,
batch_size: usize,
}
impl Index {
pub fn load(
store: &dyn ReadStore,
daemon: &Daemon,
metrics: &Metrics,
batch_size: usize,
) -> Result<Index> {
let stats = Stats::new(metrics);
let headers = read_indexed_headers(store);
stats.height.set((headers.len() as i64) - 1);
pub(crate) fn load(store: db::DBStore, mut chain: Chain, metrics: &Metrics) -> Result<Self> {
if let Some(row) = store.get_tip() {
let tip = deserialize(&row).expect("invalid tip");
let headers = store
.read_headers()
.into_iter()
.map(|row| HeaderRow::from_db_row(&row).header)
.collect();
chain.load(headers, tip);
};
Ok(Index {
headers: RwLock::new(headers),
daemon: daemon.reconnect()?,
stats,
batch_size,
store,
chain,
stats: Stats::new(metrics),
})
}
pub fn reload(&self, store: &dyn ReadStore) {
let mut headers = self.headers.write().unwrap();
*headers = read_indexed_headers(store);
pub(crate) fn chain(&self) -> &Chain {
&self.chain
}
pub fn best_header(&self) -> Option<HeaderEntry> {
let headers = self.headers.read().unwrap();
headers.header_by_blockhash(&headers.tip()).cloned()
pub(crate) fn filter_by_txid<'a>(&'a self, txid: Txid) -> impl Iterator<Item = BlockHash> + 'a {
self.store
.iter_txid(TxidRow::scan_prefix(txid))
.map(|row| TxidRow::from_db_row(&row).height())
.filter_map(move |height| self.chain.get_block_hash(height))
}
pub fn get_header(&self, height: usize) -> Option<HeaderEntry> {
self.headers
.read()
.unwrap()
.header_by_height(height)
.cloned()
pub(crate) fn filter_by_funding<'a>(
&'a self,
scripthash: ScriptHash,
) -> impl Iterator<Item = BlockHash> + 'a {
self.store
.iter_funding(ScriptHashRow::scan_prefix(scripthash))
.map(|row| ScriptHashRow::from_db_row(&row).height())
.filter_map(move |height| self.chain.get_block_hash(height))
}
pub fn update(&self, store: &impl WriteStore, waiter: &Waiter) -> Result<BlockHash> {
let daemon = self.daemon.reconnect()?;
let tip = daemon.getbestblockhash()?;
let new_headers: Vec<HeaderEntry> = {
let indexed_headers = self.headers.read().unwrap();
indexed_headers.order(daemon.get_new_headers(&indexed_headers, &tip)?)
};
if let Some(latest_header) = new_headers.last() {
info!("{:?} ({} left to index)", latest_header, new_headers.len());
};
let height_map = HashMap::<BlockHash, usize>::from_iter(
new_headers.iter().map(|h| (*h.hash(), h.height())),
);
let chan = SyncChannel::new(1);
let sender = chan.sender();
let blockhashes: Vec<BlockHash> = new_headers.iter().map(|h| *h.hash()).collect();
let batch_size = self.batch_size;
let fetcher = spawn_thread("fetcher", move || {
for chunk in blockhashes.chunks(batch_size) {
sender
.send(daemon.getblocks(&chunk))
.expect("failed sending blocks to be indexed");
pub(crate) fn filter_by_spending<'a>(
&'a self,
outpoint: OutPoint,
) -> impl Iterator<Item = BlockHash> + 'a {
self.store
.iter_spending(SpendingPrefixRow::scan_prefix(outpoint))
.map(|row| SpendingPrefixRow::from_db_row(&row).height())
.filter_map(move |height| self.chain.get_block_hash(height))
}
sender
.send(Ok(vec![]))
.expect("failed sending explicit end of stream");
});
pub(crate) fn sync(&mut self, daemon: &Daemon, chunk_size: usize) -> Result<()> {
loop {
waiter.poll()?;
let timer = self.stats.start_timer("fetch");
let batch = chan
.receiver()
.recv()
.expect("block fetch exited prematurely")?;
timer.observe_duration();
if batch.is_empty() {
let new_headers = daemon.get_new_headers(&self.chain)?;
if new_headers.is_empty() {
break;
}
info!(
"indexing {} blocks: [{}..{}]",
new_headers.len(),
new_headers.first().unwrap().height(),
new_headers.last().unwrap().height()
);
for chunk in new_headers.chunks(chunk_size) {
let blockhashes: Vec<BlockHash> = chunk.iter().map(|h| h.hash()).collect();
let mut heights_map: HashMap<BlockHash, usize> =
chunk.iter().map(|h| (h.hash(), h.height())).collect();
let rows_iter = batch.iter().flat_map(|block| {
let blockhash = block.block_hash();
let height = *height_map
.get(&blockhash)
.unwrap_or_else(|| panic!("missing header for block {}", blockhash));
let mut batch = db::WriteBatch::default();
self.stats.update(block, height); // TODO: update stats after the block is indexed
index_block(block, height).chain(std::iter::once(last_indexed_block(&blockhash)))
});
let timer = self.stats.start_timer("index+write");
store.write(rows_iter);
timer.observe_duration();
daemon.for_blocks(blockhashes, |blockhash, block| {
let height = heights_map.remove(&blockhash).expect("unexpected block");
let result = index_single_block(block, height);
result.extend(&mut batch);
})?;
assert!(heights_map.is_empty(), "some blocks were not indexed");
batch.sort();
self.stats.report_stats(&batch);
self.store.write(batch);
}
let timer = self.stats.start_timer("flush");
store.flush(); // make sure no row is left behind
timer.observe_duration();
fetcher.join().expect("block fetcher failed");
self.headers.write().unwrap().apply(new_headers, tip);
assert_eq!(tip, self.headers.read().unwrap().tip());
self.stats
.update_height(self.headers.read().unwrap().len() - 1);
Ok(tip)
self.chain.update(new_headers);
}
self.store.flush();
Ok(())
}
}
fn db_rows_size(rows: &[db::Row]) -> usize {
rows.iter().map(|key| key.len()).sum()
}
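// Summary: for each transaction this emits one TxidRow, one ScriptHashRow per
// spendable output and one SpendingPrefixRow per non-coinbase input, plus a single
// HeaderRow for the block itself.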
fn index_single_block(block: Block, height: usize) -> IndexResult {
let mut funding_rows = vec![];
let mut spending_rows = vec![];
let mut txid_rows = Vec::with_capacity(block.txdata.len());
for tx in &block.txdata {
txid_rows.push(TxidRow::new(tx.txid(), height));
funding_rows.extend(
tx.output
.iter()
.filter(|txo| !txo.script_pubkey.is_provably_unspendable())
.map(|txo| {
let scripthash = ScriptHash::new(&txo.script_pubkey);
ScriptHashRow::new(scripthash, height)
}),
);
if tx.is_coin_base() {
continue; // coinbase doesn't have inputs
}
spending_rows.extend(
tx.input
.iter()
.map(|txin| SpendingPrefixRow::new(txin.previous_output, height)),
);
}
IndexResult {
funding_rows,
spending_rows,
txid_rows,
header_row: HeaderRow::new(block.header),
}
}

View File

@ -1,28 +1,36 @@
#![recursion_limit = "1024"]
#[macro_use]
extern crate error_chain;
extern crate anyhow;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
// I really don't know why it fails without this line
extern crate configure_me;
pub mod app;
pub mod bulk;
pub mod cache;
pub mod config;
pub mod daemon;
pub mod errors;
pub mod fake;
pub mod index;
pub mod mempool;
pub mod metrics;
pub mod query;
pub mod rpc;
pub mod signal;
pub mod store;
pub mod util;
mod cache;
mod chain;
mod config;
mod daemon;
mod db;
mod electrum;
mod index;
mod mempool;
mod merkle;
mod metrics;
pub mod server;
mod signals;
mod status;
mod tracker;
mod types;
pub use {
cache::Cache,
config::Config,
daemon::Daemon,
electrum::{Client, Rpc},
status::Status,
tracker::Tracker,
types::ScriptHash,
};

77
src/map.rs Normal file
View File

@ -0,0 +1,77 @@
use bitcoin::{BlockHash, BlockHeader};
use std::collections::HashMap;
#[derive(Default)]
struct Chain {
by_height: Vec<BlockHash>,
}
impl Chain {
fn build(tip: BlockHash, by_hash: &HashMap<BlockHash, BlockHeader>) -> Self {
// verify full chain till genesis
let mut by_height = vec![];
let mut blockhash = tip;
while blockhash != BlockHash::default() {
by_height.push(blockhash);
blockhash = match by_hash.get(&blockhash) {
Some(header) => header.prev_blockhash,
None => panic!("missing block header: {}", blockhash),
};
}
by_height.reverse();
Self { by_height }
}
fn len(&self) -> usize {
self.by_height.len()
}
fn tip(&self) -> Option<&BlockHash> {
self.by_height.last()
}
}
#[derive(Default)]
pub struct BlockMap {
by_hash: HashMap<BlockHash, BlockHeader>,
chain: Chain,
}
impl BlockMap {
pub(crate) fn new(headers: Vec<BlockHeader>) -> Self {
let mut map = Self::default();
map.add_headers(headers);
map
}
pub fn chain(&self) -> &[BlockHash] {
&self.chain.by_height
}
/// May return stale headers
pub fn get_header(&self, hash: &BlockHash) -> Option<&BlockHeader> {
self.by_hash.get(hash)
}
fn add_headers(&mut self, headers: Vec<BlockHeader>) {
let total_blocks = headers.len();
let mut new_blocks = 0usize;
for header in headers {
let hash = header.block_hash();
self.by_hash.entry(hash).or_insert_with(|| {
new_blocks += 1;
header
});
}
debug!("added {}/{} headers", new_blocks, total_blocks,);
}
pub fn update(&mut self, tip: BlockHash, headers: Vec<BlockHeader>) {
self.add_headers(headers);
let chain = Chain::build(tip, &self.by_hash);
assert_eq!(chain.tip(), Some(&tip));
info!("verified {} headers, tip={}", chain.len(), tip);
self.chain = chain;
}
}
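// Usage sketch (hypothetical call site; `initial_headers`, `tip_hash` and
// `new_headers` are placeholders): headers may arrive out of order, and update()
// re-verifies the full hash chain from the given tip back to genesis before
// replacing the active chain.
//
//   let mut map = BlockMap::new(initial_headers);
//   map.update(tip_hash, new_headers);
//   assert_eq!(map.chain().last(), Some(&tip_hash));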

View File

@ -1,287 +1,199 @@
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::hash_types::Txid;
use std::collections::{BTreeMap, HashMap, HashSet};
use anyhow::Result;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::convert::TryInto;
use std::iter::FromIterator;
use std::ops::Bound;
use std::sync::Mutex;
use crate::daemon::{Daemon, MempoolEntry};
use crate::errors::*;
use crate::index::index_transaction;
use crate::metrics::{
Gauge, GaugeVec, HistogramOpts, HistogramTimer, HistogramVec, MetricOpts, Metrics,
};
use crate::store::{ReadStore, Row};
use crate::util::Bytes;
use bitcoin::hashes::Hash;
use bitcoin::{Amount, OutPoint, Transaction, Txid};
use bitcoincore_rpc::json;
use rayon::prelude::*;
use serde::ser::{Serialize, SerializeSeq, Serializer};
const VSIZE_BIN_WIDTH: u32 = 100_000; // in vbytes
use crate::{daemon::Daemon, types::ScriptHash};
struct MempoolStore {
map: BTreeMap<Bytes, Vec<Bytes>>,
pub(crate) struct Entry {
pub txid: Txid,
pub tx: Transaction,
pub fee: Amount,
pub vsize: u64,
pub has_unconfirmed_inputs: bool,
}
impl MempoolStore {
fn new() -> MempoolStore {
MempoolStore {
map: BTreeMap::new(),
/// Mempool current state
pub(crate) struct Mempool {
entries: HashMap<Txid, Entry>,
by_funding: BTreeSet<(ScriptHash, Txid)>,
by_spending: BTreeSet<(OutPoint, Txid)>,
histogram: Histogram,
txid_min: Txid,
txid_max: Txid,
}
impl Mempool {
pub fn new() -> Self {
Self {
entries: Default::default(),
by_funding: Default::default(),
by_spending: Default::default(),
histogram: Histogram::empty(),
txid_min: Txid::from_inner([0x00; 32]),
txid_max: Txid::from_inner([0xFF; 32]),
}
}
fn add(&mut self, tx: &Transaction) {
let rows = index_transaction(tx, 0);
for row in rows {
let (key, value) = row.into_pair();
self.map.entry(key).or_insert_with(Vec::new).push(value);
}
}
fn remove(&mut self, tx: &Transaction) {
let rows = index_transaction(tx, 0);
for row in rows {
let (key, value) = row.into_pair();
let no_values_left = {
let values = self
.map
.get_mut(&key)
.unwrap_or_else(|| panic!("missing key {} in mempool", hex::encode(&key)));
let last_value = values
.pop()
.unwrap_or_else(|| panic!("no values found for key {}", hex::encode(&key)));
// TxInRow and TxOutRow have an empty value, TxRow has height=0 as value.
assert_eq!(
value,
last_value,
"wrong value for key {}: {}",
hex::encode(&key),
hex::encode(&last_value)
);
values.is_empty()
};
if no_values_left {
self.map.remove(&key).unwrap();
}
}
}
}
impl ReadStore for MempoolStore {
fn get(&self, key: &[u8]) -> Option<Bytes> {
Some(self.map.get(key)?.last()?.to_vec())
}
fn scan(&self, prefix: &[u8]) -> Vec<Row> {
let range = self
.map
.range((Bound::Included(prefix.to_vec()), Bound::Unbounded));
let mut rows = vec![];
for (key, values) in range {
if !key.starts_with(prefix) {
break;
}
if let Some(value) = values.last() {
rows.push(Row {
key: key.to_vec(),
value: value.to_vec(),
});
}
}
rows
}
}
struct Item {
tx: Transaction, // stored for faster retrieval and index removal
entry: MempoolEntry, // caches mempool fee rates
}
struct Stats {
count: Gauge,
update: HistogramVec,
vsize: GaugeVec,
max_fee_rate: Mutex<f32>,
}
impl Stats {
fn start_timer(&self, step: &str) -> HistogramTimer {
self.update.with_label_values(&[step]).start_timer()
}
fn update(&self, entries: &[&MempoolEntry]) {
let mut bands: Vec<(f32, u32)> = vec![];
let mut fee_rate = 1.0f32; // [sat/vbyte]
let mut vsize = 0u32; // vsize of transactions paying <= fee_rate
for e in entries {
while fee_rate < e.fee_per_vbyte() {
bands.push((fee_rate, vsize));
fee_rate *= 2.0;
}
vsize += e.vsize();
}
let mut max_fee_rate = self.max_fee_rate.lock().unwrap();
loop {
bands.push((fee_rate, vsize));
if fee_rate < *max_fee_rate {
fee_rate *= 2.0;
continue;
}
*max_fee_rate = fee_rate;
break;
}
drop(max_fee_rate);
for (fee_rate, vsize) in bands {
// labels should be ordered by fee_rate value
let label = format!("{:10.0}", fee_rate);
self.vsize
.with_label_values(&[&label])
.set(f64::from(vsize));
}
}
}
pub struct Tracker {
items: HashMap<Txid, Item>,
index: MempoolStore,
histogram: Vec<(f32, u32)>,
stats: Stats,
}
impl Tracker {
pub fn new(metrics: &Metrics) -> Tracker {
Tracker {
items: HashMap::new(),
index: MempoolStore::new(),
histogram: vec![],
stats: Stats {
count: metrics.gauge(MetricOpts::new(
"electrs_mempool_count",
"# of mempool transactions",
)),
update: metrics.histogram_vec(
HistogramOpts::new(
"electrs_mempool_update",
"Time to update mempool (in seconds)",
),
&["step"],
),
vsize: metrics.gauge_vec(
MetricOpts::new(
"electrs_mempool_vsize",
"Total vsize of transactions paying at most given fee rate",
),
&["fee_rate"],
),
max_fee_rate: Mutex::new(1.0),
},
}
}
pub fn has_txn(&self, txid: &Txid) -> bool {
self.items.contains_key(txid)
}
pub fn get_fee(&self, txid: &Txid) -> Option<u64> {
self.items.get(txid).map(|stats| stats.entry.fee())
}
/// Returns vector of (fee_rate, vsize) pairs, where fee_{n-1} > fee_n and vsize_n is the
/// total virtual size of mempool transactions with fee in the bin [fee_{n-1}, fee_n].
/// Note: fee_{-1} is implied to be infinite.
pub fn fee_histogram(&self) -> &Vec<(f32, u32)> {
pub(crate) fn fees_histogram(&self) -> &Histogram {
&self.histogram
}
pub fn index(&self) -> &dyn ReadStore {
&self.index
pub(crate) fn get(&self, txid: &Txid) -> Option<&Entry> {
self.entries.get(txid)
}
pub fn update(&mut self, daemon: &Daemon) -> Result<()> {
let timer = self.stats.start_timer("fetch");
let new_txids = daemon
.getmempooltxids()
.chain_err(|| "failed to update mempool from daemon")?;
let old_txids = HashSet::from_iter(self.items.keys().cloned());
timer.observe_duration();
pub(crate) fn filter_by_funding(&self, scripthash: &ScriptHash) -> Vec<&Entry> {
let range = (
Bound::Included((*scripthash, self.txid_min)),
Bound::Included((*scripthash, self.txid_max)),
);
self.by_funding
.range(range)
.map(|(_, txid)| self.get(txid).expect("missing funding mempool tx"))
.collect()
}
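    // The BTreeSet keys are (ScriptHash, Txid) tuples ordered lexicographically, so
    // bounding the Txid component by all-zero/all-0xFF (txid_min/txid_max) selects
    // exactly the entries funding the given scripthash; filter_by_spending below
    // applies the same trick to (OutPoint, Txid) keys.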
let timer = self.stats.start_timer("add");
let txids_iter = new_txids.difference(&old_txids);
let entries: Vec<(&Txid, MempoolEntry)> = txids_iter
pub(crate) fn filter_by_spending(&self, outpoint: &OutPoint) -> Vec<&Entry> {
let range = (
Bound::Included((*outpoint, self.txid_min)),
Bound::Included((*outpoint, self.txid_max)),
);
self.by_spending
.range(range)
.map(|(_, txid)| self.get(txid).expect("missing spending mempool tx"))
.collect()
}
pub fn sync(&mut self, daemon: &Daemon) -> Result<()> {
let txids = daemon.get_mempool_txids()?;
debug!("loading {} mempool transactions", txids.len());
let new_txids = HashSet::<Txid>::from_iter(txids);
let old_txids = HashSet::<Txid>::from_iter(self.entries.keys().copied());
let to_add = &new_txids - &old_txids;
let to_remove = &old_txids - &new_txids;
let removed = to_remove.len();
for txid in to_remove {
self.remove_entry(txid);
}
let entries: Vec<_> = to_add
.par_iter()
.filter_map(|txid| {
match daemon.getmempoolentry(txid) {
Ok(entry) => Some((txid, entry)),
Err(err) => {
debug!("no mempool entry {}: {}", txid, err); // e.g. new block or RBF
None // ignore this transaction for now
}
match (
daemon.get_transaction(txid, None),
daemon.get_mempool_entry(txid),
) {
(Ok(tx), Ok(entry)) => Some((txid, tx, entry)),
_ => None,
}
})
.collect();
if !entries.is_empty() {
let txids: Vec<&Txid> = entries.iter().map(|(txid, _)| *txid).collect();
let txs = match daemon.gettransactions(&txids) {
Ok(txs) => txs,
Err(err) => {
debug!("failed to get transactions {:?}: {}", txids, err); // e.g. new block or RBF
return Ok(()); // keep the mempool until next update()
let added = entries.len();
for (txid, tx, entry) in entries {
self.add_entry(*txid, tx, entry);
}
};
for ((txid, entry), tx) in entries.into_iter().zip(txs.into_iter()) {
assert_eq!(tx.txid(), *txid);
self.add(txid, tx, entry);
}
}
timer.observe_duration();
let timer = self.stats.start_timer("remove");
for txid in old_txids.difference(&new_txids) {
self.remove(txid);
}
timer.observe_duration();
let timer = self.stats.start_timer("fees");
self.update_fee_histogram();
timer.observe_duration();
self.stats.count.set(self.items.len() as i64);
self.histogram = Histogram::new(self.entries.values().map(|e| (e.fee, e.vsize)));
debug!(
"{} mempool txs: {} added, {} removed",
self.entries.len(),
added,
removed,
);
Ok(())
}
fn add(&mut self, txid: &Txid, tx: Transaction, entry: MempoolEntry) {
self.index.add(&tx);
self.items.insert(*txid, Item { tx, entry });
fn add_entry(&mut self, txid: Txid, tx: Transaction, entry: json::GetMempoolEntryResult) {
for txi in &tx.input {
self.by_spending.insert((txi.previous_output, txid));
}
for txo in &tx.output {
let scripthash = ScriptHash::new(&txo.script_pubkey);
self.by_funding.insert((scripthash, txid)); // may have duplicates
}
let entry = Entry {
txid,
tx,
vsize: entry.vsize,
fee: entry.fees.base,
has_unconfirmed_inputs: !entry.depends.is_empty(),
};
assert!(
self.entries.insert(txid, entry).is_none(),
"duplicate mempool txid"
);
}
fn remove(&mut self, txid: &Txid) {
let stats = self
.items
.remove(txid)
.unwrap_or_else(|| panic!("missing mempool tx {}", txid));
self.index.remove(&stats.tx);
fn remove_entry(&mut self, txid: Txid) {
let entry = self.entries.remove(&txid).expect("missing tx from mempool");
for txi in entry.tx.input {
self.by_spending.remove(&(txi.previous_output, txid));
}
for txo in entry.tx.output {
let scripthash = ScriptHash::new(&txo.script_pubkey);
self.by_funding.remove(&(scripthash, txid)); // may have misses
}
fn update_fee_histogram(&mut self) {
let mut entries: Vec<&MempoolEntry> = self.items.values().map(|stat| &stat.entry).collect();
entries.sort_unstable_by(|e1, e2| {
e1.fee_per_vbyte().partial_cmp(&e2.fee_per_vbyte()).unwrap()
});
self.histogram = electrum_fees(&entries);
self.stats.update(&entries);
}
}
fn electrum_fees(entries: &[&MempoolEntry]) -> Vec<(f32, u32)> {
let mut histogram = vec![];
let mut bin_size = 0;
let mut last_fee_rate = None;
for e in entries.iter().rev() {
last_fee_rate = Some(e.fee_per_vbyte());
bin_size += e.vsize();
if bin_size > VSIZE_BIN_WIDTH {
// vsize of transactions paying >= e.fee_per_vbyte()
histogram.push((e.fee_per_vbyte(), bin_size));
bin_size = 0;
pub(crate) struct Histogram {
/// bins[64-i] contains the total vsize of transactions with fee rate [2**(i-1), 2**i).
/// bins[63] = [1, 2)
/// bins[62] = [2, 4)
/// bins[61] = [4, 8)
/// bins[60] = [8, 16)
/// ...
/// bins[1] = [2**62, 2**63)
/// bins[0] = [2**63, 2**64)
bins: [u64; Histogram::SIZE],
}
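// Worked example (a sketch of the binning done in Histogram::new below): a
// transaction paying 500 sat for 100 vB has fee_rate = 5 sat/vB;
// 5u64.leading_zeros() == 61, so its vsize lands in bins[61], the [4, 8) sat/vB bin.
// A sub-1 sat/vB transaction yields fee_rate == 0 and leading_zeros() == 64, which
// falls outside the array and is skipped by the `bins.get_mut(index)` check.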
impl Histogram {
const SIZE: usize = 64;
fn empty() -> Self {
Self::new(std::iter::empty())
}
fn new(items: impl Iterator<Item = (Amount, u64)>) -> Self {
let mut bins = [0; Self::SIZE];
for (fee, vsize) in items {
let fee_rate = fee.as_sat() / vsize;
let index: usize = fee_rate.leading_zeros().try_into().unwrap();
// skip transactions with too low fee rate (<1 sat/vB)
if let Some(bin) = bins.get_mut(index) {
*bin += vsize
}
}
if let Some(fee_rate) = last_fee_rate {
histogram.push((fee_rate, bin_size));
Self { bins }
}
}
impl Serialize for Histogram {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.bins.len()))?;
// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#mempool-get-fee-histogram
let mut fee_rate = std::u64::MAX;
for vsize in self.bins.iter() {
let element = (fee_rate, *vsize);
seq.serialize_element(&element)?;
fee_rate >>= 1;
}
seq.end()
}
histogram
}

79
src/merkle.rs Normal file
View File

@ -0,0 +1,79 @@
use bitcoin::{
hashes::{hex::ToHex, Hash},
TxMerkleNode, Txid,
};
pub(crate) struct Proof {
proof: Vec<TxMerkleNode>,
position: usize,
}
impl Proof {
pub(crate) fn create(txids: &[Txid], position: usize) -> Self {
assert!(position < txids.len());
let mut offset = position;
let mut hashes: Vec<TxMerkleNode> = txids
.iter()
.map(|txid| TxMerkleNode::from_hash(txid.as_hash()))
.collect();
let mut proof = vec![];
while hashes.len() > 1 {
if hashes.len() % 2 != 0 {
let last = *hashes.last().unwrap();
hashes.push(last);
}
offset = if offset % 2 == 0 {
offset + 1
} else {
offset - 1
};
proof.push(hashes[offset]);
offset /= 2;
hashes = hashes
.chunks(2)
.map(|pair| {
let left = pair[0];
let right = pair[1];
let input = [&left[..], &right[..]].concat();
TxMerkleNode::hash(&input)
})
.collect()
}
Self { proof, position }
}
pub(crate) fn to_hex(&self) -> Vec<String> {
self.proof.iter().map(|node| node.to_hex()).collect()
}
pub(crate) fn position(&self) -> usize {
self.position
}
}
// TODO: add tests
// // {"id":37,"jsonrpc":"2.0",
// "result":{"block_height":333961,"merkle":[
// "5d8cfb001d9ec17861ad9c158244239cb6e3298a619b2a5f7b176ddd54459c75",
// "06811172e13312f2e496259d2c8a7262f1192be5223fcf4d6a9ed7f58a2175ba",
// "cbcec841dea3294706809d1510c72b4424d141fac89106af65b70399b1d79f3f",
// "a24d6c3601a54d40f4350e6c8887bf82a873fe8619f95c772b573ec0373119d3",
// "2015c1bb133ee2c972e55fdcd205a9aee7b0122fd74c2f5d5d27b24a562c7790",
// "f379496fef2e603c4e1c03e2179ebaf5153d6463b8d61aa16d41db3321a18165",
// "7a798d6529663fd472d26cc90c434b64f78955747ac2f93c8dcd35b8f684946e",
// "ad3811062b8db664f2342cbff1b491865310b74416dd7b901f14d980886821f8"],"pos":157}}
// // {"id":38,"jsonrpc":"2.0",
// "result":{"block_height":437303,"merkle":[
// "d29769df672657689fd6d293b416ee9211c77fbe243ab7820813f327b0e8dd47",
// "d71f0947b47cab0f64948acfe52d41c293f492fe9627690c330d4004f2852ce4",
// "5f36c4330c727d7c8d98cc906cb286f13a61b5b4cab2124c5d041897834b42d8",
// "e77d181f83355ed38d0e6305fdb87c9637373fd90d1dfb911262ac55d260181e",
// "a8f83ca44dc486d9d45c4cff9567839c254bda96e6960d310a5e471c70c6a95b",
// "e9a5ff7f74cb060b451ed2cd27de038efff4df911f4e0f99e2661b46ebcc7e1c",
// "6b0144095e3f0e0d0551cbaa6c5dfc89387024f836528281b6d290e356e196cf",
// "bb0761b0636ffd387e0ce322289a3579e926b6813e090130a88228bd80cff982",
// "ac327124304cccf6739da308a25bb365a6b63e9344bad2be139b0b02c042567c",
// "42e11f2d67050cd31295f85507ebc7706fc4c1fddf1e5a45b98ae3f7c63d2592",
// "52657042fcfc88067524bf6c5f9a66414c7de4f4fcabcb65bca56fa84cf309b4"],"pos":6}}

View File

@ -1,162 +1,87 @@
use prometheus::{self, Encoder, IntGauge};
use std::fs;
use std::io;
use std::net::SocketAddr;
use std::thread;
use std::time::Duration;
pub use prometheus::{
GaugeVec, Histogram, HistogramOpts, HistogramTimer, HistogramVec, IntCounter as Counter,
IntCounterVec as CounterVec, IntGauge as Gauge, Opts as MetricOpts,
use anyhow::{Context, Result};
use hyper::server::{Handler, Listening, Request, Response, Server};
use prometheus::{
self, process_collector::ProcessCollector, Encoder, HistogramOpts, HistogramVec, Registry,
};
use crate::errors::*;
use crate::util::spawn_thread;
use std::net::SocketAddr;
pub struct Metrics {
reg: prometheus::Registry,
addr: SocketAddr,
reg: Registry,
listen: Listening,
}
impl Drop for Metrics {
fn drop(&mut self) {
debug!("closing Prometheus server");
if let Err(e) = self.listen.close() {
warn!("failed to stop Prometheus server: {}", e);
}
}
}
#[derive(Clone)]
pub struct Histogram {
hist: HistogramVec,
}
impl Histogram {
pub fn observe_size(&self, label: &str, value: usize) {
self.hist.with_label_values(&[label]).observe(value as f64);
}
pub fn observe_duration<F, T>(&self, label: &str, func: F) -> T
where
F: FnOnce() -> T,
{
self.hist
.with_label_values(&[label])
.observe_closure_duration(func)
}
}
struct RegistryHandler {
reg: Registry,
}
impl RegistryHandler {
fn gather(&self) -> Result<Vec<u8>> {
let mut buffer = vec![];
prometheus::TextEncoder::new()
.encode(&self.reg.gather(), &mut buffer)
.context("failed to encode metrics")?;
Ok(buffer)
}
}
impl Handler for RegistryHandler {
fn handle(&self, req: Request, res: Response) {
trace!("{} {}", req.method, req.uri);
let buffer = self.gather().expect("failed to gather metrics");
res.send(&buffer).expect("send failed");
}
}
impl Metrics {
pub fn new(addr: SocketAddr) -> Metrics {
Metrics {
reg: prometheus::Registry::new(),
addr,
}
pub fn new(addr: SocketAddr) -> Result<Self> {
let reg = Registry::new();
reg.register(Box::new(ProcessCollector::for_self()))
.expect("failed to register ProcessCollector");
let listen = Server::http(addr)?
.handle(RegistryHandler { reg: reg.clone() })
.with_context(|| format!("failed to serve on {}", addr))?;
info!("serving Prometheus metrics on {}", addr);
Ok(Self { reg, listen })
}
pub fn counter(&self, opts: prometheus::Opts) -> Counter {
let c = Counter::with_opts(opts).unwrap();
self.reg.register(Box::new(c.clone())).unwrap();
c
}
pub fn counter_vec(&self, opts: prometheus::Opts, labels: &[&str]) -> CounterVec {
let c = CounterVec::new(opts, labels).unwrap();
self.reg.register(Box::new(c.clone())).unwrap();
c
}
pub fn gauge(&self, opts: prometheus::Opts) -> Gauge {
let g = Gauge::with_opts(opts).unwrap();
self.reg.register(Box::new(g.clone())).unwrap();
g
}
pub fn gauge_vec(&self, opts: prometheus::Opts, labels: &[&str]) -> GaugeVec {
let g = GaugeVec::new(opts, labels).unwrap();
self.reg.register(Box::new(g.clone())).unwrap();
g
}
pub fn gauge_int(&self, opts: prometheus::Opts) -> IntGauge {
let g = Gauge::with_opts(opts).unwrap();
self.reg.register(Box::new(g.clone())).unwrap();
g
}
pub fn histogram(&self, opts: prometheus::HistogramOpts) -> Histogram {
let h = Histogram::with_opts(opts).unwrap();
self.reg.register(Box::new(h.clone())).unwrap();
h
}
pub fn histogram_vec(&self, opts: prometheus::HistogramOpts, labels: &[&str]) -> HistogramVec {
let h = HistogramVec::new(opts, labels).unwrap();
self.reg.register(Box::new(h.clone())).unwrap();
h
}
pub fn start(&self) {
let server = tiny_http::Server::http(self.addr).unwrap_or_else(|e| {
panic!(
"failed to start monitoring HTTP server at {}: {}",
self.addr, e
)
});
start_process_exporter(&self);
let reg = self.reg.clone();
spawn_thread("metrics", move || loop {
if let Err(e) = handle_request(&reg, server.recv()) {
error!("http error: {}", e);
}
});
pub fn histogram_vec(&self, name: &str, desc: &str, labels: &[&str]) -> Histogram {
let opts = HistogramOpts::new(name, desc);
let hist = HistogramVec::new(opts, labels).unwrap();
self.reg
.register(Box::new(hist.clone()))
.expect("failed to register Histogram");
Histogram { hist }
}
}
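// Usage sketch (hypothetical call site; the address and `do_lookup` are
// placeholders): the server creates a single Metrics instance and hands out
// Histogram handles to the other components.
//
//   let metrics = Metrics::new("127.0.0.1:4224".parse().unwrap())?;
//   let lookup = metrics.histogram_vec("index_lookup_duration", "Index lookup duration (in seconds)", &["step"]);
//   let value = lookup.observe_duration("lookup", || do_lookup());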
fn handle_request(
reg: &prometheus::Registry,
request: io::Result<tiny_http::Request>,
) -> io::Result<()> {
let request = request?;
let mut buffer = vec![];
prometheus::TextEncoder::new()
.encode(&reg.gather(), &mut buffer)
.unwrap();
let response = tiny_http::Response::from_data(buffer);
request.respond(response)
}
struct Stats {
utime: f64,
rss: u64,
fds: usize,
}
fn parse_stats() -> Result<Stats> {
let value =
fs::read_to_string("/proc/self/stat").chain_err(|| "failed to read /proc/self/stat")?;
let parts: Vec<&str> = value.split_whitespace().collect();
let page_size = page_size::get() as u64;
let ticks_per_second = sysconf::raw::sysconf(sysconf::raw::SysconfVariable::ScClkTck)
.expect("failed to get _SC_CLK_TCK") as f64;
let parse_part = |index: usize, name: &str| -> Result<u64> {
Ok(parts
.get(index)
.chain_err(|| format!("missing {}: {:?}", name, parts))?
.parse::<u64>()
.chain_err(|| format!("invalid {}: {:?}", name, parts))?)
};
// For details, see '/proc/[pid]/stat' section at `man 5 proc`:
let utime = parse_part(13, "utime")? as f64 / ticks_per_second;
let rss = parse_part(23, "rss")? * page_size;
let fds = fs::read_dir("/proc/self/fd")
.chain_err(|| "failed to read /proc/self/fd directory")?
.count();
Ok(Stats { utime, rss, fds })
}
fn start_process_exporter(metrics: &Metrics) {
let rss = metrics.gauge(MetricOpts::new(
"electrs_process_memory_rss",
"Resident memory size [bytes]",
));
let cpu = metrics.gauge_vec(
MetricOpts::new(
"electrs_process_cpu_usage",
"CPU usage by this process [seconds]",
),
&["type"],
);
let fds = metrics.gauge(MetricOpts::new(
"electrs_process_open_fds",
"# of file descriptors",
));
spawn_thread("exporter", move || loop {
match parse_stats() {
Ok(stats) => {
cpu.with_label_values(&["utime"]).set(stats.utime as f64);
rss.set(stats.rss as i64);
fds.set(stats.fds as i64);
}
Err(e) => {
warn!("failed to export process stats: {}", e);
return;
}
}
thread::sleep(Duration::from_secs(5));
});
}

View File

@ -1,70 +0,0 @@
// TODO: network::socket::Socket needs to be reimplemented.
use bitcoin::network::constants::Network;
use bitcoin::network::message::NetworkMessage;
use bitcoin::network::message_blockdata::InvType;
use bitcoin::network::socket::Socket;
use bitcoin::hash_types::Txid;
use bitcoin::util::Error;
use std::sync::mpsc::Sender;
use std::thread;
use std::time::Duration;
use crate::util;
fn connect() -> Result<Socket, Error> {
let mut sock = Socket::new(Network::Bitcoin);
sock.connect("127.0.0.1", 8333)?;
Ok(sock)
}
fn handle(mut sock: Socket, tx: Sender<Txid>) {
let mut outgoing = vec![sock.version_message(0).unwrap()];
loop {
for msg in outgoing.split_off(0) {
trace!("send {:?}", msg);
if let Err(e) = sock.send_message(msg.clone()) {
warn!("failed to connect to node: {}", e);
break;
}
}
// Receive new message
let msg = match sock.receive_message() {
Ok(msg) => msg,
Err(e) => {
warn!("failed to receive p2p message: {}", e);
break;
}
};
trace!("recv {:?}", msg);
match msg {
NetworkMessage::Alert(_) => continue, // deprecated
NetworkMessage::Version(_) => outgoing.push(NetworkMessage::Verack),
NetworkMessage::Ping(nonce) => outgoing.push(NetworkMessage::Pong(nonce)),
NetworkMessage::Inv(ref inventory) => {
inventory
.iter()
.filter(|inv| inv.inv_type == InvType::Block)
.for_each(|inv| tx.send(inv.hash).expect("failed to send message"));
}
_ => (),
};
}
}
pub fn run() -> util::Channel<Txid> {
let chan = util::Channel::new();
let tx = chan.sender();
util::spawn_thread("p2p", move || loop {
// TODO: support testnet and regtest as well.
match connect() {
Ok(sock) => handle(sock, tx.clone()),
Err(e) => warn!("p2p error: {}", e),
}
thread::sleep(Duration::from_secs(3));
});
chan
}

View File

@ -1,580 +0,0 @@
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::consensus::encode::deserialize;
use bitcoin::hash_types::{BlockHash, TxMerkleNode, Txid};
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::hex::ToHex;
use bitcoin::hashes::Hash;
use crypto::digest::Digest;
use crypto::sha2::Sha256;
use serde_json::Value;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use crate::app::App;
use crate::cache::TransactionCache;
use crate::errors::*;
use crate::index::{compute_script_hash, TxInRow, TxOutRow, TxRow};
use crate::mempool::Tracker;
use crate::metrics::{HistogramOpts, HistogramVec, Metrics};
use crate::store::{ReadStore, Row};
use crate::util::{FullHash, HashPrefix, HeaderEntry};
pub struct FundingOutput {
pub txn_id: Txid,
pub height: u32,
pub output_index: usize,
pub value: u64,
}
type OutPoint = (Txid, usize); // (txid, output_index)
struct SpendingInput {
txn_id: Txid,
height: u32,
funding_output: OutPoint,
value: u64,
}
pub struct Status {
confirmed: (Vec<FundingOutput>, Vec<SpendingInput>),
mempool: (Vec<FundingOutput>, Vec<SpendingInput>),
txn_fees: HashMap<Txid, u64>,
}
fn calc_balance((funding, spending): &(Vec<FundingOutput>, Vec<SpendingInput>)) -> i64 {
let funded: u64 = funding.iter().map(|output| output.value).sum();
let spent: u64 = spending.iter().map(|input| input.value).sum();
funded as i64 - spent as i64
}
pub struct HistoryItem {
height: i32,
tx_hash: Txid,
fee: Option<u64>, // need to be set only for unconfirmed transactions (i.e. height <= 0)
}
impl HistoryItem {
pub fn to_json(&self) -> Value {
let mut result = json!({ "height": self.height, "tx_hash": self.tx_hash.to_hex()});
self.fee.map(|f| {
result
.as_object_mut()
.unwrap()
.insert("fee".to_string(), json!(f))
});
result
}
}
impl Status {
fn funding(&self) -> impl Iterator<Item = &FundingOutput> {
self.confirmed.0.iter().chain(self.mempool.0.iter())
}
fn spending(&self) -> impl Iterator<Item = &SpendingInput> {
self.confirmed.1.iter().chain(self.mempool.1.iter())
}
pub fn confirmed_balance(&self) -> i64 {
calc_balance(&self.confirmed)
}
pub fn mempool_balance(&self) -> i64 {
calc_balance(&self.mempool)
}
pub fn history(&self) -> Vec<HistoryItem> {
let mut txns_map = HashMap::<Txid, i32>::new();
for f in self.funding() {
txns_map.insert(f.txn_id, f.height as i32);
}
for s in self.spending() {
txns_map.insert(s.txn_id, s.height as i32);
}
let mut items: Vec<HistoryItem> = txns_map
.into_iter()
.map(|item| HistoryItem {
height: item.1,
tx_hash: item.0,
fee: self.txn_fees.get(&item.0).cloned(),
})
.collect();
items.sort_unstable_by_key(|item| item.height);
items
}
pub fn unspent(&self) -> Vec<&FundingOutput> {
let mut outputs_map = HashMap::<OutPoint, &FundingOutput>::new();
for f in self.funding() {
outputs_map.insert((f.txn_id, f.output_index), f);
}
for s in self.spending() {
if outputs_map.remove(&s.funding_output).is_none() {
warn!("failed to remove {:?}", s.funding_output);
}
}
let mut outputs = outputs_map
.into_iter()
.map(|item| item.1) // a reference to unspent output
.collect::<Vec<&FundingOutput>>();
outputs.sort_unstable_by_key(|out| out.height);
outputs
}
pub fn hash(&self) -> Option<FullHash> {
let txns = self.history();
if txns.is_empty() {
None
} else {
let mut hash = FullHash::default();
let mut sha2 = Sha256::new();
for item in txns {
let part = format!("{}:{}:", item.tx_hash.to_hex(), item.height);
sha2.input(part.as_bytes());
}
sha2.result(&mut hash);
Some(hash)
}
}
}
struct TxnHeight {
txn: Transaction,
height: u32,
}
fn merklize<T: Hash>(left: T, right: T) -> T {
let data = [&left[..], &right[..]].concat();
<T as Hash>::hash(&data)
}
fn create_merkle_branch_and_root<T: Hash>(mut hashes: Vec<T>, mut index: usize) -> (Vec<T>, T) {
let mut merkle = vec![];
while hashes.len() > 1 {
if hashes.len() % 2 != 0 {
let last = *hashes.last().unwrap();
hashes.push(last);
}
index = if index % 2 == 0 { index + 1 } else { index - 1 };
merkle.push(hashes[index]);
index /= 2;
hashes = hashes
.chunks(2)
.map(|pair| merklize(pair[0], pair[1]))
.collect()
}
(merkle, hashes[0])
}
// TODO: the functions below can be part of ReadStore.
fn txrow_by_txid(store: &dyn ReadStore, txid: &Txid) -> Option<TxRow> {
let key = TxRow::filter_full(&txid);
let value = store.get(&key)?;
Some(TxRow::from_row(&Row { key, value }))
}
fn txrows_by_prefix(store: &dyn ReadStore, txid_prefix: HashPrefix) -> Vec<TxRow> {
store
.scan(&TxRow::filter_prefix(txid_prefix))
.iter()
.map(|row| TxRow::from_row(row))
.collect()
}
fn txids_by_script_hash(store: &dyn ReadStore, script_hash: &[u8]) -> Vec<HashPrefix> {
store
.scan(&TxOutRow::filter(script_hash))
.iter()
.map(|row| TxOutRow::from_row(row).txid_prefix)
.collect()
}
fn txids_by_funding_output(
store: &dyn ReadStore,
txn_id: &Txid,
output_index: usize,
) -> Vec<HashPrefix> {
store
.scan(&TxInRow::filter(&txn_id, output_index))
.iter()
.map(|row| TxInRow::from_row(row).txid_prefix)
.collect()
}
pub struct Query {
app: Arc<App>,
tracker: RwLock<Tracker>,
tx_cache: TransactionCache,
txid_limit: usize,
duration: HistogramVec,
}
impl Query {
pub fn new(
app: Arc<App>,
metrics: &Metrics,
tx_cache: TransactionCache,
txid_limit: usize,
) -> Arc<Query> {
Arc::new(Query {
app,
tracker: RwLock::new(Tracker::new(metrics)),
tx_cache,
txid_limit,
duration: metrics.histogram_vec(
HistogramOpts::new("electrs_query_duration", "Request duration (in seconds)"),
&["type"],
),
})
}
fn load_txns_by_prefix(
&self,
store: &dyn ReadStore,
prefixes: Vec<HashPrefix>,
) -> Result<Vec<TxnHeight>> {
let mut txns = vec![];
for txid_prefix in prefixes {
for tx_row in txrows_by_prefix(store, txid_prefix) {
let txid: Txid = deserialize(&tx_row.key.txid).unwrap();
let txn = self.load_txn(&txid, Some(tx_row.height))?;
txns.push(TxnHeight {
txn,
height: tx_row.height,
})
}
}
Ok(txns)
}
fn find_spending_input(
&self,
store: &dyn ReadStore,
funding: &FundingOutput,
) -> Result<Option<SpendingInput>> {
let spending_txns: Vec<TxnHeight> = self.load_txns_by_prefix(
store,
txids_by_funding_output(store, &funding.txn_id, funding.output_index),
)?;
let mut spending_inputs = vec![];
for t in &spending_txns {
for input in t.txn.input.iter() {
if input.previous_output.txid == funding.txn_id
&& input.previous_output.vout == funding.output_index as u32
{
spending_inputs.push(SpendingInput {
txn_id: t.txn.txid(),
height: t.height,
funding_output: (funding.txn_id, funding.output_index),
value: funding.value,
})
}
}
}
assert!(spending_inputs.len() <= 1);
Ok(if spending_inputs.len() == 1 {
Some(spending_inputs.remove(0))
} else {
None
})
}
fn find_funding_outputs(&self, t: &TxnHeight, script_hash: &[u8]) -> Vec<FundingOutput> {
let mut result = vec![];
let txn_id = t.txn.txid();
for (index, output) in t.txn.output.iter().enumerate() {
if compute_script_hash(&output.script_pubkey[..]) == script_hash {
result.push(FundingOutput {
txn_id,
height: t.height,
output_index: index,
value: output.value,
})
}
}
result
}
fn confirmed_status(
&self,
script_hash: &[u8],
) -> Result<(Vec<FundingOutput>, Vec<SpendingInput>)> {
let mut funding = vec![];
let mut spending = vec![];
let read_store = self.app.read_store();
let txid_prefixes = txids_by_script_hash(read_store, script_hash);
// if the limit is enabled
if self.txid_limit > 0 && txid_prefixes.len() > self.txid_limit {
bail!(
"{}+ transactions found, query may take a long time",
txid_prefixes.len()
);
}
for t in self.load_txns_by_prefix(read_store, txid_prefixes)? {
funding.extend(self.find_funding_outputs(&t, script_hash));
}
for funding_output in &funding {
if let Some(spent) = self.find_spending_input(read_store, &funding_output)? {
spending.push(spent);
}
}
Ok((funding, spending))
}
fn mempool_status(
&self,
script_hash: &[u8],
confirmed_funding: &[FundingOutput],
tracker: &Tracker,
) -> Result<(Vec<FundingOutput>, Vec<SpendingInput>)> {
let mut funding = vec![];
let mut spending = vec![];
let txid_prefixes = txids_by_script_hash(tracker.index(), script_hash);
for t in self.load_txns_by_prefix(tracker.index(), txid_prefixes)? {
funding.extend(self.find_funding_outputs(&t, script_hash));
}
// TODO: dedup outputs (somehow) both confirmed and in mempool (e.g. reorg?)
for funding_output in funding.iter().chain(confirmed_funding.iter()) {
if let Some(spent) = self.find_spending_input(tracker.index(), &funding_output)? {
spending.push(spent);
}
}
Ok((funding, spending))
}
pub fn status(&self, script_hash: &[u8]) -> Result<Status> {
let timer = self
.duration
.with_label_values(&["confirmed_status"])
.start_timer();
let confirmed = self
.confirmed_status(script_hash)
.chain_err(|| "failed to get confirmed status")?;
timer.observe_duration();
let tracker = self.tracker.read().unwrap();
let timer = self
.duration
.with_label_values(&["mempool_status"])
.start_timer();
let mempool = self
.mempool_status(script_hash, &confirmed.0, &tracker)
.chain_err(|| "failed to get mempool status")?;
timer.observe_duration();
let mut txn_fees = HashMap::new();
let funding_txn_ids = mempool.0.iter().map(|funding| funding.txn_id);
let spending_txn_ids = mempool.1.iter().map(|spending| spending.txn_id);
for mempool_txid in funding_txn_ids.chain(spending_txn_ids) {
tracker
.get_fee(&mempool_txid)
.map(|fee| txn_fees.insert(mempool_txid, fee));
}
Ok(Status {
confirmed,
mempool,
txn_fees,
})
}
fn lookup_confirmed_blockhash(
&self,
tx_hash: &Txid,
block_height: Option<u32>,
) -> Result<Option<BlockHash>> {
let blockhash = if self.tracker.read().unwrap().has_txn(&tx_hash) {
None // found in mempool (as unconfirmed transaction)
} else {
// Lookup in confirmed transactions' index
let height = match block_height {
Some(height) => height,
None => {
txrow_by_txid(self.app.read_store(), &tx_hash)
.chain_err(|| format!("not indexed tx {}", tx_hash))?
.height
}
};
let header = self
.app
.index()
.get_header(height as usize)
.chain_err(|| format!("missing header at height {}", height))?;
Some(*header.hash())
};
Ok(blockhash)
}
// Internal API for transaction retrieval
fn load_txn(&self, txid: &Txid, block_height: Option<u32>) -> Result<Transaction> {
let _timer = self.duration.with_label_values(&["load_txn"]).start_timer();
self.tx_cache.get_or_else(&txid, || {
let blockhash = self.lookup_confirmed_blockhash(txid, block_height)?;
let value: Value = self
.app
.daemon()
.gettransaction_raw(txid, blockhash, /*verbose*/ false)?;
let value_hex: &str = value.as_str().chain_err(|| "non-string tx")?;
hex::decode(&value_hex).chain_err(|| "non-hex tx")
})
}
// Public API for transaction retrieval (for Electrum RPC)
pub fn get_transaction(&self, tx_hash: &Txid, verbose: bool) -> Result<Value> {
let _timer = self
.duration
.with_label_values(&["get_transaction"])
.start_timer();
let blockhash = self.lookup_confirmed_blockhash(tx_hash, /*block_height*/ None)?;
self.app
.daemon()
.gettransaction_raw(tx_hash, blockhash, verbose)
}
pub fn get_confirmed_blockhash(&self, tx_hash: &Txid) -> Result<Value> {
let blockhash = self.lookup_confirmed_blockhash(tx_hash, None)?;
Ok(json!({ "block_hash": blockhash }))
}
pub fn get_headers(&self, heights: &[usize]) -> Vec<HeaderEntry> {
let _timer = self
.duration
.with_label_values(&["get_headers"])
.start_timer();
let index = self.app.index();
heights
.iter()
.filter_map(|height| index.get_header(*height))
.collect()
}
pub fn get_best_header(&self) -> Result<HeaderEntry> {
let last_header = self.app.index().best_header();
Ok(last_header.chain_err(|| "no headers indexed")?)
}
pub fn get_merkle_proof(
&self,
tx_hash: &Txid,
height: usize,
) -> Result<(Vec<TxMerkleNode>, usize)> {
let header_entry = self
.app
.index()
.get_header(height)
.chain_err(|| format!("missing block #{}", height))?;
let txids = self.app.daemon().getblocktxids(&header_entry.hash())?;
let pos = txids
.iter()
.position(|txid| txid == tx_hash)
.chain_err(|| format!("missing txid {}", tx_hash))?;
let tx_nodes: Vec<TxMerkleNode> = txids
.into_iter()
.map(|txid| TxMerkleNode::from_inner(txid.into_inner()))
.collect();
let (branch, _root) = create_merkle_branch_and_root(tx_nodes, pos);
Ok((branch, pos))
}
pub fn get_header_merkle_proof(
&self,
height: usize,
cp_height: usize,
) -> Result<(Vec<Sha256dHash>, Sha256dHash)> {
if cp_height < height {
bail!("cp_height #{} < height #{}", cp_height, height);
}
let best_height = self.get_best_header()?.height();
if best_height < cp_height {
bail!(
"cp_height #{} above best block height #{}",
cp_height,
best_height
);
}
let heights: Vec<usize> = (0..=cp_height).collect();
let header_hashes: Vec<BlockHash> = self
.get_headers(&heights)
.into_iter()
.map(|h| *h.hash())
.collect();
let merkle_nodes: Vec<Sha256dHash> = header_hashes
.iter()
.map(|block_hash| Sha256dHash::from_inner(block_hash.into_inner()))
.collect();
assert_eq!(header_hashes.len(), heights.len());
Ok(create_merkle_branch_and_root(merkle_nodes, height))
}
pub fn get_id_from_pos(
&self,
height: usize,
tx_pos: usize,
want_merkle: bool,
) -> Result<(Txid, Vec<TxMerkleNode>)> {
let header_entry = self
.app
.index()
.get_header(height)
.chain_err(|| format!("missing block #{}", height))?;
let txids = self.app.daemon().getblocktxids(header_entry.hash())?;
let txid = *txids
.get(tx_pos)
.chain_err(|| format!("No tx in position #{} in block #{}", tx_pos, height))?;
let tx_nodes = txids
.into_iter()
.map(|txid| TxMerkleNode::from_inner(txid.into_inner()))
.collect();
let branch = if want_merkle {
create_merkle_branch_and_root(tx_nodes, tx_pos).0
} else {
vec![]
};
Ok((txid, branch))
}
pub fn broadcast(&self, txn: &Transaction) -> Result<Txid> {
self.app.daemon().broadcast(txn)
}
pub fn update_mempool(&self) -> Result<()> {
let _timer = self
.duration
.with_label_values(&["update_mempool"])
.start_timer();
self.tracker.write().unwrap().update(self.app.daemon())
}
/// Returns (fee_rate, vsize) pairs, measured in satoshis per vbyte and vbytes.
pub fn get_fee_histogram(&self) -> Vec<(f32, u32)> {
self.tracker.read().unwrap().fee_histogram().clone()
}
// Fee rate [BTC/kB] to be confirmed in `blocks` from now.
pub fn estimate_fee(&self, blocks: usize) -> f64 {
let mut total_vsize = 0u32;
let mut last_fee_rate = 0.0;
let blocks_in_vbytes = (blocks * 1_000_000) as u32; // assume ~1MB blocks
for (fee_rate, vsize) in self.tracker.read().unwrap().fee_histogram() {
last_fee_rate = *fee_rate;
total_vsize += vsize;
if total_vsize >= blocks_in_vbytes {
break; // under-estimate the fee rate a bit
}
}
(last_fee_rate as f64) * 1e-5 // convert sat/B to BTC/kB (1 BTC/kB = 10^5 sat/B)
}
pub fn get_banner(&self) -> Result<String> {
self.app.get_banner()
}
pub fn get_relayfee(&self) -> Result<f64> {
self.app.daemon().get_relayfee()
}
}


@ -1,636 +0,0 @@
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::consensus::encode::{deserialize, serialize};
use bitcoin::hashes::hex::{FromHex, ToHex};
use bitcoin::hashes::{sha256d::Hash as Sha256dHash, Hash};
use error_chain::ChainedError;
use serde_json::{from_str, Value};
use std::collections::HashMap;
use std::io::{BufRead, BufReader, Write};
use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream};
use std::sync::mpsc::{self, Receiver, Sender, SyncSender, TrySendError};
use std::sync::{Arc, Mutex};
use std::thread;
use crate::errors::*;
use crate::metrics::{Gauge, HistogramOpts, HistogramVec, MetricOpts, Metrics};
use crate::query::{Query, Status};
use crate::util::{spawn_thread, Channel, HeaderEntry};
const ELECTRS_VERSION: &str = env!("CARGO_PKG_VERSION");
const PROTOCOL_VERSION: &str = "1.4";
// TODO: Sha256dHash should be a generic hash-container (since script hash is single SHA256)
fn hash_from_value<T: Hash>(val: Option<&Value>) -> Result<T> {
let script_hash = val.chain_err(|| "missing hash")?;
let script_hash = script_hash.as_str().chain_err(|| "non-string hash")?;
let script_hash = T::from_hex(script_hash).chain_err(|| "non-hex hash")?;
Ok(script_hash)
}
fn usize_from_value(val: Option<&Value>, name: &str) -> Result<usize> {
let val = val.chain_err(|| format!("missing {}", name))?;
let val = val.as_u64().chain_err(|| format!("non-integer {}", name))?;
Ok(val as usize)
}
fn usize_from_value_or(val: Option<&Value>, name: &str, default: usize) -> Result<usize> {
if val.is_none() {
return Ok(default);
}
usize_from_value(val, name)
}
fn bool_from_value(val: Option<&Value>, name: &str) -> Result<bool> {
let val = val.chain_err(|| format!("missing {}", name))?;
let val = val.as_bool().chain_err(|| format!("not a bool {}", name))?;
Ok(val)
}
fn bool_from_value_or(val: Option<&Value>, name: &str, default: bool) -> Result<bool> {
if val.is_none() {
return Ok(default);
}
bool_from_value(val, name)
}
fn unspent_from_status(status: &Status) -> Value {
json!(Value::Array(
status
.unspent()
.into_iter()
.map(|out| json!({
"height": out.height,
"tx_pos": out.output_index,
"tx_hash": out.txn_id.to_hex(),
"value": out.value,
}))
.collect()
))
}
struct Connection {
query: Arc<Query>,
last_header_entry: Option<HeaderEntry>,
status_hashes: HashMap<Sha256dHash, Value>, // ScriptHash -> StatusHash
stream: TcpStream,
addr: SocketAddr,
sender: SyncSender<Message>,
stats: Arc<Stats>,
relayfee: f64,
}
impl Connection {
pub fn new(
query: Arc<Query>,
stream: TcpStream,
addr: SocketAddr,
stats: Arc<Stats>,
relayfee: f64,
sender: SyncSender<Message>,
) -> Connection {
Connection {
query,
last_header_entry: None, // disable header subscription for now
status_hashes: HashMap::new(),
stream,
addr,
sender,
stats,
relayfee,
}
}
fn blockchain_headers_subscribe(&mut self) -> Result<Value> {
let entry = self.query.get_best_header()?;
let hex_header = hex::encode(serialize(entry.header()));
let result = json!({"hex": hex_header, "height": entry.height()});
self.last_header_entry = Some(entry);
Ok(result)
}
fn server_version(&self) -> Result<Value> {
Ok(json!([
format!("electrs {}", ELECTRS_VERSION),
PROTOCOL_VERSION
]))
}
fn server_banner(&self) -> Result<Value> {
Ok(json!(self.query.get_banner()?))
}
fn server_donation_address(&self) -> Result<Value> {
Ok(Value::Null)
}
fn server_peers_subscribe(&self) -> Result<Value> {
Ok(json!([]))
}
fn mempool_get_fee_histogram(&self) -> Result<Value> {
Ok(json!(self.query.get_fee_histogram()))
}
fn blockchain_block_header(&self, params: &[Value]) -> Result<Value> {
let height = usize_from_value(params.get(0), "height")?;
let cp_height = usize_from_value_or(params.get(1), "cp_height", 0)?;
let raw_header_hex: String = self
.query
.get_headers(&[height])
.into_iter()
.map(|entry| hex::encode(&serialize(entry.header())))
.collect();
if cp_height == 0 {
return Ok(json!(raw_header_hex));
}
let (branch, root) = self.query.get_header_merkle_proof(height, cp_height)?;
let branch_vec: Vec<String> = branch.into_iter().map(|b| b.to_hex()).collect();
Ok(json!({
"header": raw_header_hex,
"root": root.to_hex(),
"branch": branch_vec
}))
}
fn blockchain_block_headers(&self, params: &[Value]) -> Result<Value> {
let start_height = usize_from_value(params.get(0), "start_height")?;
let count = usize_from_value(params.get(1), "count")?;
let cp_height = usize_from_value_or(params.get(2), "cp_height", 0)?;
let heights: Vec<usize> = (start_height..(start_height + count)).collect();
let headers: Vec<String> = self
.query
.get_headers(&heights)
.into_iter()
.map(|entry| hex::encode(&serialize(entry.header())))
.collect();
if count == 0 || cp_height == 0 {
return Ok(json!({
"count": headers.len(),
"hex": headers.join(""),
"max": 2016,
}));
}
let (branch, root) = self
.query
.get_header_merkle_proof(start_height + (count - 1), cp_height)?;
let branch_vec: Vec<String> = branch.into_iter().map(|b| b.to_hex()).collect();
Ok(json!({
"count": headers.len(),
"hex": headers.join(""),
"max": 2016,
"root": root.to_hex(),
"branch" : branch_vec
}))
}
fn blockchain_estimatefee(&self, params: &[Value]) -> Result<Value> {
let blocks_count = usize_from_value(params.get(0), "blocks_count")?;
let fee_rate = self.query.estimate_fee(blocks_count); // in BTC/kB
Ok(json!(fee_rate.max(self.relayfee)))
}
fn blockchain_relayfee(&self) -> Result<Value> {
Ok(json!(self.relayfee)) // in BTC/kB
}
fn blockchain_scripthash_subscribe(&mut self, params: &[Value]) -> Result<Value> {
let script_hash =
hash_from_value::<Sha256dHash>(params.get(0)).chain_err(|| "bad script_hash")?;
let status = self.query.status(&script_hash[..])?;
let result = status.hash().map_or(Value::Null, |h| json!(hex::encode(h)));
if self
.status_hashes
.insert(script_hash, result.clone())
.is_none()
{
self.stats.subscriptions.inc();
}
Ok(result)
}
fn blockchain_scripthash_get_balance(&self, params: &[Value]) -> Result<Value> {
let script_hash =
hash_from_value::<Sha256dHash>(params.get(0)).chain_err(|| "bad script_hash")?;
let status = self.query.status(&script_hash[..])?;
Ok(
json!({ "confirmed": status.confirmed_balance(), "unconfirmed": status.mempool_balance() }),
)
}
fn blockchain_scripthash_get_history(&self, params: &[Value]) -> Result<Value> {
let script_hash =
hash_from_value::<Sha256dHash>(params.get(0)).chain_err(|| "bad script_hash")?;
let status = self.query.status(&script_hash[..])?;
Ok(json!(Value::Array(
status
.history()
.into_iter()
.map(|item| item.to_json())
.collect()
)))
}
fn blockchain_scripthash_listunspent(&self, params: &[Value]) -> Result<Value> {
let script_hash =
hash_from_value::<Sha256dHash>(params.get(0)).chain_err(|| "bad script_hash")?;
Ok(unspent_from_status(&self.query.status(&script_hash[..])?))
}
fn blockchain_transaction_broadcast(&self, params: &[Value]) -> Result<Value> {
let tx = params.get(0).chain_err(|| "missing tx")?;
let tx = tx.as_str().chain_err(|| "non-string tx")?;
let tx = hex::decode(&tx).chain_err(|| "non-hex tx")?;
let tx: Transaction = deserialize(&tx).chain_err(|| "failed to parse tx")?;
let txid = self.query.broadcast(&tx)?;
self.query.update_mempool()?;
if let Err(e) = self.sender.try_send(Message::PeriodicUpdate) {
warn!("failed to issue PeriodicUpdate after broadcast: {}", e);
}
Ok(json!(txid.to_hex()))
}
fn blockchain_transaction_get(&self, params: &[Value]) -> Result<Value> {
let tx_hash = hash_from_value(params.get(0)).chain_err(|| "bad tx_hash")?;
let verbose = match params.get(1) {
Some(value) => value.as_bool().chain_err(|| "non-bool verbose value")?,
None => false,
};
Ok(self.query.get_transaction(&tx_hash, verbose)?)
}
fn blockchain_transaction_get_confirmed_blockhash(&self, params: &[Value]) -> Result<Value> {
let tx_hash = hash_from_value(params.get(0)).chain_err(|| "bad tx_hash")?;
self.query.get_confirmed_blockhash(&tx_hash)
}
fn blockchain_transaction_get_merkle(&self, params: &[Value]) -> Result<Value> {
let tx_hash = hash_from_value(params.get(0)).chain_err(|| "bad tx_hash")?;
let height = usize_from_value(params.get(1), "height")?;
let (merkle, pos) = self
.query
.get_merkle_proof(&tx_hash, height)
.chain_err(|| "cannot create merkle proof")?;
let merkle: Vec<String> = merkle.into_iter().map(|txid| txid.to_hex()).collect();
Ok(json!({
"block_height": height,
"merkle": merkle,
"pos": pos}))
}
fn blockchain_transaction_id_from_pos(&self, params: &[Value]) -> Result<Value> {
let height = usize_from_value(params.get(0), "height")?;
let tx_pos = usize_from_value(params.get(1), "tx_pos")?;
let want_merkle = bool_from_value_or(params.get(2), "merkle", false)?;
let (txid, merkle) = self.query.get_id_from_pos(height, tx_pos, want_merkle)?;
if !want_merkle {
return Ok(json!(txid.to_hex()));
}
let merkle_vec: Vec<String> = merkle.into_iter().map(|entry| entry.to_hex()).collect();
Ok(json!({
"tx_hash" : txid.to_hex(),
"merkle" : merkle_vec}))
}
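// Example request/response pair (standard JSON-RPC 2.0 framing, one message per line):
// --> {"jsonrpc": "2.0", "id": 1, "method": "server.ping", "params": []}
// <-- {"jsonrpc": "2.0", "id": 1, "result": null}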
fn handle_command(&mut self, method: &str, params: &[Value], id: &Value) -> Result<Value> {
let timer = self
.stats
.latency
.with_label_values(&[method])
.start_timer();
let result = match method {
"blockchain.block.header" => self.blockchain_block_header(&params),
"blockchain.block.headers" => self.blockchain_block_headers(&params),
"blockchain.estimatefee" => self.blockchain_estimatefee(&params),
"blockchain.headers.subscribe" => self.blockchain_headers_subscribe(),
"blockchain.relayfee" => self.blockchain_relayfee(),
"blockchain.scripthash.get_balance" => self.blockchain_scripthash_get_balance(&params),
"blockchain.scripthash.get_history" => self.blockchain_scripthash_get_history(&params),
"blockchain.scripthash.listunspent" => self.blockchain_scripthash_listunspent(&params),
"blockchain.scripthash.subscribe" => self.blockchain_scripthash_subscribe(&params),
"blockchain.transaction.broadcast" => self.blockchain_transaction_broadcast(&params),
"blockchain.transaction.get" => self.blockchain_transaction_get(&params),
"blockchain.transaction.get_merkle" => self.blockchain_transaction_get_merkle(&params),
"blockchain.transaction.get_confirmed_blockhash" => self.blockchain_transaction_get_confirmed_blockhash(&params),
"blockchain.transaction.id_from_pos" => {
self.blockchain_transaction_id_from_pos(&params)
}
"mempool.get_fee_histogram" => self.mempool_get_fee_histogram(),
"server.banner" => self.server_banner(),
"server.donation_address" => self.server_donation_address(),
"server.peers.subscribe" => self.server_peers_subscribe(),
"server.ping" => Ok(Value::Null),
"server.version" => self.server_version(),
&_ => bail!("unknown method {} {:?}", method, params),
};
timer.observe_duration();
// TODO: application errors should be sent back to the client
Ok(match result {
Ok(result) => json!({"jsonrpc": "2.0", "id": id, "result": result}),
Err(e) => {
warn!(
"rpc #{} {} {:?} failed: {}",
id,
method,
params,
e.display_chain()
);
json!({"jsonrpc": "2.0", "id": id, "error": format!("{}", e)})
}
})
}
fn update_subscriptions(&mut self) -> Result<Vec<Value>> {
let timer = self
.stats
.latency
.with_label_values(&["periodic_update"])
.start_timer();
let mut result = vec![];
if let Some(ref mut last_entry) = self.last_header_entry {
let entry = self.query.get_best_header()?;
if *last_entry != entry {
*last_entry = entry;
let hex_header = hex::encode(serialize(last_entry.header()));
let header = json!({"hex": hex_header, "height": last_entry.height()});
result.push(json!({
"jsonrpc": "2.0",
"method": "blockchain.headers.subscribe",
"params": [header]}));
}
}
for (script_hash, status_hash) in self.status_hashes.iter_mut() {
let status = self.query.status(&script_hash[..])?;
let new_status_hash = status.hash().map_or(Value::Null, |h| json!(hex::encode(h)));
if new_status_hash == *status_hash {
continue;
}
result.push(json!({
"jsonrpc": "2.0",
"method": "blockchain.scripthash.subscribe",
"params": [script_hash.to_hex(), new_status_hash]}));
*status_hash = new_status_hash;
}
timer.observe_duration();
Ok(result)
}
fn send_values(&mut self, values: &[Value]) -> Result<()> {
for value in values {
let line = value.to_string() + "\n";
self.stream
.write_all(line.as_bytes())
.chain_err(|| format!("failed to send {}", value))?;
}
Ok(())
}
fn handle_replies(&mut self, receiver: Receiver<Message>) -> Result<()> {
let empty_params = json!([]);
loop {
let msg = receiver.recv().chain_err(|| "channel closed")?;
trace!("RPC {:?}", msg);
match msg {
Message::Request(line) => {
let cmd: Value = from_str(&line).chain_err(|| "invalid JSON format")?;
let reply = match (
cmd.get("method"),
cmd.get("params").unwrap_or_else(|| &empty_params),
cmd.get("id"),
) {
(
Some(&Value::String(ref method)),
&Value::Array(ref params),
Some(ref id),
) => self.handle_command(method, params, id)?,
_ => bail!("invalid command: {}", cmd),
};
self.send_values(&[reply])?
}
Message::PeriodicUpdate => {
let values = self
.update_subscriptions()
.chain_err(|| "failed to update subscriptions")?;
self.send_values(&values)?
}
Message::Done => return Ok(()),
}
}
}
fn parse_requests(mut reader: BufReader<TcpStream>, tx: SyncSender<Message>) -> Result<()> {
loop {
let mut line = Vec::<u8>::new();
reader
.read_until(b'\n', &mut line)
.chain_err(|| "failed to read a request")?;
if line.is_empty() {
tx.send(Message::Done).chain_err(|| "channel closed")?;
return Ok(());
} else {
if line.starts_with(&[22, 3, 1]) {
// (very) naive SSL handshake detection
let _ = tx.send(Message::Done);
bail!("invalid request - maybe SSL-encrypted data?: {:?}", line)
}
match String::from_utf8(line) {
Ok(req) => tx
.send(Message::Request(req))
.chain_err(|| "channel closed")?,
Err(err) => {
let _ = tx.send(Message::Done);
bail!("invalid UTF8: {}", err)
}
}
}
}
}
pub fn run(mut self, receiver: Receiver<Message>) {
let reader = BufReader::new(self.stream.try_clone().expect("failed to clone TcpStream"));
let sender = self.sender.clone();
let child = spawn_thread("reader", || Connection::parse_requests(reader, sender));
if let Err(e) = self.handle_replies(receiver) {
error!(
"[{}] connection handling failed: {}",
self.addr,
e.display_chain().to_string()
);
}
self.stats
.subscriptions
.sub(self.status_hashes.len() as i64);
debug!("[{}] shutting down connection", self.addr);
let _ = self.stream.shutdown(Shutdown::Both);
if let Err(err) = child.join().expect("receiver panicked") {
error!("[{}] receiver failed: {}", self.addr, err);
}
}
}
#[derive(Debug)]
pub enum Message {
Request(String),
PeriodicUpdate,
Done,
}
pub enum Notification {
Periodic,
Exit,
}
pub struct RPC {
notification: Sender<Notification>,
server: Option<thread::JoinHandle<()>>, // so we can join the server while dropping this object
}
struct Stats {
latency: HistogramVec,
subscriptions: Gauge,
}
impl RPC {
fn start_notifier(
notification: Channel<Notification>,
senders: Arc<Mutex<Vec<SyncSender<Message>>>>,
acceptor: Sender<Option<(TcpStream, SocketAddr)>>,
) {
spawn_thread("notification", move || {
for msg in notification.receiver().iter() {
let mut senders = senders.lock().unwrap();
match msg {
Notification::Periodic => {
senders.retain(|sender| {
if let Err(TrySendError::Disconnected(_)) =
sender.try_send(Message::PeriodicUpdate)
{
false // drop disconnected clients
} else {
true
}
})
}
Notification::Exit => acceptor.send(None).unwrap(), // mark acceptor as done
}
}
});
}
fn start_acceptor(addr: SocketAddr) -> Channel<Option<(TcpStream, SocketAddr)>> {
let chan = Channel::unbounded();
let acceptor = chan.sender();
spawn_thread("acceptor", move || {
let listener =
TcpListener::bind(addr).unwrap_or_else(|e| panic!("bind({}) failed: {}", addr, e));
info!(
"Electrum RPC server running on {} (protocol {})",
addr, PROTOCOL_VERSION
);
loop {
let (stream, addr) = listener.accept().expect("accept failed");
stream
.set_nonblocking(false)
.expect("failed to set connection as blocking");
acceptor.send(Some((stream, addr))).expect("send failed");
}
});
chan
}
pub fn start(addr: SocketAddr, query: Arc<Query>, metrics: &Metrics, relayfee: f64) -> RPC {
let stats = Arc::new(Stats {
latency: metrics.histogram_vec(
HistogramOpts::new("electrs_electrum_rpc", "Electrum RPC latency (seconds)"),
&["method"],
),
subscriptions: metrics.gauge(MetricOpts::new(
"electrs_electrum_subscriptions",
"# of Electrum subscriptions",
)),
});
stats.subscriptions.set(0);
let notification = Channel::unbounded();
RPC {
notification: notification.sender(),
server: Some(spawn_thread("rpc", move || {
let senders = Arc::new(Mutex::new(Vec::<SyncSender<Message>>::new()));
let acceptor = RPC::start_acceptor(addr);
RPC::start_notifier(notification, senders.clone(), acceptor.sender());
let mut threads = HashMap::new();
let (garbage_sender, garbage_receiver) = crossbeam_channel::unbounded();
while let Some((stream, addr)) = acceptor.receiver().recv().unwrap() {
// explicitly scope the shadowed variables for the new thread
let query = Arc::clone(&query);
let stats = Arc::clone(&stats);
let garbage_sender = garbage_sender.clone();
let (sender, receiver) = mpsc::sync_channel(10);
senders.lock().unwrap().push(sender.clone());
let spawned = spawn_thread("peer", move || {
info!("[{}] connected peer", addr);
let conn = Connection::new(query, stream, addr, stats, relayfee, sender);
conn.run(receiver);
info!("[{}] disconnected peer", addr);
let _ = garbage_sender.send(std::thread::current().id());
});
trace!("[{}] spawned {:?}", addr, spawned.thread().id());
threads.insert(spawned.thread().id(), spawned);
while let Ok(id) = garbage_receiver.try_recv() {
if let Some(thread) = threads.remove(&id) {
trace!("[{}] joining {:?}", addr, id);
if let Err(error) = thread.join() {
error!("failed to join {:?}: {:?}", id, error);
}
}
}
}
trace!("closing {} RPC connections", senders.lock().unwrap().len());
for sender in senders.lock().unwrap().iter() {
let _ = sender.send(Message::Done);
}
for (id, thread) in threads {
trace!("joining {:?}", id);
if let Err(error) = thread.join() {
error!("failed to join {:?}: {:?}", id, error);
}
}
trace!("RPC connections are closed");
})),
}
}
pub fn notify(&self) {
self.notification.send(Notification::Periodic).unwrap();
}
}
impl Drop for RPC {
fn drop(&mut self) {
trace!("stop accepting new RPCs");
self.notification.send(Notification::Exit).unwrap();
if let Some(handle) = self.server.take() {
handle.join().unwrap();
}
trace!("RPC server is stopped");
}
}

201
src/server.rs Normal file

@ -0,0 +1,201 @@
use anyhow::{Context, Result};
use bitcoin::BlockHash;
use bitcoincore_rpc::RpcApi;
use crossbeam_channel::{bounded, select, unbounded, Receiver, Sender};
use rayon::prelude::*;
use serde_json::{de::from_str, Value};
use std::{
collections::hash_map::HashMap,
convert::TryFrom,
io::{BufRead, BufReader, Write},
net::{Shutdown, TcpListener, TcpStream},
thread,
};
use crate::{
config::Config,
daemon::rpc_connect,
electrum::{Client, Rpc},
signals,
};
fn spawn<F>(name: &'static str, f: F) -> thread::JoinHandle<()>
where
F: 'static + Send + FnOnce() -> Result<()>,
{
thread::Builder::new()
.name(name.to_owned())
.spawn(move || {
if let Err(e) = f() {
warn!("{} thread failed: {}", name, e);
}
})
.expect("failed to spawn a thread")
}
struct Peer {
client: Client,
stream: TcpStream,
}
impl Peer {
fn new(stream: TcpStream) -> Self {
Self {
client: Client::default(),
stream,
}
}
}
fn tip_receiver(config: &Config) -> Result<Receiver<BlockHash>> {
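// bounded(0) creates a rendezvous channel: try_send() below succeeds only while the
// main loop is blocked on recv(), so stale tips are dropped rather than queued.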
let (tip_tx, tip_rx) = bounded(0);
let rpc = rpc_connect(&config)?;
let duration = u64::try_from(config.wait_duration.as_millis()).unwrap();
use crossbeam_channel::TrySendError;
spawn("tip_loop", move || loop {
let tip = rpc.get_best_block_hash()?;
match tip_tx.try_send(tip) {
Ok(_) | Err(TrySendError::Full(_)) => (),
Err(TrySendError::Disconnected(_)) => bail!("tip receiver disconnected"),
}
rpc.wait_for_new_block(duration)?;
});
Ok(tip_rx)
}
pub fn run(config: &Config, mut rpc: Rpc) -> Result<()> {
let listener = TcpListener::bind(config.electrum_rpc_addr)?;
let tip_rx = tip_receiver(&config)?;
info!("serving Electrum RPC on {}", listener.local_addr()?);
let (server_tx, server_rx) = unbounded();
spawn("accept_loop", || accept_loop(listener, server_tx)); // detach accepting thread
let signal_rx = signals::register();
let mut peers = HashMap::<usize, Peer>::new();
loop {
select! {
recv(signal_rx) -> sig => {
match sig.context("signal channel disconnected")? {
signals::Signal::Exit => break,
signals::Signal::Trigger => (),
}
},
recv(tip_rx) -> tip => match tip {
Ok(_) => (), // sync and update
Err(_) => break, // daemon is shutting down
},
recv(server_rx) -> event => {
let event = event.context("server disconnected")?;
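// Drain the events that are already queued (len() is sampled once, so this never
// blocks) and handle them together with the event just received.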
let buffered_events = server_rx.iter().take(server_rx.len());
for event in std::iter::once(event).chain(buffered_events) {
handle(&rpc, &mut peers, event);
}
},
};
rpc.sync().context("rpc sync failed")?;
peers
.par_iter_mut()
.map(|(peer_id, peer)| {
let notifications = rpc.update_client(&mut peer.client)?;
send(*peer_id, peer, &notifications)
})
.collect::<Result<_>>()?;
}
info!("stopping Electrum RPC server");
Ok(())
}
struct Event {
peer_id: usize,
msg: Message,
}
enum Message {
New(TcpStream),
Request(String),
Done,
}
fn handle(rpc: &Rpc, peers: &mut HashMap<usize, Peer>, event: Event) {
match event.msg {
Message::New(stream) => {
debug!("{}: connected", event.peer_id);
peers.insert(event.peer_id, Peer::new(stream));
}
Message::Request(line) => {
let result = match peers.get_mut(&event.peer_id) {
Some(peer) => handle_request(rpc, event.peer_id, peer, line),
None => {
warn!("{}: unknown peer for {}", event.peer_id, line);
Ok(())
}
};
if let Err(e) = result {
error!("{}: {}", event.peer_id, e);
let _ = peers
.remove(&event.peer_id)
.map(|peer| peer.stream.shutdown(Shutdown::Both));
}
}
Message::Done => {
debug!("{}: disconnected", event.peer_id);
peers.remove(&event.peer_id);
}
}
}
fn handle_request(rpc: &Rpc, peer_id: usize, peer: &mut Peer, line: String) -> Result<()> {
let request: Value = from_str(&line).with_context(|| format!("invalid request: {}", line))?;
let response: Value = rpc
.handle_request(&mut peer.client, request)
.with_context(|| format!("failed to handle request: {}", line))?;
send(peer_id, peer, &[response])
}
fn send(peer_id: usize, peer: &mut Peer, values: &[Value]) -> Result<()> {
for value in values {
let mut response = value.to_string();
debug!("{}: send {}", peer_id, response);
response += "\n";
peer.stream
.write_all(response.as_bytes())
.with_context(|| format!("failed to send response: {}", response))?;
}
Ok(())
}
fn accept_loop(listener: TcpListener, server_tx: Sender<Event>) -> Result<()> {
for (peer_id, conn) in listener.incoming().enumerate() {
let stream = conn.context("failed to accept")?;
let tx = server_tx.clone();
spawn("recv_loop", move || {
let result = recv_loop(peer_id, &stream, tx);
let _ = stream.shutdown(Shutdown::Both);
result
});
}
Ok(())
}
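// Reads newline-delimited JSON-RPC requests from a peer and forwards each line to
// the main server loop as a Message::Request event.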
fn recv_loop(peer_id: usize, stream: &TcpStream, server_tx: Sender<Event>) -> Result<()> {
server_tx.send(Event {
peer_id,
msg: Message::New(stream.try_clone()?),
})?;
let reader = BufReader::new(stream);
for line in reader.lines() {
let line = line.with_context(|| format!("{}: recv failed", peer_id))?;
debug!("{}: recv {}", peer_id, line);
let msg = Message::Request(line);
server_tx.send(Event { peer_id, msg })?;
}
server_tx.send(Event {
peer_id,
msg: Message::Done,
})?;
Ok(())
}


@ -1,52 +0,0 @@
use crossbeam_channel as channel;
use crossbeam_channel::RecvTimeoutError;
use std::thread;
use std::time::Duration;
use crate::errors::*;
#[derive(Clone)] // so multiple threads could wait on signals
pub struct Waiter {
receiver: channel::Receiver<i32>,
}
fn notify(signals: &[i32]) -> channel::Receiver<i32> {
let (s, r) = channel::bounded(1);
let signals =
signal_hook::iterator::Signals::new(signals).expect("failed to register signal hook");
thread::spawn(move || {
for signal in signals.forever() {
s.send(signal)
.unwrap_or_else(|_| panic!("failed to send signal {}", signal));
}
});
r
}
impl Waiter {
pub fn start() -> Waiter {
Waiter {
receiver: notify(&[
signal_hook::SIGINT,
signal_hook::SIGTERM,
signal_hook::SIGUSR1, // allow external triggering (e.g. via bitcoind `blocknotify`)
]),
}
}
pub fn wait(&self, duration: Duration) -> Result<()> {
match self.receiver.recv_timeout(duration) {
Ok(sig) => {
trace!("notified via SIG{}", sig);
if sig != signal_hook::SIGUSR1 {
bail!(ErrorKind::Interrupt(sig))
};
Ok(())
}
Err(RecvTimeoutError::Timeout) => Ok(()),
Err(RecvTimeoutError::Disconnected) => bail!("signal hook channel disconnected"),
}
}
pub fn poll(&self) -> Result<()> {
self.wait(Duration::from_secs(0))
}
}

30
src/signals.rs Normal file

@ -0,0 +1,30 @@
use crossbeam_channel::{unbounded, Receiver};
use signal_hook::consts::signal::*;
use signal_hook::iterator::Signals;
use std::thread;
pub(crate) enum Signal {
Exit,
Trigger,
}
pub(crate) fn register() -> Receiver<Signal> {
let ids = [
SIGINT, SIGTERM,
SIGUSR1, // allow external triggering (e.g. via bitcoind `blocknotify`)
];
let (tx, rx) = unbounded();
let mut signals = Signals::new(&ids).expect("failed to register signal hook");
thread::spawn(move || {
for id in &mut signals {
info!("notified via SIG{}", id);
let signal = match id {
SIGUSR1 => Signal::Trigger,
_ => Signal::Exit,
};
tx.send(signal).expect("failed to send signal");
}
});
rx
}

365
src/status.rs Normal file

@ -0,0 +1,365 @@
use anyhow::Result;
use bitcoin::{
hashes::{sha256, Hash, HashEngine},
Amount, Block, BlockHash, OutPoint, Transaction, Txid,
};
use rayon::prelude::*;
use serde_json::{json, Value};
use std::collections::{BTreeMap, HashMap, HashSet};
use std::convert::TryFrom;
use crate::{
cache::Cache,
chain::Chain,
daemon::Daemon,
index::Index,
mempool::Mempool,
merkle::Proof,
types::{ScriptHash, StatusHash},
};
#[derive(Default)]
struct Entry {
outputs: Vec<u32>,
spent: Vec<OutPoint>,
}
struct TxEntry {
txid: Txid,
outputs: Vec<u32>,
spent: Vec<OutPoint>,
}
impl TxEntry {
fn new(txid: Txid, entry: Entry) -> Self {
Self {
txid,
outputs: entry.outputs,
spent: entry.spent,
}
}
}
pub(crate) struct ConfirmedEntry {
txid: Txid,
height: usize,
}
impl ConfirmedEntry {
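// Electrum status hash input: each confirmed entry contributes a "txid:height:"
// string (see protocol-basics.html#status in the Electrum protocol docs).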
pub fn hash(&self, engine: &mut sha256::HashEngine) {
let s = format!("{}:{}:", self.txid, self.height);
engine.input(s.as_bytes());
}
pub fn value(&self) -> Value {
json!({"tx_hash": self.txid, "height": self.height})
}
}
pub(crate) struct MempoolEntry {
txid: Txid,
has_unconfirmed_inputs: bool,
fee: Amount,
}
impl MempoolEntry {
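// Electrum convention for mempool transactions: height 0 when all inputs are
// confirmed, -1 when the transaction spends an unconfirmed output.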
fn height(&self) -> isize {
match self.has_unconfirmed_inputs {
true => -1,
false => 0,
}
}
pub fn hash(&self, engine: &mut sha256::HashEngine) {
let s = format!("{}:{}:", self.txid, self.height());
engine.input(s.as_bytes());
}
pub fn value(&self) -> Value {
json!({"tx_hash": self.txid, "height": self.height(), "fee": self.fee.as_sat()})
}
}
/// ScriptHash subscription status
pub struct Status {
scripthash: ScriptHash,
tip: BlockHash,
statushash: Option<StatusHash>,
confirmed: HashMap<BlockHash, Vec<TxEntry>>,
mempool: Vec<TxEntry>,
}
fn make_outpoints<'a>(txid: &'a Txid, outputs: &'a [u32]) -> impl Iterator<Item = OutPoint> + 'a {
outputs.iter().map(move |vout| OutPoint::new(*txid, *vout))
}
impl Status {
pub fn new(scripthash: ScriptHash) -> Self {
Self {
scripthash,
tip: BlockHash::default(),
statushash: None,
confirmed: HashMap::new(),
mempool: Vec::new(),
}
}
fn filter_outputs(&self, tx: &Transaction) -> Vec<u32> {
let outputs = tx.output.iter().zip(0u32..);
outputs
.filter_map(move |(txo, vout)| {
if ScriptHash::new(&txo.script_pubkey) == self.scripthash {
Some(vout)
} else {
None
}
})
.collect()
}
fn filter_inputs(&self, tx: &Transaction, outpoints: &HashSet<OutPoint>) -> Vec<OutPoint> {
tx.input
.iter()
.filter_map(|txi| {
if outpoints.contains(&txi.previous_output) {
Some(txi.previous_output)
} else {
None
}
})
.collect()
}
fn funding_confirmed(&self, chain: &Chain) -> HashSet<OutPoint> {
self.confirmed
.iter()
.filter_map(|(blockhash, entries)| chain.get_block_height(blockhash).map(|_| entries))
.flat_map(|entries| {
entries
.iter()
.flat_map(|entry| make_outpoints(&entry.txid, &entry.outputs))
})
.collect()
}
pub(crate) fn get_unspent(&self, chain: &Chain) -> HashSet<OutPoint> {
let mut unspent: HashSet<OutPoint> = self.funding_confirmed(chain);
unspent.extend(
self.mempool
.iter()
.flat_map(|entry| make_outpoints(&entry.txid, &entry.outputs)),
);
let spent_outpoints = self
.confirmed
.iter()
.filter_map(|(blockhash, entries)| {
chain.get_block_height(blockhash).map(|_height| entries)
})
.flatten()
.chain(self.mempool.iter())
.flat_map(|entry| entry.spent.iter());
for outpoint in spent_outpoints {
assert!(unspent.remove(outpoint), "missing outpoint {}", outpoint);
}
unspent
}
pub(crate) fn get_confirmed(&self, chain: &Chain) -> Vec<ConfirmedEntry> {
self.confirmed
.iter()
.filter_map(move |(blockhash, entries)| {
chain
.get_block_height(blockhash)
.map(|height| (height, &entries[..]))
})
.collect::<BTreeMap<usize, &[TxEntry]>>()
.into_iter()
.flat_map(|(height, entries)| {
entries.iter().map(move |e| ConfirmedEntry {
txid: e.txid,
height,
})
})
.collect()
}
pub(crate) fn get_mempool(&self, mempool: &Mempool) -> Vec<MempoolEntry> {
let mut entries = self
.mempool
.iter()
.filter_map(|e| mempool.get(&e.txid))
.collect::<Vec<_>>();
entries.sort_by_key(|e| (e.has_unconfirmed_inputs, e.txid));
entries
.into_iter()
.map(|e| MempoolEntry {
txid: e.txid,
has_unconfirmed_inputs: e.has_unconfirmed_inputs,
fee: e.fee,
})
.collect()
}
fn for_new_blocks<B, F>(&self, blockhashes: B, daemon: &Daemon, func: F) -> Result<()>
where
B: IntoIterator<Item = BlockHash>,
F: FnMut(BlockHash, Block),
{
daemon.for_blocks(
blockhashes
.into_iter()
.filter(|blockhash| !self.confirmed.contains_key(blockhash)),
func,
)
}
fn sync_confirmed(
&self,
index: &Index,
daemon: &Daemon,
cache: &Cache,
outpoints: &mut HashSet<OutPoint>,
) -> Result<HashMap<BlockHash, Vec<TxEntry>>> {
type PosTxid = (u32, Txid);
let mut result = HashMap::<BlockHash, HashMap<PosTxid, Entry>>::new();
let funding_blockhashes = index.filter_by_funding(self.scripthash);
self.for_new_blocks(funding_blockhashes, daemon, |blockhash, block| {
let txids: Vec<Txid> = block.txdata.iter().map(|tx| tx.txid()).collect();
for (pos, (tx, txid)) in block.txdata.into_iter().zip(txids.iter()).enumerate() {
let funding_outputs = self.filter_outputs(&tx);
if funding_outputs.is_empty() {
continue;
}
cache.add_tx(*txid, move || tx);
cache.add_proof(blockhash, *txid, || Proof::create(&txids, pos));
outpoints.extend(make_outpoints(&txid, &funding_outputs));
result
.entry(blockhash)
.or_default()
.entry((u32::try_from(pos).unwrap(), *txid))
.or_default()
.outputs = funding_outputs;
}
})?;
let spending_blockhashes: HashSet<BlockHash> = outpoints
.par_iter()
.flat_map_iter(|outpoint| index.filter_by_spending(*outpoint))
.collect();
self.for_new_blocks(
spending_blockhashes.into_iter(),
daemon,
|blockhash, block| {
let txids: Vec<Txid> = block.txdata.iter().map(|tx| tx.txid()).collect();
for (pos, (tx, txid)) in block.txdata.into_iter().zip(txids.iter()).enumerate() {
let spent_outpoints = self.filter_inputs(&tx, &outpoints);
if spent_outpoints.is_empty() {
continue;
}
cache.add_tx(*txid, move || tx);
cache.add_proof(blockhash, *txid, || Proof::create(&txids, pos));
result
.entry(blockhash)
.or_default()
.entry((u32::try_from(pos).unwrap(), *txid))
.or_default()
.spent = spent_outpoints;
}
},
)?;
Ok(result
.into_iter()
.map(|(blockhash, entries_map)| {
let sorted_entries = entries_map
.into_iter()
.collect::<BTreeMap<PosTxid, Entry>>()
.into_iter()
.map(|((_pos, txid), entry)| TxEntry::new(txid, entry))
.collect::<Vec<TxEntry>>();
(blockhash, sorted_entries)
})
.collect())
}
fn sync_mempool(
&self,
mempool: &Mempool,
cache: &Cache,
outpoints: &mut HashSet<OutPoint>,
) -> Vec<TxEntry> {
let mut result = HashMap::<Txid, Entry>::new();
for entry in mempool.filter_by_funding(&self.scripthash) {
let funding_outputs = self.filter_outputs(&entry.tx);
assert!(!funding_outputs.is_empty());
outpoints.extend(make_outpoints(&entry.txid, &funding_outputs));
result.entry(entry.txid).or_default().outputs = funding_outputs;
cache.add_tx(entry.txid, || entry.tx.clone());
}
for entry in outpoints
.iter()
.flat_map(|outpoint| mempool.filter_by_spending(outpoint))
{
let spent_outpoints = self.filter_inputs(&entry.tx, &outpoints);
assert!(!spent_outpoints.is_empty());
result.entry(entry.txid).or_default().spent = spent_outpoints;
cache.add_tx(entry.txid, || entry.tx.clone());
}
result
.into_iter()
.map(|(txid, entry)| TxEntry::new(txid, entry))
.collect()
}
fn compute_status_hash(&self, chain: &Chain, mempool: &Mempool) -> Option<StatusHash> {
let confirmed = self.get_confirmed(chain);
let mempool = self.get_mempool(mempool);
if confirmed.is_empty() && mempool.is_empty() {
return None;
}
let mut engine = StatusHash::engine();
for entry in confirmed {
entry.hash(&mut engine);
}
for entry in mempool {
entry.hash(&mut engine);
}
Some(StatusHash::from_engine(engine))
}
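/// Sync this scripthash status against the index, mempool and daemon, updating the
/// confirmed/mempool entries and recomputing the status hash.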
pub(crate) fn sync(
&mut self,
index: &Index,
mempool: &Mempool,
daemon: &Daemon,
cache: &Cache,
) -> Result<()> {
let mut outpoints: HashSet<OutPoint> = self.funding_confirmed(index.chain());
let new_tip = index.chain().tip();
if self.tip != new_tip {
let update = self.sync_confirmed(index, daemon, cache, &mut outpoints)?;
self.confirmed.extend(update);
self.tip = new_tip;
}
if !self.confirmed.is_empty() {
debug!(
"{} transactions from {} blocks",
self.confirmed.values().map(Vec::len).sum::<usize>(),
self.confirmed.len()
);
}
self.mempool = self.sync_mempool(mempool, cache, &mut outpoints);
if !self.mempool.is_empty() {
debug!("{} mempool transactions", self.mempool.len());
}
self.statushash = self.compute_status_hash(index.chain(), mempool);
Ok(())
}
pub fn statushash(&self) -> Option<StatusHash> {
self.statushash
}
}


@ -1,193 +0,0 @@
use std::path::{Path, PathBuf};
use crate::util::Bytes;
#[derive(Clone)]
pub struct Row {
pub key: Bytes,
pub value: Bytes,
}
impl Row {
pub fn into_pair(self) -> (Bytes, Bytes) {
(self.key, self.value)
}
}
pub trait ReadStore: Sync {
fn get(&self, key: &[u8]) -> Option<Bytes>;
fn scan(&self, prefix: &[u8]) -> Vec<Row>;
}
pub trait WriteStore: Sync {
fn write<I: IntoIterator<Item = Row>>(&self, rows: I);
fn flush(&self);
}
#[derive(Clone)]
struct Options {
path: PathBuf,
bulk_import: bool,
low_memory: bool,
}
pub struct DBStore {
db: rocksdb::DB,
opts: Options,
}
impl DBStore {
fn open_opts(opts: Options) -> Self {
debug!("opening DB at {:?}", opts.path);
let mut db_opts = rocksdb::Options::default();
db_opts.create_if_missing(true);
// db_opts.set_keep_log_file_num(10);
db_opts.set_max_open_files(if opts.bulk_import { 16 } else { 256 });
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy);
db_opts.set_target_file_size_base(256 << 20);
db_opts.set_write_buffer_size(256 << 20);
db_opts.set_disable_auto_compactions(opts.bulk_import); // for initial bulk load
db_opts.set_advise_random_on_open(!opts.bulk_import); // bulk load uses sequential I/O
if !opts.low_memory {
db_opts.set_compaction_readahead_size(1 << 20);
}
let mut block_opts = rocksdb::BlockBasedOptions::default();
block_opts.set_block_size(if opts.low_memory { 256 << 10 } else { 1 << 20 });
DBStore {
db: rocksdb::DB::open(&db_opts, &opts.path).unwrap(),
opts,
}
}
/// Opens a new RocksDB at the specified location.
pub fn open(path: &Path, low_memory: bool) -> Self {
DBStore::open_opts(Options {
path: path.to_path_buf(),
bulk_import: true,
low_memory,
})
}
pub fn enable_compaction(self) -> Self {
let mut opts = self.opts.clone();
if opts.bulk_import {
opts.bulk_import = false;
info!("enabling auto-compactions");
let opts = [("disable_auto_compactions", "false")];
self.db.set_options(&opts).unwrap();
}
self
}
pub fn compact(self) -> Self {
info!("starting full compaction");
self.db.compact_range(None::<&[u8]>, None::<&[u8]>); // would take a while
info!("finished full compaction");
self
}
pub fn iter_scan(&self, prefix: &[u8]) -> ScanIterator {
ScanIterator {
prefix: prefix.to_vec(),
iter: self.db.prefix_iterator(prefix),
done: false,
}
}
}
pub struct ScanIterator<'a> {
prefix: Vec<u8>,
iter: rocksdb::DBIterator<'a>,
done: bool,
}
impl<'a> Iterator for ScanIterator<'a> {
type Item = Row;
fn next(&mut self) -> Option<Row> {
if self.done {
return None;
}
let (key, value) = self.iter.next()?;
if !key.starts_with(&self.prefix) {
self.done = true;
return None;
}
Some(Row {
key: key.to_vec(),
value: value.to_vec(),
})
}
}
impl ReadStore for DBStore {
fn get(&self, key: &[u8]) -> Option<Bytes> {
self.db.get(key).unwrap().map(|v| v.to_vec())
}
// TODO: use generators
fn scan(&self, prefix: &[u8]) -> Vec<Row> {
let mut rows = vec![];
for (key, value) in self.db.iterator(rocksdb::IteratorMode::From(
prefix,
rocksdb::Direction::Forward,
)) {
if !key.starts_with(prefix) {
break;
}
rows.push(Row {
key: key.to_vec(),
value: value.to_vec(),
});
}
rows
}
}
impl WriteStore for DBStore {
fn write<I: IntoIterator<Item = Row>>(&self, rows: I) {
let mut batch = rocksdb::WriteBatch::default();
for row in rows {
batch.put(row.key.as_slice(), row.value.as_slice()).unwrap();
}
let mut opts = rocksdb::WriteOptions::new();
opts.set_sync(!self.opts.bulk_import);
opts.disable_wal(self.opts.bulk_import);
self.db.write_opt(batch, &opts).unwrap();
}
fn flush(&self) {
let mut opts = rocksdb::WriteOptions::new();
opts.set_sync(true);
opts.disable_wal(false);
let empty = rocksdb::WriteBatch::default();
self.db.write_opt(empty, &opts).unwrap();
}
}
impl Drop for DBStore {
fn drop(&mut self) {
trace!("closing DB at {:?}", self.opts.path);
}
}
fn full_compaction_marker() -> Row {
Row {
key: b"F".to_vec(),
value: b"".to_vec(),
}
}
pub fn full_compaction(store: DBStore) -> DBStore {
store.flush();
let store = store.compact().enable_compaction();
store.write(vec![full_compaction_marker()]);
store
}
pub fn is_fully_compacted(store: &dyn ReadStore) -> bool {
let marker = store.get(&full_compaction_marker().key);
marker.is_some()
}

File diff suppressed because one or more lines are too long

102
src/tracker.rs Normal file

@ -0,0 +1,102 @@
use anyhow::{Context, Result};
use bitcoin::{BlockHash, Txid};
use serde_json::Value;
use std::convert::TryInto;
use std::path::Path;
use crate::{
cache::Cache,
chain::Chain,
config::Config,
daemon::Daemon,
db::DBStore,
index::Index,
mempool::{Histogram, Mempool},
metrics::Metrics,
status::Status,
};
/// Electrum protocol subscriptions' tracker
pub struct Tracker {
index: Index,
mempool: Mempool,
metrics: Metrics,
index_batch_size: usize,
}
impl Tracker {
pub fn new(config: &Config) -> Result<Self> {
let metrics = Metrics::new(config.monitoring_addr)?;
let store = DBStore::open(Path::new(&config.db_path))?;
let chain = Chain::new(config.network);
Ok(Self {
index: Index::load(store, chain, &metrics).context("failed to open index")?,
mempool: Mempool::new(),
metrics,
index_batch_size: config.index_batch_size,
})
}
pub(crate) fn chain(&self) -> &Chain {
self.index.chain()
}
pub(crate) fn fees_histogram(&self) -> &Histogram {
&self.mempool.fees_histogram()
}
pub(crate) fn metrics(&self) -> &Metrics {
&self.metrics
}
pub fn get_history(&self, status: &Status) -> impl Iterator<Item = Value> {
let confirmed = status
.get_confirmed(&self.index.chain())
.into_iter()
.map(|entry| entry.value());
let mempool = status
.get_mempool(&self.mempool)
.into_iter()
.map(|entry| entry.value());
confirmed.chain(mempool)
}
pub fn sync(&mut self, daemon: &Daemon) -> Result<()> {
self.index.sync(daemon, self.index_batch_size)?;
self.mempool.sync(daemon)?;
// TODO: double-check the tip and retry if it has changed
Ok(())
}
pub fn update_status(
&self,
status: &mut Status,
daemon: &Daemon,
cache: &Cache,
) -> Result<bool> {
let prev_statushash = status.statushash();
status.sync(&self.index, &self.mempool, daemon, cache)?;
Ok(prev_statushash != status.statushash())
}
pub fn get_balance(&self, status: &Status, cache: &Cache) -> bitcoin::Amount {
let unspent = status.get_unspent(&self.index.chain());
let mut balance = bitcoin::Amount::ZERO;
for outpoint in &unspent {
let value = cache
.get_tx(&outpoint.txid, |tx| {
let vout: usize = outpoint.vout.try_into().unwrap();
bitcoin::Amount::from_sat(tx.output[vout].value)
})
.expect("missing tx");
balance += value;
}
balance
}
pub fn get_blockhash_by_txid(&self, txid: Txid) -> Option<BlockHash> {
// Note: there are two blocks with coinbase transactions having the same txid (see BIP-30)
self.index.filter_by_txid(txid).next()
}
}

303
src/types.rs Normal file

@ -0,0 +1,303 @@
use anyhow::Result;
use std::convert::TryInto;
use bitcoin::{
consensus::encode::{deserialize, serialize, Decodable, Encodable},
hashes::{borrow_slice_impl, hash_newtype, hex_fmt_impl, index_impl, serde_impl, sha256, Hash},
BlockHeader, OutPoint, Script, Txid,
};
use crate::db;
macro_rules! impl_consensus_encoding {
($thing:ident, $($field:ident),+) => (
impl Encodable for $thing {
#[inline]
fn consensus_encode<S: ::std::io::Write>(
&self,
mut s: S,
) -> Result<usize, std::io::Error> {
let mut len = 0;
$(len += self.$field.consensus_encode(&mut s)?;)+
Ok(len)
}
}
impl Decodable for $thing {
#[inline]
fn consensus_decode<D: ::std::io::Read>(
mut d: D,
) -> Result<$thing, bitcoin::consensus::encode::Error> {
Ok($thing {
$($field: Decodable::consensus_decode(&mut d)?),+
})
}
}
);
}
hash_newtype!(
ScriptHash,
sha256::Hash,
32,
doc = "https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-basics.html#script-hashes",
true
);
impl ScriptHash {
pub fn new(script: &Script) -> Self {
ScriptHash::hash(&script[..])
}
fn prefix(&self) -> ScriptHashPrefix {
let mut prefix = [0u8; PREFIX_LEN];
prefix.copy_from_slice(&self.0[..PREFIX_LEN]);
ScriptHashPrefix { prefix }
}
}
const PREFIX_LEN: usize = 8;
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct ScriptHashPrefix {
prefix: [u8; PREFIX_LEN],
}
impl_consensus_encoding!(ScriptHashPrefix, prefix);
type Height = u32;
#[derive(Debug, Serialize, Deserialize, PartialEq)]
pub(crate) struct ScriptHashRow {
prefix: ScriptHashPrefix,
height: Height, // transaction confirmed height
}
impl_consensus_encoding!(ScriptHashRow, prefix, height);
impl ScriptHashRow {
pub(crate) fn scan_prefix(scripthash: ScriptHash) -> Box<[u8]> {
scripthash.0[..PREFIX_LEN].to_vec().into_boxed_slice()
}
pub(crate) fn new(scripthash: ScriptHash, height: usize) -> Self {
Self {
prefix: scripthash.prefix(),
height: height.try_into().expect("invalid height"),
}
}
pub(crate) fn to_db_row(&self) -> db::Row {
serialize(self).into_boxed_slice()
}
pub(crate) fn from_db_row(row: &[u8]) -> Self {
deserialize(&row).expect("bad ScriptHashRow")
}
pub(crate) fn height(&self) -> usize {
self.height.try_into().expect("invalid height")
}
}
// ***************************************************************************
hash_newtype!(
StatusHash,
sha256::Hash,
32,
doc = "https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-basics.html#status",
false
);
// ***************************************************************************
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct SpendingPrefix {
prefix: [u8; PREFIX_LEN],
}
impl_consensus_encoding!(SpendingPrefix, prefix);
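// The first 8 bytes of the txid (internal byte order) are read as a big-endian u64
// and the output index is added, so consecutive vouts of the same transaction map to
// consecutive prefixes (with possible carry into higher bytes) - see
// test_spending_prefix below.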
fn spending_prefix(prev: OutPoint) -> SpendingPrefix {
let txid_prefix = &prev.txid[..PREFIX_LEN];
let value = u64::from_be_bytes(txid_prefix.try_into().unwrap());
let value = value.wrapping_add(prev.vout.into());
SpendingPrefix {
prefix: value.to_be_bytes(),
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
pub(crate) struct SpendingPrefixRow {
prefix: SpendingPrefix,
height: Height, // transaction confirmed height
}
impl_consensus_encoding!(SpendingPrefixRow, prefix, height);
impl SpendingPrefixRow {
pub(crate) fn scan_prefix(outpoint: OutPoint) -> Box<[u8]> {
Box::new(spending_prefix(outpoint).prefix)
}
pub(crate) fn new(outpoint: OutPoint, height: usize) -> Self {
Self {
prefix: spending_prefix(outpoint),
height: height.try_into().expect("invalid height"),
}
}
pub(crate) fn to_db_row(&self) -> db::Row {
serialize(self).into_boxed_slice()
}
pub(crate) fn from_db_row(row: &[u8]) -> Self {
deserialize(&row).expect("bad SpendingPrefixRow")
}
pub(crate) fn height(&self) -> usize {
self.height.try_into().expect("invalid height")
}
}
// ***************************************************************************
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct TxidPrefix {
prefix: [u8; PREFIX_LEN],
}
impl_consensus_encoding!(TxidPrefix, prefix);
fn txid_prefix(txid: &Txid) -> TxidPrefix {
let mut prefix = [0u8; PREFIX_LEN];
prefix.copy_from_slice(&txid[..PREFIX_LEN]);
TxidPrefix { prefix }
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
pub(crate) struct TxidRow {
prefix: TxidPrefix,
height: Height, // transaction confirmed height
}
impl_consensus_encoding!(TxidRow, prefix, height);
impl TxidRow {
pub(crate) fn scan_prefix(txid: Txid) -> Box<[u8]> {
Box::new(txid_prefix(&txid).prefix)
}
pub(crate) fn new(txid: Txid, height: usize) -> Self {
Self {
prefix: txid_prefix(&txid),
height: height.try_into().expect("invalid height"),
}
}
pub(crate) fn to_db_row(&self) -> db::Row {
serialize(self).into_boxed_slice()
}
pub(crate) fn from_db_row(row: &[u8]) -> Self {
deserialize(&row).expect("bad TxidRow")
}
pub(crate) fn height(&self) -> usize {
self.height.try_into().expect("invalid height")
}
}
// ***************************************************************************
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct HeaderRow {
pub(crate) header: BlockHeader,
}
impl_consensus_encoding!(HeaderRow, header);
impl HeaderRow {
pub(crate) fn new(header: BlockHeader) -> Self {
Self { header }
}
pub(crate) fn to_db_row(&self) -> db::Row {
serialize(self).into_boxed_slice()
}
pub(crate) fn from_db_row(row: &[u8]) -> Self {
deserialize(&row).expect("bad HeaderRow")
}
}
#[cfg(test)]
mod tests {
use crate::types::{spending_prefix, ScriptHash, ScriptHashRow, SpendingPrefix};
use bitcoin::{hashes::hex::ToHex, Address, OutPoint, Txid};
use serde_json::{from_str, json};
use std::str::FromStr;
#[test]
fn test_scripthash_serde() {
let hex = "\"4b3d912c1523ece4615e91bf0d27381ca72169dbf6b1c2ffcc9f92381d4984a3\"";
let scripthash: ScriptHash = from_str(&hex).unwrap();
assert_eq!(format!("\"{}\"", scripthash), hex);
assert_eq!(json!(scripthash).to_string(), hex);
}
#[test]
fn test_scripthash_row() {
let hex = "\"4b3d912c1523ece4615e91bf0d27381ca72169dbf6b1c2ffcc9f92381d4984a3\"";
let scripthash: ScriptHash = from_str(&hex).unwrap();
let row1 = ScriptHashRow::new(scripthash, 123456);
let db_row = row1.to_db_row();
assert_eq!(db_row[..].to_hex(), "a384491d38929fcc40e20100");
let row2 = ScriptHashRow::from_db_row(&db_row);
assert_eq!(row1, row2);
}
#[test]
fn test_scripthash() {
let addr = Address::from_str("1KVNjD3AAnQ3gTMqoTKcWFeqSFujq9gTBT").unwrap();
let scripthash = ScriptHash::new(&addr.script_pubkey());
assert_eq!(
scripthash.to_hex(),
"00dfb264221d07712a144bda338e89237d1abd2db4086057573895ea2659766a"
);
}
#[test]
fn test_spending_prefix() {
let hex = "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f";
let txid = Txid::from_str(hex).unwrap();
assert_eq!(
spending_prefix(OutPoint { txid, vout: 0 }),
SpendingPrefix {
prefix: [31, 30, 29, 28, 27, 26, 25, 24]
}
);
assert_eq!(
spending_prefix(OutPoint { txid, vout: 10 }),
SpendingPrefix {
prefix: [31, 30, 29, 28, 27, 26, 25, 34]
}
);
assert_eq!(
spending_prefix(OutPoint { txid, vout: 255 }),
SpendingPrefix {
prefix: [31, 30, 29, 28, 27, 26, 26, 23]
}
);
assert_eq!(
spending_prefix(OutPoint { txid, vout: 256 }),
SpendingPrefix {
prefix: [31, 30, 29, 28, 27, 26, 26, 24]
}
);
}
}


@ -1,408 +0,0 @@
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::hash_types::BlockHash;
use std::collections::HashMap;
use std::convert::TryInto;
use std::fmt;
use std::iter::FromIterator;
use std::slice;
use std::sync::mpsc::{channel, sync_channel, Receiver, Sender, SyncSender};
use std::thread;
pub type Bytes = Vec<u8>;
pub type HeaderMap = HashMap<BlockHash, BlockHeader>;
// TODO: consolidate serialization/deserialization code for bincode/bitcoin.
const HASH_LEN: usize = 32;
pub const HASH_PREFIX_LEN: usize = 8;
pub type FullHash = [u8; HASH_LEN];
pub type HashPrefix = [u8; HASH_PREFIX_LEN];
pub fn hash_prefix(hash: &[u8]) -> HashPrefix {
hash[..HASH_PREFIX_LEN]
.try_into()
.expect("failed to convert into HashPrefix")
}
pub fn full_hash(hash: &[u8]) -> FullHash {
hash.try_into().expect("failed to convert into FullHash")
}
#[derive(Eq, PartialEq, Clone)]
pub struct HeaderEntry {
height: usize,
hash: BlockHash,
header: BlockHeader,
}
impl HeaderEntry {
pub fn hash(&self) -> &BlockHash {
&self.hash
}
pub fn header(&self) -> &BlockHeader {
&self.header
}
pub fn height(&self) -> usize {
self.height
}
}
impl fmt::Debug for HeaderEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let spec = time::Timespec::new(i64::from(self.header().time), 0);
let last_block_time = time::at_utc(spec).rfc3339().to_string();
write!(
f,
"best={} height={} @ {}",
self.hash(),
self.height(),
last_block_time,
)
}
}
struct HashedHeader {
blockhash: BlockHash,
header: BlockHeader,
}
fn hash_headers(headers: Vec<BlockHeader>) -> Vec<HashedHeader> {
// header[i] -> header[i-1] (i.e. header.last() is the tip)
let hashed_headers =
Vec::<HashedHeader>::from_iter(headers.into_iter().map(|header| HashedHeader {
blockhash: header.block_hash(),
header,
}));
for i in 1..hashed_headers.len() {
assert_eq!(
hashed_headers[i].header.prev_blockhash,
hashed_headers[i - 1].blockhash
);
}
hashed_headers
}
pub struct HeaderList {
headers: Vec<HeaderEntry>,
heights: HashMap<BlockHash, usize>,
}
impl HeaderList {
pub fn empty() -> HeaderList {
HeaderList {
headers: vec![],
heights: HashMap::new(),
}
}
pub fn order(&self, new_headers: Vec<BlockHeader>) -> Vec<HeaderEntry> {
// header[i] -> header[i-1] (i.e. header.last() is the tip)
let hashed_headers = hash_headers(new_headers);
let prev_blockhash = match hashed_headers.first() {
Some(h) => h.header.prev_blockhash,
None => return vec![], // hashed_headers is empty
};
let null_hash = BlockHash::default();
let new_height: usize = if prev_blockhash == null_hash {
0
} else {
self.header_by_blockhash(&prev_blockhash)
.unwrap_or_else(|| panic!("{} is not part of the blockchain", prev_blockhash))
.height()
+ 1
};
(new_height..)
.zip(hashed_headers.into_iter())
.map(|(height, hashed_header)| HeaderEntry {
height,
hash: hashed_header.blockhash,
header: hashed_header.header,
})
.collect()
}
pub fn apply(&mut self, new_headers: Vec<HeaderEntry>, tip: BlockHash) {
if tip == BlockHash::default() {
assert!(new_headers.is_empty());
self.heights.clear();
self.headers.clear();
return;
}
// new_headers[i] -> new_headers[i - 1] (i.e. new_headers.last() is the tip)
for i in 1..new_headers.len() {
assert_eq!(new_headers[i - 1].height() + 1, new_headers[i].height());
assert_eq!(
*new_headers[i - 1].hash(),
new_headers[i].header().prev_blockhash
);
}
let new_height = match new_headers.first() {
Some(entry) => {
// Make sure tip is consistent (if there are new headers)
let expected_tip = new_headers.last().unwrap().hash();
assert_eq!(tip, *expected_tip);
// Make sure first header connects correctly to existing chain
let height = entry.height();
let expected_prev_blockhash = if height > 0 {
*self.headers[height - 1].hash()
} else {
BlockHash::default()
};
assert_eq!(entry.header().prev_blockhash, expected_prev_blockhash);
// First new header's height (may override existing headers)
height
}
// No new headers - chain's "tail" may be removed
None => {
let tip_height = *self
.heights
.get(&tip)
.unwrap_or_else(|| panic!("missing tip: {}", tip));
tip_height + 1 // keep the tip, drop the rest
}
};
debug!(
"applying {} new headers from height {}",
new_headers.len(),
new_height
);
self.headers.truncate(new_height); // keep [0..new_height) entries
assert_eq!(new_height, self.headers.len());
for new_header in new_headers {
assert_eq!(new_header.height(), self.headers.len());
assert_eq!(new_header.header().prev_blockhash, self.tip());
self.heights.insert(*new_header.hash(), new_header.height());
self.headers.push(new_header);
}
assert_eq!(tip, self.tip());
assert!(self.heights.contains_key(&tip));
}
pub fn header_by_blockhash(&self, blockhash: &BlockHash) -> Option<&HeaderEntry> {
let height = self.heights.get(blockhash)?;
let header = self.headers.get(*height)?;
if *blockhash == *header.hash() {
Some(header)
} else {
None
}
}
pub fn header_by_height(&self, height: usize) -> Option<&HeaderEntry> {
self.headers.get(height).map(|entry| {
assert_eq!(entry.height(), height);
entry
})
}
pub fn equals(&self, other: &HeaderList) -> bool {
self.headers.last() == other.headers.last()
}
pub fn tip(&self) -> BlockHash {
self.headers.last().map(|h| *h.hash()).unwrap_or_default()
}
pub fn len(&self) -> usize {
self.headers.len()
}
pub fn is_empty(&self) -> bool {
self.headers.is_empty()
}
pub fn iter(&self) -> slice::Iter<HeaderEntry> {
self.headers.iter()
}
}
pub struct SyncChannel<T> {
tx: SyncSender<T>,
rx: Receiver<T>,
}
impl<T> SyncChannel<T> {
pub fn new(size: usize) -> SyncChannel<T> {
let (tx, rx) = sync_channel(size);
SyncChannel { tx, rx }
}
pub fn sender(&self) -> SyncSender<T> {
self.tx.clone()
}
pub fn receiver(&self) -> &Receiver<T> {
&self.rx
}
pub fn into_receiver(self) -> Receiver<T> {
self.rx
}
}
pub struct Channel<T> {
tx: Sender<T>,
rx: Receiver<T>,
}
impl<T> Channel<T> {
pub fn unbounded() -> Self {
let (tx, rx) = channel();
Channel { tx, rx }
}
pub fn sender(&self) -> Sender<T> {
self.tx.clone()
}
pub fn receiver(&self) -> &Receiver<T> {
&self.rx
}
pub fn into_receiver(self) -> Receiver<T> {
self.rx
}
}
pub fn spawn_thread<F, T>(name: &str, f: F) -> thread::JoinHandle<T>
where
F: FnOnce() -> T,
F: Send + 'static,
T: Send + 'static,
{
thread::Builder::new()
.name(name.to_owned())
.spawn(f)
.unwrap()
}
#[cfg(test)]
mod tests {
    #[test]
    fn test_headers() {
        use bitcoin::blockdata::block::BlockHeader;
        use bitcoin::hash_types::{BlockHash, TxMerkleNode};
        use bitcoin::hashes::Hash;

        use super::HeaderList;

        // Test an empty header list
        let null_hash = BlockHash::default();
        let mut header_list = HeaderList::empty();
        assert_eq!(header_list.tip(), null_hash);
        let ordered = header_list.order(vec![]);
        assert_eq!(ordered.len(), 0);
        header_list.apply(vec![], null_hash);

        let merkle_root = TxMerkleNode::hash(&[255]);
        let mut headers = vec![BlockHeader {
            version: 1,
            prev_blockhash: BlockHash::default(),
            merkle_root,
            time: 0,
            bits: 0,
            nonce: 0,
        }];
        for _height in 1..10 {
            let prev_blockhash = headers.last().unwrap().block_hash();
            let header = BlockHeader {
                version: 1,
                prev_blockhash,
                merkle_root,
                time: 0,
                bits: 0,
                nonce: 0,
            };
            headers.push(header);
        }

        // Test adding some new headers
        let ordered = header_list.order(headers[..3].to_vec());
        assert_eq!(ordered.len(), 3);
        header_list.apply(ordered.clone(), ordered[2].hash);
        assert_eq!(header_list.len(), 3);
        assert_eq!(header_list.tip(), ordered[2].hash);
        for h in 0..3 {
            let entry = header_list.header_by_height(h).unwrap();
            assert_eq!(entry.header, headers[h]);
            assert_eq!(entry.hash, headers[h].block_hash());
            assert_eq!(entry.height, h);
            assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
        }

        // Test adding some more headers
        let ordered = header_list.order(headers[3..6].to_vec());
        assert_eq!(ordered.len(), 3);
        header_list.apply(ordered.clone(), ordered[2].hash);
        assert_eq!(header_list.len(), 6);
        assert_eq!(header_list.tip(), ordered[2].hash);
        for h in 0..6 {
            let entry = header_list.header_by_height(h).unwrap();
            assert_eq!(entry.header, headers[h]);
            assert_eq!(entry.hash, headers[h].block_hash());
            assert_eq!(entry.height, h);
            assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
        }

        // Test adding some more headers (with an overlap)
        let ordered = header_list.order(headers[5..].to_vec());
        assert_eq!(ordered.len(), 5);
        header_list.apply(ordered.clone(), ordered[4].hash);
        assert_eq!(header_list.len(), 10);
        assert_eq!(header_list.tip(), ordered[4].hash);
        for h in 0..10 {
            let entry = header_list.header_by_height(h).unwrap();
            assert_eq!(entry.header, headers[h]);
            assert_eq!(entry.hash, headers[h].block_hash());
            assert_eq!(entry.height, h);
            assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
        }

        // Reorg the chain and test apply() on it
        for h in 8..10 {
            headers[h].nonce += 1;
            headers[h].prev_blockhash = headers[h - 1].block_hash()
        }

        // Test reorging the chain
        let ordered = header_list.order(headers[8..10].to_vec());
        assert_eq!(ordered.len(), 2);
        header_list.apply(ordered.clone(), ordered[1].hash);
        assert_eq!(header_list.len(), 10);
        assert_eq!(header_list.tip(), ordered[1].hash);
        for h in 0..10 {
            let entry = header_list.header_by_height(h).unwrap();
            assert_eq!(entry.header, headers[h]);
            assert_eq!(entry.hash, headers[h].block_hash());
            assert_eq!(entry.height, h);
            assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
        }

        // Test "trimming" the chain
        header_list.apply(vec![], headers[7].block_hash());
        assert_eq!(header_list.len(), 8);
        assert_eq!(header_list.tip(), headers[7].block_hash());
        for h in 0..8 {
            let entry = header_list.header_by_height(h).unwrap();
            assert_eq!(entry.header, headers[h]);
            assert_eq!(entry.hash, headers[h].block_hash());
            assert_eq!(entry.height, h);
            assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
        }

        // Test "un-trimming" the chain
        let ordered = header_list.order(headers[8..].to_vec());
        assert_eq!(ordered.len(), 2);
        header_list.apply(ordered.clone(), ordered[1].hash);
        assert_eq!(header_list.len(), 10);
        assert_eq!(header_list.tip(), ordered[1].hash);
        for h in 0..10 {
            let entry = header_list.header_by_height(h).unwrap();
            assert_eq!(entry.header, headers[h]);
            assert_eq!(entry.hash, headers[h].block_hash());
            assert_eq!(entry.height, h);
            assert_eq!(header_list.header_by_blockhash(&entry.hash), Some(entry));
        }
    }
}
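
For orientation, here is a minimal sketch of how the channel and thread helpers above compose. It is not part of the electrs sources; the function name, thread name, message type, and loop bounds are invented for illustration.

// Hypothetical usage of Channel and spawn_thread (illustration only, not from electrs).
fn channel_example() {
    let chan = Channel::unbounded();
    let tx = chan.sender();
    // Producer: a named thread that sends a few integers and exits.
    let producer = spawn_thread("producer", move || {
        for i in 0..3 {
            tx.send(i).unwrap();
        }
    });
    producer.join().unwrap();
    // Consumer: drain whatever was sent; try_recv() returns Err once the queue is empty.
    while let Ok(msg) = chan.receiver().try_recv() {
        println!("got {}", msg);
    }
}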

15
sync.sh Executable file

@ -0,0 +1,15 @@
#!/bin/bash
set -eux
cd "$(dirname "$0")"

cargo fmt --all
cargo build --all --release

NETWORK=$1
shift

CMD="target/release/sync --network $NETWORK --db-dir ./db2 --daemon-dir $HOME/.bitcoin"
export RUST_LOG=${RUST_LOG-info}
$CMD "$@"

# use SIGINT to quit
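
Usage note: the first argument selects the network and any remaining arguments are forwarded to the sync binary, so an invocation such as RUST_LOG=debug ./sync.sh regtest (a hypothetical example, not taken from the repository) would rebuild the workspace and run the indexer against a local regtest daemon with debug logging.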

102
tests/run.sh Executable file

@ -0,0 +1,102 @@
#!/bin/bash
set -euo pipefail
rm -rf data/
mkdir -p data/{bitcoin,electrum,electrs}
cleanup() {
    trap - SIGTERM SIGINT
    set +eo pipefail
    jobs
    for j in `jobs -rp`
    do
        kill $j
        wait $j
    done
}
trap cleanup SIGINT SIGTERM EXIT
BTC="bitcoin-cli -regtest -datadir=data/bitcoin"
ELECTRUM="electrum --regtest"
EL="$ELECTRUM --wallet=data/electrum/wallet"
tail_log() {
    tail -n +0 -F $1 || true
}
echo "Starting $(bitcoind -version | head -n1)..."
bitcoind -regtest -datadir=data/bitcoin -printtoconsole=0 &
BITCOIND_PID=$!
$BTC -rpcwait getblockcount > /dev/null
echo "Creating Electrum `electrum version --offline` wallet..."
WALLET=`$EL --offline create --seed_type=segwit`
MINING_ADDR=`$EL --offline getunusedaddress`
$BTC generatetoaddress 110 $MINING_ADDR > /dev/null
echo `$BTC getblockchaininfo | jq -r '"Generated \(.blocks) regtest blocks (\(.size_on_disk/1e3) kB)"'` to $MINING_ADDR
TIP=`$BTC getbestblockhash`
export RUST_LOG=electrs=debug
electrs --db-dir=data/electrs --daemon-dir=data/bitcoin --network=regtest 2> data/electrs/regtest-debug.log &
ELECTRS_PID=$!
tail_log data/electrs/regtest-debug.log | grep -m1 "serving Electrum RPC"
$ELECTRUM daemon --server localhost:60401:t -1 -vDEBUG 2> data/electrum/regtest-debug.log &
ELECTRUM_PID=$!
tail_log data/electrum/regtest-debug.log | grep -m1 "connection established"
$EL getinfo | jq .
echo "Loading Electrum wallet..."
test `$EL load_wallet` == "true"
echo "Running integration tests:"
echo " * getbalance"
test "`$EL getbalance | jq -c .`" == '{"confirmed":"550","unmatured":"4950"}'
echo " * getunusedaddress"
NEW_ADDR=`$EL getunusedaddress`
echo " * payto & broadcast"
TXID=$($EL broadcast $($EL payto $NEW_ADDR 123 --fee 0.001))
echo " * get_tx_status"
test "`$EL get_tx_status $TXID | jq -c .`" == '{"confirmations":0}'
echo " * getaddresshistory"
test "`$EL getaddresshistory $NEW_ADDR | jq -c .`" == "[{\"fee\":100000,\"height\":0,\"tx_hash\":\"$TXID\"}]"
echo " * getbalance"
test "`$EL getbalance | jq -c .`" == '{"confirmed":"550","unconfirmed":"-0.001","unmatured":"4950"}'
echo "Generating bitcoin block..."
$BTC generatetoaddress 1 $MINING_ADDR > /dev/null
$BTC getblockcount > /dev/null
echo " * wait for new block"
kill -USR1 $ELECTRS_PID # notify server to index new block
tail_log data/electrum/regtest-debug.log | grep -m1 "verified $TXID" > /dev/null
echo " * get_tx_status"
test "`$EL get_tx_status $TXID | jq -c .`" == '{"confirmations":1}'
echo " * getaddresshistory"
test "`$EL getaddresshistory $NEW_ADDR | jq -c .`" == "[{\"height\":111,\"tx_hash\":\"$TXID\"}]"
echo " * getbalance"
test "`$EL getbalance | jq -c .`" == '{"confirmed":"599.999","unmatured":"4950.001"}'
echo "Electrum `$EL stop`" # disconnect wallet
wait $ELECTRUM_PID
kill -INT $ELECTRS_PID # close server
tail_log data/electrs/regtest-debug.log | grep -m1 "stopping Electrum RPC server"
wait $ELECTRS_PID
$BTC stop # stop bitcoind
wait $BITCOIND_PID
echo "=== PASSED ==="