Mirror of https://github.com/romanz/electrs.git
Merge branch 'vec_to_iter'
commit 0858b627e1
5 changed files with 40 additions and 37 deletions
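In short: the vec_to_iter branch converts the indexing pipeline from eagerly collected Vec<Row> buffers to lazy iterators. index_transaction and index_block now return impl Iterator<Item = Row>, and WriteStore::write accepts any IntoIterator<Item = Row>, so rows stream from indexing into the store without materializing a per-batch vector first.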
@@ -27,7 +27,7 @@ impl App {
         }))
     }
 
-    fn write_store(&self) -> &store::WriteStore {
+    fn write_store(&self) -> &impl store::WriteStore {
         &self.store
     }
     // TODO: use index for queries.
@@ -13,7 +13,7 @@ impl ReadStore for FakeStore {
 }
 
 impl WriteStore for FakeStore {
-    fn write(&self, _rows: Vec<Row>) {}
+    fn write<I: IntoIterator<Item = Row>>(&self, _rows: I) {}
     fn flush(&self) {}
 }
 
63  src/index.rs
@@ -170,38 +170,47 @@ pub fn compute_script_hash(data: &[u8]) -> FullHash {
     hash
 }
 
-pub fn index_transaction(txn: &Transaction, height: usize, rows: &mut Vec<Row>) {
+pub fn index_transaction<'a>(
+    txn: &'a Transaction,
+    height: usize,
+) -> impl 'a + Iterator<Item = Row> {
     let null_hash = Sha256dHash::default();
     let txid: Sha256dHash = txn.txid();
-    for input in &txn.input {
+
+    let inputs = txn.input.iter().filter_map(move |input| {
         if input.previous_output.txid == null_hash {
-            continue;
+            None
+        } else {
+            Some(TxInRow::new(&txid, &input).to_row())
         }
-        rows.push(TxInRow::new(&txid, &input).to_row());
-    }
-    for output in &txn.output {
-        rows.push(TxOutRow::new(&txid, &output).to_row());
-    }
+    });
+    let outputs = txn
+        .output
+        .iter()
+        .map(move |output| TxOutRow::new(&txid, &output).to_row());
 
     // Persist transaction ID and confirmed height
-    rows.push(TxRow::new(&txid, height as u32).to_row());
+    inputs
+        .chain(outputs)
+        .chain(std::iter::once(TxRow::new(&txid, height as u32).to_row()))
 }
 
-pub fn index_block(block: &Block, height: usize) -> Vec<Row> {
-    let mut rows = vec![];
-    for txn in &block.txdata {
-        index_transaction(&txn, height, &mut rows);
-    }
+pub fn index_block<'a>(block: &'a Block, height: usize) -> impl 'a + Iterator<Item = Row> {
     let blockhash = block.bitcoin_hash();
     // Persist block hash and header
-    rows.push(Row {
+    let row = Row {
         key: bincode::serialize(&BlockKey {
             code: b'B',
             hash: full_hash(&blockhash[..]),
         })
         .unwrap(),
         value: serialize(&block.header),
-    });
-    rows
+    };
+    block
+        .txdata
+        .iter()
+        .flat_map(move |txn| index_transaction(&txn, height))
+        .chain(std::iter::once(row))
 }
 
 pub fn last_indexed_block(blockhash: &Sha256dHash) -> Row {
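As a side note, here is a minimal self-contained sketch of the pattern adopted above, using simplified stand-in types (Row, Tx and index_tx below are illustrations, not the electrs types): the function borrows the transaction and returns a lazy iterator built from filter_map, map, chain and iter::once, so no rows are produced until a caller consumes them.

    // Stand-in types for illustration only; the real Row/TxInRow/TxOutRow live in src/index.rs.
    struct Row {
        key: Vec<u8>,
        value: Vec<u8>,
    }

    struct Tx {
        inputs: Vec<u64>, // 0 stands in for a "null" previous output (coinbase-like)
        outputs: Vec<u64>,
    }

    // Same shape as the new index_transaction: borrow the transaction for 'a and
    // return a lazy iterator instead of pushing into a &mut Vec<Row>.
    fn index_tx<'a>(tx: &'a Tx, height: usize) -> impl Iterator<Item = Row> + 'a {
        let inputs = tx.inputs.iter().filter_map(|&prev| {
            if prev == 0 {
                None // skip "null" inputs, as the real code does
            } else {
                Some(Row { key: prev.to_le_bytes().to_vec(), value: vec![] })
            }
        });
        let outputs = tx
            .outputs
            .iter()
            .map(|&out| Row { key: out.to_le_bytes().to_vec(), value: vec![] });
        inputs.chain(outputs).chain(std::iter::once(Row {
            key: vec![b'T'],
            value: (height as u32).to_le_bytes().to_vec(),
        }))
    }

    fn main() {
        let tx = Tx { inputs: vec![0, 7], outputs: vec![42] };
        // Nothing is computed until the iterator is consumed.
        let rows: Vec<Row> = index_tx(&tx, 100).collect();
        assert_eq!(rows.len(), 3); // one non-null input, one output, one tx row
        println!("indexed {} rows", rows.len());
    }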
@@ -361,7 +370,7 @@ impl Index {
             .cloned()
     }
 
-    pub fn update(&self, store: &WriteStore, waiter: &Waiter) -> Result<Sha256dHash> {
+    pub fn update(&self, store: &impl WriteStore, waiter: &Waiter) -> Result<Sha256dHash> {
         let daemon = self.daemon.reconnect()?;
         let tip = daemon.getbestblockhash()?;
         let new_headers: Vec<HeaderEntry> = {
@@ -401,22 +410,18 @@ impl Index {
                 break;
             }
 
-            let mut rows = vec![];
-            for block in &batch {
+            let rows_iter = batch.iter().flat_map(|block| {
                 let blockhash = block.bitcoin_hash();
                 let height = *height_map
                     .get(&blockhash)
                     .unwrap_or_else(|| panic!("missing header for block {}", blockhash));
 
-                let timer = self.stats.start_timer("index");
-                let mut block_rows = index_block(block, height);
-                block_rows.push(last_indexed_block(&blockhash));
-                rows.extend(block_rows);
-                timer.observe_duration();
-                self.stats.update(block, height);
-            }
-            let timer = self.stats.start_timer("write");
-            store.write(rows);
+                self.stats.update(block, height); // TODO: update stats after the block is indexed
+                index_block(block, height).chain(std::iter::once(last_indexed_block(&blockhash)))
+            });
+
+            let timer = self.stats.start_timer("index+write");
+            store.write(rows_iter);
             timer.observe_duration();
         }
         let timer = self.stats.start_timer("flush");
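Note that with the streaming version the separate "index" and "write" timers collapse into a single "index+write" timer: rows are produced by index_block while the store consumes them, so the two phases can no longer be measured apart. The TODO marks that self.stats.update now runs before the block's rows have actually been written.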
@@ -29,8 +29,7 @@ impl MempoolStore {
     }
 
     fn add(&mut self, tx: &Transaction) {
-        let mut rows = vec![];
-        index_transaction(tx, 0, &mut rows);
+        let rows = index_transaction(tx, 0);
         for row in rows {
             let (key, value) = row.into_pair();
             self.map.entry(key).or_insert_with(|| vec![]).push(value);
@@ -38,8 +37,7 @@ impl MempoolStore {
     }
 
     fn remove(&mut self, tx: &Transaction) {
-        let mut rows = vec![];
-        index_transaction(tx, 0, &mut rows);
+        let rows = index_transaction(tx, 0);
         for row in rows {
             let (key, value) = row.into_pair();
             let no_values_left = {
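The for-row loops in add() and remove() keep their bodies unchanged: the value returned by the new index_transaction implements Iterator<Item = Row>, so a for loop consumes it just as it consumed the old Vec<Row>.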
@@ -21,7 +21,7 @@ pub trait ReadStore: Sync {
 }
 
 pub trait WriteStore: Sync {
-    fn write(&self, rows: Vec<Row>);
+    fn write<I: IntoIterator<Item = Row>>(&self, rows: I);
     fn flush(&self);
 }
 
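Since Vec<T> implements IntoIterator<Item = T>, any call site that still passes an owned Vec<Row> keeps compiling against the new generic signature; only callers that want to avoid the intermediate allocation need to change.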
@@ -148,7 +148,7 @@ impl ReadStore for DBStore {
 }
 
 impl WriteStore for DBStore {
-    fn write(&self, rows: Vec<Row>) {
+    fn write<I: IntoIterator<Item = Row>>(&self, rows: I) {
         let mut batch = rocksdb::WriteBatch::default();
         for row in rows {
             batch.put(row.key.as_slice(), row.value.as_slice()).unwrap();
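Finally, a minimal self-contained sketch of what the generic write signature allows, with stand-in Row, WriteStore and MemStore types (the Sync bound and the rocksdb batch are omitted here): the same method consumes either an owned Vec<Row> or a lazy iterator chain, pulling rows one at a time.

    use std::cell::RefCell;

    // Stand-ins for illustration only; the real trait and DBStore live in src/store.rs.
    struct Row {
        key: Vec<u8>,
        value: Vec<u8>,
    }

    trait WriteStore {
        // Same shape as the new trait method: any IntoIterator of rows is accepted.
        fn write<I: IntoIterator<Item = Row>>(&self, rows: I);
        fn flush(&self);
    }

    struct MemStore {
        batch: RefCell<Vec<(Vec<u8>, Vec<u8>)>>,
    }

    impl WriteStore for MemStore {
        fn write<I: IntoIterator<Item = Row>>(&self, rows: I) {
            let mut batch = self.batch.borrow_mut();
            // Rows are pulled one at a time; nothing is collected up front.
            for row in rows {
                batch.push((row.key, row.value));
            }
        }
        fn flush(&self) {
            self.batch.borrow_mut().clear();
        }
    }

    fn main() {
        let store = MemStore { batch: RefCell::new(vec![]) };

        // Old-style call sites keep working: Vec<Row> implements IntoIterator.
        store.write(vec![Row { key: b"a".to_vec(), value: vec![] }]);

        // New-style call sites hand over a lazy chain of iterators.
        let extra = (0u8..3).map(|i| Row { key: vec![i], value: vec![] });
        store.write(extra.chain(std::iter::once(Row { key: b"tip".to_vec(), value: vec![] })));

        assert_eq!(store.batch.borrow().len(), 5);
        store.flush();
    }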