1
0
Fork 0
mirror of https://github.com/romanz/electrs.git synced 2025-02-24 06:57:53 +01:00

Bunch of Vec<Row> translated to Iterator<Item=Row>

There were several places where a `Vec<Row>` was created just to be
iterated over later and then thrown away, thus allocating memory
unnecessarily. This changes those `Vec`s to `Iterator`s in order to
improve efficiency.
This commit is contained in:
Martin Habovstiak 2019-05-29 15:13:39 +02:00
parent 9df66d97e3
commit 8199563e0f
5 changed files with 26 additions and 32 deletions

View file

@ -27,7 +27,7 @@ impl App {
}))
}
fn write_store(&self) -> &store::WriteStore {
fn write_store(&self) -> &impl store::WriteStore {
&self.store
}
// TODO: use index for queries.

View file

@ -13,7 +13,7 @@ impl ReadStore for FakeStore {
}
impl WriteStore for FakeStore {
fn write(&self, _rows: Vec<Row>) {}
fn write<I: IntoIterator<Item=Row>>(&self, _rows: I) {}
fn flush(&self) {}
}

View file

@ -170,38 +170,35 @@ pub fn compute_script_hash(data: &[u8]) -> FullHash {
hash
}
pub fn index_transaction(txn: &Transaction, height: usize, rows: &mut Vec<Row>) {
pub fn index_transaction<'a>(txn: &'a Transaction, height: usize) -> impl 'a + Iterator<Item=Row> {
let null_hash = Sha256dHash::default();
let txid: Sha256dHash = txn.txid();
for input in &txn.input {
let inputs = txn.input.iter().filter_map(move |input| {
if input.previous_output.txid == null_hash {
continue;
None
} else {
Some(TxInRow::new(&txid, &input).to_row())
}
rows.push(TxInRow::new(&txid, &input).to_row());
}
for output in &txn.output {
rows.push(TxOutRow::new(&txid, &output).to_row());
}
});
let outputs = txn.output.iter().map(move |output| TxOutRow::new(&txid, &output).to_row());
// Persist transaction ID and confirmed height
rows.push(TxRow::new(&txid, height as u32).to_row());
inputs.chain(outputs).chain(std::iter::once(TxRow::new(&txid, height as u32).to_row()))
}
pub fn index_block(block: &Block, height: usize) -> Vec<Row> {
let mut rows = vec![];
for txn in &block.txdata {
index_transaction(&txn, height, &mut rows);
}
pub fn index_block<'a>(block: &'a Block, height: usize) -> impl 'a + Iterator<Item=Row> {
let blockhash = block.bitcoin_hash();
// Persist block hash and header
rows.push(Row {
let row = Row {
key: bincode::serialize(&BlockKey {
code: b'B',
hash: full_hash(&blockhash[..]),
})
.unwrap(),
value: serialize(&block.header),
});
rows
};
block.txdata.iter().flat_map(move |txn| index_transaction(&txn, height)).chain(std::iter::once(row))
}
pub fn last_indexed_block(blockhash: &Sha256dHash) -> Row {
@ -361,7 +358,7 @@ impl Index {
.cloned()
}
pub fn update(&self, store: &WriteStore, waiter: &Waiter) -> Result<Sha256dHash> {
pub fn update(&self, store: &impl WriteStore, waiter: &Waiter) -> Result<Sha256dHash> {
let daemon = self.daemon.reconnect()?;
let tip = daemon.getbestblockhash()?;
let new_headers: Vec<HeaderEntry> = {
@ -401,20 +398,19 @@ impl Index {
break;
}
let mut rows = vec![];
for block in &batch {
let rows = batch.iter().flat_map(|block| {
let blockhash = block.bitcoin_hash();
let height = *height_map
.get(&blockhash)
.unwrap_or_else(|| panic!("missing header for block {}", blockhash));
let timer = self.stats.start_timer("index");
let mut block_rows = index_block(block, height);
block_rows.push(last_indexed_block(&blockhash));
rows.extend(block_rows);
let block_rows = index_block(block, height);
timer.observe_duration();
self.stats.update(block, height);
}
block_rows.chain(std::iter::once(last_indexed_block(&blockhash)))
});
let timer = self.stats.start_timer("write");
store.write(rows);
timer.observe_duration();

View file

@ -29,8 +29,7 @@ impl MempoolStore {
}
fn add(&mut self, tx: &Transaction) {
let mut rows = vec![];
index_transaction(tx, 0, &mut rows);
let rows = index_transaction(tx, 0);
for row in rows {
let (key, value) = row.into_pair();
self.map.entry(key).or_insert_with(|| vec![]).push(value);
@ -38,8 +37,7 @@ impl MempoolStore {
}
fn remove(&mut self, tx: &Transaction) {
let mut rows = vec![];
index_transaction(tx, 0, &mut rows);
let rows = index_transaction(tx, 0);
for row in rows {
let (key, value) = row.into_pair();
let no_values_left = {

View file

@ -21,7 +21,7 @@ pub trait ReadStore: Sync {
}
pub trait WriteStore: Sync {
fn write(&self, rows: Vec<Row>);
fn write<I: IntoIterator<Item=Row>>(&self, rows: I);
fn flush(&self);
}
@ -148,7 +148,7 @@ impl ReadStore for DBStore {
}
impl WriteStore for DBStore {
fn write(&self, rows: Vec<Row>) {
fn write<I: IntoIterator<Item=Row>>(&self, rows: I) {
let mut batch = rocksdb::WriteBatch::default();
for row in rows {
batch.put(row.key.as_slice(), row.value.as_slice()).unwrap();