Mirror of https://github.com/mempool/mempool.git

Fix clippy pedantic and nursery lints as well

commit 609df31e0c (parent e61ae59e53)
6 changed files with 101 additions and 49 deletions
backend/rust-gbt/index.d.ts (vendored): 10 changes
@@ -5,7 +5,17 @@
 export class GbtGenerator {
   constructor()
+  /**
+   * # Errors
+   *
+   * Rejects if the thread panics or if the Mutex is poisoned.
+   */
   make(mempoolBuffer: Uint8Array): Promise<GbtResult>
+  /**
+   * # Errors
+   *
+   * Rejects if the thread panics or if the Mutex is poisoned.
+   */
   update(newTxs: Uint8Array, removeTxs: Uint8Array): Promise<GbtResult>
 }
 /**
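The JSDoc blocks added to the generated typings mirror the `/// # Errors` sections added to the Rust methods in the lib.rs hunks below, which napi-rs copies into index.d.ts. Documenting the failure mode is what clippy's pedantic missing_errors_doc lint asks of public functions returning Result. A minimal standalone illustration with a hypothetical helper, not taken from the patch:

/// Reads a little-endian u32 uid from the front of a buffer.
///
/// # Errors
///
/// Returns an error if the buffer holds fewer than four bytes.
pub fn read_uid(buffer: &[u8]) -> Result<u32, std::io::Error> {
    buffer
        .get(..4)
        .map(|b| u32::from_le_bytes([b[0], b[1], b[2], b[3]]))
        .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "buffer too short"))
}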
@@ -5,6 +5,7 @@ use std::{
     hash::{Hash, Hasher},
 };
 
+#[allow(clippy::struct_excessive_bools)]
 #[derive(Clone)]
 pub struct AuditTransaction {
     pub uid: u32,
@@ -43,7 +44,7 @@ impl PartialEq for AuditTransaction {
 impl Eq for AuditTransaction {}
 
 impl PartialOrd for AuditTransaction {
-    fn partial_cmp(&self, other: &AuditTransaction) -> Option<Ordering> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         // If either score is NaN, this is false,
         // and partial_cmp will return None
         if self.score == other.score {
@@ -55,7 +56,7 @@ impl PartialOrd for AuditTransaction {
 }
 
 impl Ord for AuditTransaction {
-    fn cmp(&self, other: &AuditTransaction) -> Ordering {
+    fn cmp(&self, other: &Self) -> Ordering {
         // Safety: The only possible values for score are f64
         // that are not NaN. This is because outside code can not
         // freely assign score. Also, calc_new_score guarantees no NaN.
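Both ordering impls rely on the invariant that score is never NaN, which is what makes the Ord impl sound, and the parameter type change to &Self matches the other hunks in this commit. A minimal standalone sketch of the same pattern, using a hypothetical Scored type rather than the crate's actual code:

use std::cmp::Ordering;

struct Scored {
    uid: u32,
    score: f64, // invariant: never NaN
}

impl PartialEq for Scored {
    fn eq(&self, other: &Self) -> bool {
        self.uid == other.uid
    }
}
impl Eq for Scored {}

impl PartialOrd for Scored {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Tie-break on uid so equal scores still order deterministically.
        if self.score == other.score {
            Some(self.uid.cmp(&other.uid))
        } else {
            self.score.partial_cmp(&other.score)
        }
    }
}

impl Ord for Scored {
    fn cmp(&self, other: &Self) -> Ordering {
        // Sound only because score is never NaN, as the comment above notes.
        self.partial_cmp(other).expect("score is never NaN")
    }
}

fn main() {
    let a = Scored { uid: 1, score: 2.0 };
    let b = Scored { uid: 2, score: 2.0 };
    assert_eq!(a.cmp(&b), Ordering::Less); // equal scores fall back to uid order
}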
@@ -65,7 +66,7 @@ impl Ord for AuditTransaction {
 
 impl AuditTransaction {
     pub fn from_thread_transaction(tx: &ThreadTransaction) -> Self {
-        AuditTransaction {
+        Self {
             uid: tx.uid,
             fee: tx.fee,
             weight: tx.weight,
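Writing Self instead of repeating the type name inside its own impl block (here, and again in the ThreadTransaction hunks below) is what clippy's nursery use_self lint suggests, and it survives a later rename of the type. A tiny standalone example with a hypothetical type:

struct ThreadTx {
    uid: u32,
}

impl ThreadTx {
    // `Self` refers to ThreadTx inside this impl block.
    fn from_uid(uid: u32) -> Self {
        Self { uid }
    }
}

fn main() {
    assert_eq!(ThreadTx::from_uid(7).uid, 7);
}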
@@ -88,7 +89,7 @@ impl AuditTransaction {
     }
 
     #[inline]
-    pub fn score(&self) -> f64 {
+    pub const fn score(&self) -> f64 {
         self.score
     }
 
@@ -99,7 +100,43 @@ impl AuditTransaction {
             / (if self.ancestor_weight == 0 {
                 1.0
             } else {
-                self.ancestor_weight as f64 / 4.0
+                f64::from(self.ancestor_weight) / 4.0
             });
     }
 
+    #[inline]
+    pub fn set_ancestors(
+        &mut self,
+        ancestors: HashSet<u32>,
+        total_fee: u64,
+        total_weight: u32,
+        total_sigops: u32,
+    ) {
+        self.ancestors = ancestors;
+        self.ancestor_fee = self.fee + total_fee;
+        self.ancestor_weight = self.weight + total_weight;
+        self.ancestor_sigops = self.sigops + total_sigops;
+        self.calc_new_score();
+        self.relatives_set_flag = true;
+    }
+
+    #[inline]
+    pub fn remove_root(
+        &mut self,
+        root_txid: u32,
+        root_fee: u64,
+        root_weight: u32,
+        root_sigops: u32,
+        cluster_rate: f64,
+    ) -> f64 {
+        self.ancestors.remove(&root_txid);
+        self.ancestor_fee -= root_fee;
+        self.ancestor_weight -= root_weight;
+        self.ancestor_sigops -= root_sigops;
+        let old_score = self.score();
+        self.calc_new_score();
+        self.dependency_rate = self.dependency_rate.min(cluster_rate);
+        self.modified = true;
+        old_score
+    }
 }
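Two things happen in this hunk: the ancestor bookkeeping is extracted into set_ancestors and remove_root helpers (reused by set_relatives and update_descendants in the hunks below), and the weight conversion switches from `as f64` to `f64::from`, which clippy's pedantic cast_lossless lint prefers because a u32 always converts to f64 exactly. For context, the score recomputed here is the ancestor fee rate in sat/vB. A small standalone illustration of that formula, not taken from the patch:

fn ancestor_score(ancestor_fee: u64, ancestor_weight: u32) -> f64 {
    // weight units -> vbytes; guard the zero-weight edge case like calc_new_score does
    ancestor_fee as f64
        / (if ancestor_weight == 0 {
            1.0
        } else {
            f64::from(ancestor_weight) / 4.0
        })
}

fn main() {
    // 1000 sats paying for 2000 weight units (500 vbytes) is 2 sat/vB
    assert!((ancestor_score(1000, 2000) - 2.0).abs() < f64::EPSILON);
}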
@@ -29,7 +29,7 @@ impl PartialEq for TxPriority {
 }
 impl Eq for TxPriority {}
 impl PartialOrd for TxPriority {
-    fn partial_cmp(&self, other: &TxPriority) -> Option<Ordering> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         if self.score == other.score {
             Some(self.uid.cmp(&other.uid))
         } else {
@@ -43,11 +43,16 @@ impl Ord for TxPriority {
     }
 }
 
-/*
-* Build projected mempool blocks using an approximation of the transaction selection algorithm from Bitcoin Core
-* (see BlockAssembler in https://github.com/bitcoin/bitcoin/blob/master/src/node/miner.cpp)
-* Ported from https://github.com/mempool/mempool/blob/master/backend/src/api/tx-selection-worker.ts
-*/
+/// Build projected mempool blocks using an approximation of the transaction selection algorithm from Bitcoin Core
+///
+/// See `BlockAssembler` in Bitcoin Core's
+/// [miner.cpp](https://github.com/bitcoin/bitcoin/blob/master/src/node/miner.cpp).
+/// Ported from mempool backend's
+/// [tx-selection-worker.ts](https://github.com/mempool/mempool/blob/master/backend/src/api/tx-selection-worker.ts).
+//
+// TODO: Make gbt smaller to fix these lints.
+#[allow(clippy::too_many_lines)]
+#[allow(clippy::cognitive_complexity)]
 pub fn gbt(mempool: &mut ThreadTransactionsMap) -> Option<GbtResult> {
     let mut audit_pool: AuditPool = u32hashmap_with_capacity(STARTING_CAPACITY);
     let mut mempool_array: VecDeque<u32> = VecDeque::with_capacity(STARTING_CAPACITY);
@@ -140,10 +145,10 @@ pub fn gbt(mempool: &mut ThreadTransactionsMap) -> Option<GbtResult> {
 
         let cluster_rate = next_tx
             .dependency_rate
-            .min(next_tx.ancestor_fee as f64 / (next_tx.ancestor_weight as f64 / 4.0));
+            .min(next_tx.ancestor_fee as f64 / (f64::from(next_tx.ancestor_weight) / 4.0));
 
-        for package_entry in &package {
-            if let Some(tx) = audit_pool.get_mut(&package_entry.0) {
+        for (txid, _, _) in &package {
+            if let Some(tx) = audit_pool.get_mut(txid) {
                 tx.used = true;
                 if tx.effective_fee_per_vsize != cluster_rate {
                     tx.effective_fee_per_vsize = cluster_rate;
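Destructuring the package tuples in the loop header names the txid binding instead of reaching through package_entry.0. A trivial standalone example with made-up data:

fn main() {
    // (txid, fee, weight) triples, analogous to the package entries in gbt
    let package: Vec<(u32, u64, u32)> = vec![(1, 500, 400), (2, 250, 200)];

    // Naming the fields in the pattern beats indexing with .0/.1/.2,
    // and `_` marks the ones this loop does not need.
    for (txid, fee, _) in &package {
        println!("tx {txid} pays {fee} sats");
    }
}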
@@ -153,12 +158,7 @@ pub fn gbt(mempool: &mut ThreadTransactionsMap) -> Option<GbtResult> {
                 block_weight += tx.weight;
                 block_sigops += tx.sigops;
             }
-            update_descendants(
-                package_entry.0,
-                &mut audit_pool,
-                &mut modified,
-                cluster_rate,
-            );
+            update_descendants(*txid, &mut audit_pool, &mut modified, cluster_rate);
         }
 
         failures = 0;
@@ -205,14 +205,14 @@ pub fn gbt(mempool: &mut ThreadTransactionsMap) -> Option<GbtResult> {
     let mut rates: Vec<Vec<f64>> = Vec::new();
     for (txid, tx) in audit_pool {
         if tx.dirty {
-            rates.push(vec![txid as f64, tx.effective_fee_per_vsize]);
+            rates.push(vec![f64::from(txid), tx.effective_fee_per_vsize]);
         }
     }
 
     Some(GbtResult {
         blocks,
-        rates,
         clusters,
+        rates,
     })
 }
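Swapping the rates and clusters fields puts the GbtResult literal in the same order as the struct declaration, presumably what clippy's pedantic inconsistent_struct_constructor lint flagged (the patch does not name the lint). A minimal standalone illustration:

struct Summary {
    blocks: usize,
    clusters: usize,
    rates: usize,
}

fn main() {
    let (blocks, clusters, rates) = (8, 3, 42);
    // Shorthand field init listed in declaration order keeps
    // clippy::inconsistent_struct_constructor quiet.
    let s = Summary { blocks, clusters, rates };
    assert_eq!(s.blocks + s.clusters + s.rates, 53);
}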
@@ -257,12 +257,7 @@ fn set_relatives(txid: u32, audit_pool: &mut AuditPool) {
     }
 
     if let Some(tx) = audit_pool.get_mut(&txid) {
-        tx.ancestors = ancestors;
-        tx.ancestor_fee = tx.fee + total_fee;
-        tx.ancestor_weight = tx.weight + total_weight;
-        tx.ancestor_sigops = tx.sigops + total_sigops;
-        tx.calc_new_score();
-        tx.relatives_set_flag = true;
+        tx.set_ancestors(ancestors, total_fee, total_weight, total_sigops);
     }
 }
 
@@ -294,16 +289,11 @@ fn update_descendants(
     while let Some(next_txid) = descendant_stack.pop() {
         if let Some(descendant) = audit_pool.get_mut(&next_txid) {
-            // remove root tx as ancestor
-            descendant.ancestors.remove(&root_txid);
-            descendant.ancestor_fee -= root_fee;
-            descendant.ancestor_weight -= root_weight;
-            descendant.ancestor_sigops -= root_sigops;
-            let current_score = descendant.score();
-            descendant.calc_new_score();
-            descendant.dependency_rate = descendant.dependency_rate.min(cluster_rate);
-            descendant.modified = true;
+            let old_score =
+                descendant.remove_root(root_txid, root_fee, root_weight, root_sigops, cluster_rate);
             // update modified priority if score has changed
-            if !descendant.modified || descendant.score() < current_score {
+            // remove_root() always sets modified to true
+            if descendant.score() < old_score {
                 modified.push_decrease(
                     descendant.uid,
                     TxPriority {
@@ -311,7 +301,7 @@ fn update_descendants(
                         score: descendant.score(),
                     },
                 );
-            } else if descendant.score() > current_score {
+            } else if descendant.score() > old_score {
                 modified.push_increase(
                     descendant.uid,
                     TxPriority {
@@ -1,4 +1,12 @@
-use napi::bindgen_prelude::*;
+#![warn(clippy::all)]
+#![warn(clippy::pedantic)]
+#![warn(clippy::nursery)]
+#![allow(clippy::cast_precision_loss)]
+#![allow(clippy::cast_possible_truncation)]
+#![allow(clippy::cast_sign_loss)]
+#![allow(clippy::float_cmp)]
+
+use napi::bindgen_prelude::{Result, Uint8Array};
 use napi_derive::napi;
 
 use std::collections::HashMap;
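The new crate-level attributes opt the whole crate into the all, pedantic, and nursery groups while allowing four numeric lints, presumably because the fee-rate math needs integer-to-float casts and exact float comparisons that those lints would otherwise flag. A standalone sketch of the kind of code the allows cover, not taken from the patch:

#![allow(clippy::cast_precision_loss)]
#![allow(clippy::float_cmp)]

fn fee_rate(fee: u64, weight: u32) -> f64 {
    // u64 -> f64 can lose precision above 2^53, which cast_precision_loss warns about;
    // sat amounts stay far below that in practice.
    fee as f64 / (f64::from(weight) / 4.0)
}

fn main() {
    // float_cmp would flag this exact comparison; both sides are exact here.
    assert!(fee_rate(1_000, 2_000) == 2.0);
}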
@@ -13,7 +21,7 @@ mod utils;
 use thread_transaction::ThreadTransaction;
 
 /// This is the starting capacity for HashMap/Vec/etc. that deal with transactions.
-/// HashMap doubles capacity when it hits it, so 2048 is a decent tradeoff between
+/// `HashMap` doubles capacity when it hits it, so 2048 is a decent tradeoff between
 /// not wasting too much memory when it's below this, and allowing for less re-allocations
 /// by virtue of starting with such a large capacity.
 ///
@@ -31,12 +39,16 @@ pub struct GbtGenerator {
 impl GbtGenerator {
     #[napi(constructor)]
     #[allow(clippy::new_without_default)]
+    #[must_use]
     pub fn new() -> Self {
         Self {
             thread_transactions: Arc::new(Mutex::new(u32hashmap_with_capacity(STARTING_CAPACITY))),
         }
     }
 
+    /// # Errors
+    ///
+    /// Rejects if the thread panics or if the Mutex is poisoned.
     #[napi]
     pub async fn make(&self, mempool_buffer: Uint8Array) -> Result<GbtResult> {
         run_task(Arc::clone(&self.thread_transactions), move |map| {
@@ -47,6 +59,9 @@ impl GbtGenerator {
         .await
     }
 
+    /// # Errors
+    ///
+    /// Rejects if the thread panics or if the Mutex is poisoned.
     #[napi]
     pub async fn update(&self, new_txs: Uint8Array, remove_txs: Uint8Array) -> Result<GbtResult> {
         run_task(Arc::clone(&self.thread_transactions), move |map| {
@@ -77,12 +92,12 @@ pub struct GbtResult {
 /// All on another thread, this runs an arbitrary task in between
 /// taking the lock and running gbt.
 ///
-/// Rather than filling / updating the HashMap on the main thread,
-/// this allows for HashMap modifying tasks to be run before running and returning gbt results.
+/// Rather than filling / updating the `HashMap` on the main thread,
+/// this allows for `HashMap` modifying tasks to be run before running and returning gbt results.
 ///
-/// `thread_transactions` is a cloned Arc of the Mutex for the HashMap state.
+/// `thread_transactions` is a cloned `Arc` of the `Mutex` for the `HashMap` state.
 /// `callback` is a `'static + Send` `FnOnce` closure/function that takes a mutable reference
-/// to the HashMap as the only argument. (A move closure is recommended to meet the bounds)
+/// to the `HashMap` as the only argument. (A move closure is recommended to meet the bounds)
 async fn run_task<F>(
     thread_transactions: Arc<Mutex<ThreadTransactionsMap>>,
     callback: F,
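The doc comment describes the pattern run_task implements: clone the Arc, move it to another thread, take the lock once, let the callback mutate the map, then compute the result under the same lock. A std-only sketch of that shape, with made-up types and without the napi/async layer the real function uses:

use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

type Map = HashMap<u32, u64>;

// Sketch of the documented pattern: the worker thread locks once, applies the
// caller's mutation, and computes the result under the same lock.
fn run_task<F, C, R>(shared: Arc<Mutex<Map>>, callback: F, compute: C) -> Option<R>
where
    F: FnOnce(&mut Map) + Send + 'static,
    C: FnOnce(&Map) -> R + Send + 'static,
    R: Send + 'static,
{
    thread::spawn(move || {
        let mut map = shared.lock().ok()?; // a poisoned Mutex surfaces as None
        callback(&mut map);
        Some(compute(&map))
    })
    .join() // a panicked worker also surfaces as None
    .ok()?
}

fn main() {
    let shared = Arc::new(Mutex::new(Map::new()));
    let total = run_task(
        Arc::clone(&shared),
        |map| {
            map.insert(1, 500);
            map.insert(2, 250);
        },
        |map| map.values().sum::<u64>(),
    );
    assert_eq!(total, Some(750));
}

The real method rejects the JS promise when the thread panics or the Mutex is poisoned, as the # Errors sections above state; in this sketch both cases simply surface as None.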
@@ -12,8 +12,8 @@ pub struct ThreadTransaction {
 }
 
 impl ThreadTransaction {
-    pub fn batch_from_buffer(buffer: &[u8]) -> Vec<ThreadTransaction> {
-        let mut transactions: Vec<ThreadTransaction> = Vec::new();
+    pub fn batch_from_buffer(buffer: &[u8]) -> Vec<Self> {
+        let mut transactions: Vec<Self> = Vec::new();
         let mut cursor = Cursor::new(buffer);
         let size = cursor.get_u32();
         for _ in 0..size {
@@ -28,7 +28,7 @@ impl ThreadTransaction {
         for _ in 0..input_count {
             inputs.push(cursor.get_u32());
         }
-        transactions.push(ThreadTransaction {
+        transactions.push(Self {
             uid,
             fee,
             weight,
@@ -4,12 +4,12 @@ use std::{
     hash::{BuildHasher, Hasher},
 };
 
-/// This is the only way to create a HashMap with the U32HasherState and capacity
+/// This is the only way to create a `HashMap` with the `U32HasherState` and capacity
 pub fn u32hashmap_with_capacity<V>(capacity: usize) -> HashMap<u32, V, U32HasherState> {
     HashMap::with_capacity_and_hasher(capacity, U32HasherState(()))
 }
 
-/// This is the only way to create a PriorityQueue with the U32HasherState and capacity
+/// This is the only way to create a `PriorityQueue` with the `U32HasherState` and capacity
 pub fn u32priority_queue_with_capacity<V: Ord>(
     capacity: usize,
 ) -> PriorityQueue<u32, V, U32HasherState> {
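These helpers exist because HashMap::with_capacity is only available for the default RandomState; a map with a custom hasher and a preset capacity has to go through with_capacity_and_hasher. Below is a sketch of what the u32 identity hasher behind U32HasherState plausibly looks like (the crate's actual definition may differ): txids are already well-distributed u32 keys, so hashing them again buys nothing.

use std::collections::HashMap;
use std::hash::{BuildHasher, Hasher};

// Hypothetical identity hasher for u32 keys.
pub struct U32Hasher(u32);

impl Hasher for U32Hasher {
    fn finish(&self) -> u64 {
        u64::from(self.0)
    }
    fn write(&mut self, _bytes: &[u8]) {
        unimplemented!("only u32 keys are supported");
    }
    fn write_u32(&mut self, i: u32) {
        self.0 = i;
    }
}

pub struct U32HasherState(());

impl BuildHasher for U32HasherState {
    type Hasher = U32Hasher;
    fn build_hasher(&self) -> U32Hasher {
        U32Hasher(0)
    }
}

// `HashMap::with_capacity` only exists for RandomState, so a custom-hasher map
// with a preset capacity goes through `with_capacity_and_hasher`, as in the patch.
pub fn u32hashmap_with_capacity<V>(capacity: usize) -> HashMap<u32, V, U32HasherState> {
    HashMap::with_capacity_and_hasher(capacity, U32HasherState(()))
}

fn main() {
    let mut map: HashMap<u32, &str, U32HasherState> = u32hashmap_with_capacity(2048);
    map.insert(42, "tx");
    assert_eq!(map.get(&42), Some(&"tx"));
}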