Introduce OnchainTxHandler, move bumping and tracking logic

Encapsulates tracking and bumping of in-flight transactions in
its own component. This component may be later abstracted
to reuse tracking and RBF for new features (e.g. dual-funding,
splicing).

Build all transaction generation in one place. Also, as fees
and signatures are closely tied, the keys you hold determine
which bumping mode you can use.
Antoine Riard 2020-01-24 11:57:52 -05:00
parent e94635703a
commit 3d640da5c3
4 changed files with 885 additions and 1130 deletions
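For orientation, the public surface of the new component (introduced in onchaintx.rs further down this page) boils down to roughly the following; the signatures are taken from the diff below but simplified, with where-clauses elided:

    impl OnchainTxHandler {
        pub(super) fn new(destination_script: Script, logger: Arc<Logger>) -> Self;
        pub(super) fn get_witnesses_weight(inputs: &[InputDescriptors]) -> usize;
        pub(super) fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction],
            claimable_outpoints: Vec<Vec<(u32, bool, BitcoinOutPoint, InputMaterial)>>, height: u32,
            broadcaster: B, fee_estimator: F) -> Vec<SpendableOutputDescriptor>;
        pub(super) fn block_disconnected<B: Deref, F: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F);
    }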

File diff suppressed because it is too large.


@@ -2325,33 +2325,41 @@ fn claim_htlc_outputs_single_tx() {
}
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 26);
assert_eq!(node_txn.len(), 21);
// ChannelMonitor: justice tx revoked offered htlc, justice tx revoked received htlc, justice tx revoked to_local (3)
// ChannelManager: local commitment + local HTLC-timeout (2)
// ChannelMonitor: bumped justice tx * 7 (7), after one increase, bumps on HTLCs aren't generated as they're no longer substantial
// ChannelMonitor: local commitment + local HTLC-timeout (14)
// ChannelMonitor: bumped justice tx (4), after one increase, bumps on HTLCs aren't generated as they're no longer substantial
// ChannelMonitor: local commitment + local HTLC-timeout (14)
assert_eq!(node_txn[0], node_txn[5]);
assert_eq!(node_txn[0], node_txn[7]);
assert_eq!(node_txn[0], node_txn[9]);
assert_eq!(node_txn[0], node_txn[13]);
assert_eq!(node_txn[0], node_txn[15]);
assert_eq!(node_txn[0], node_txn[17]);
assert_eq!(node_txn[0], node_txn[19]);
assert_eq!(node_txn[1], node_txn[6]);
assert_eq!(node_txn[1], node_txn[8]);
assert_eq!(node_txn[1], node_txn[10]);
assert_eq!(node_txn[1], node_txn[14]);
assert_eq!(node_txn[1], node_txn[16]);
assert_eq!(node_txn[1], node_txn[18]);
assert_eq!(node_txn[1], node_txn[20]);
assert_eq!(node_txn[3], node_txn[5]);
assert_eq!(node_txn[3], node_txn[7]);
assert_eq!(node_txn[3], node_txn[9]);
assert_eq!(node_txn[3], node_txn[14]);
assert_eq!(node_txn[3], node_txn[17]);
assert_eq!(node_txn[3], node_txn[20]);
assert_eq!(node_txn[3], node_txn[23]);
assert_eq!(node_txn[4], node_txn[6]);
assert_eq!(node_txn[4], node_txn[8]);
assert_eq!(node_txn[4], node_txn[10]);
assert_eq!(node_txn[4], node_txn[15]);
assert_eq!(node_txn[4], node_txn[18]);
assert_eq!(node_txn[4], node_txn[21]);
assert_eq!(node_txn[4], node_txn[24]);
// Check the pair of local commitment and HTLC-timeout txn broadcast due to HTLC expiration, present 8 times (rebroadcast at every block from 200 to 206)
assert_eq!(node_txn[0].input.len(), 1);
check_spends!(node_txn[0], chan_1.3.clone());
assert_eq!(node_txn[1].input.len(), 1);
assert_eq!(node_txn[2].input.len(), 1);
let witness_script = node_txn[1].input[0].witness.last().unwrap();
assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output
check_spends!(node_txn[1], node_txn[0].clone());
// Justice transactions are indices 2-3-4
assert_eq!(node_txn[2].input.len(), 1);
assert_eq!(node_txn[3].input.len(), 1);
assert_eq!(node_txn[4].input.len(), 1);
fn get_txout(out_point: &BitcoinOutPoint, tx: &Transaction) -> Option<TxOut> {
if out_point.txid == tx.txid() {
tx.output.get(out_point.vout as usize).cloned()
@@ -2359,28 +2367,18 @@ fn claim_htlc_outputs_single_tx() {
None
}
}
node_txn[0].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
node_txn[1].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
node_txn[2].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
node_txn[3].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
node_txn[4].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
let mut witness_lens = BTreeSet::new();
witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
witness_lens.insert(node_txn[3].input[0].witness.last().unwrap().len());
witness_lens.insert(node_txn[4].input[0].witness.last().unwrap().len());
assert_eq!(witness_lens.len(), 3);
assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
assert_eq!(node_txn[3].input.len(), 1);
check_spends!(node_txn[3], chan_1.3.clone());
assert_eq!(node_txn[4].input.len(), 1);
let witness_script = node_txn[4].input[0].witness.last().unwrap();
assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); //Spending an offered htlc output
assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid());
assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid);
}
get_announce_close_broadcast_events(&nodes, 0, 1);
assert_eq!(nodes[0].node.list_channels().len(), 0);
@@ -2623,21 +2621,20 @@ fn test_htlc_on_chain_timeout() {
{
let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 7); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : (local commitment tx + HTLC-timeout) * 2 (block-rescan), timeout tx
assert_eq!(node_txn[1], node_txn[3]);
assert_eq!(node_txn[1], node_txn[5]);
assert_eq!(node_txn[2], node_txn[4]);
assert_eq!(node_txn[2], node_txn[6]);
check_spends!(node_txn[0], commitment_tx[0].clone());
assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
check_spends!(node_txn[1], chan_2.3.clone());
check_spends!(node_txn[2], node_txn[1].clone());
assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), 71);
assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
check_spends!(node_txn[3], chan_2.3.clone());
check_spends!(node_txn[4], node_txn[3].clone());
assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71);
assert_eq!(node_txn[4].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
timeout_tx = node_txn[0].clone();
assert_eq!(node_txn[0], node_txn[3]);
assert_eq!(node_txn[0], node_txn[5]);
assert_eq!(node_txn[1], node_txn[4]);
assert_eq!(node_txn[1], node_txn[6]);
check_spends!(node_txn[2], commitment_tx[0].clone());
assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
check_spends!(node_txn[0], chan_2.3.clone());
check_spends!(node_txn[1], node_txn[0].clone());
assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), 71);
assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
timeout_tx = node_txn[2].clone();
node_txn.clear();
}
@@ -4075,10 +4072,9 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
let spend_txn = check_spendable_outputs!(nodes[1], 1);
assert_eq!(spend_txn.len(), 4);
assert_eq!(spend_txn.len(), 3);
assert_eq!(spend_txn[0], spend_txn[2]); // to_remote output on revoked remote commitment_tx
check_spends!(spend_txn[0], revoked_local_txn[0].clone());
assert_eq!(spend_txn[1], spend_txn[3]); // to_local output on local commitment tx
check_spends!(spend_txn[1], node_txn[0].clone());
}
@@ -4123,8 +4119,7 @@ eprintln!("{:?}", node_txn[1]);
check_spends!(node_txn[2], node_txn[1]);
let spend_txn = check_spendable_outputs!(nodes[1], 1); // , 0, 0, 1, 1);
assert_eq!(spend_txn.len(), 2);
assert_eq!(spend_txn[0], spend_txn[1]);
assert_eq!(spend_txn.len(), 1);
check_spends!(spend_txn[0], node_txn[0].clone());
}
@@ -4155,8 +4150,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
check_spends!(node_txn[0], revoked_local_txn[0].clone());
let spend_txn = check_spendable_outputs!(nodes[1], 1);
assert_eq!(spend_txn.len(), 2);
assert_eq!(spend_txn[0], spend_txn[1]);
assert_eq!(spend_txn.len(), 1);
check_spends!(spend_txn[0], node_txn[0].clone());
}
@@ -4201,10 +4195,9 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
// Check B's ChannelMonitor was able to generate the right spendable output descriptor
let spend_txn = check_spendable_outputs!(nodes[1], 1);
assert_eq!(spend_txn.len(), 3);
assert_eq!(spend_txn[0], spend_txn[1]);
assert_eq!(spend_txn.len(), 2);
check_spends!(spend_txn[0], node_txn[0].clone());
check_spends!(spend_txn[2], node_txn[2].clone());
check_spends!(spend_txn[1], node_txn[2].clone());
}
#[test]
@@ -4247,12 +4240,11 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
// Check A's ChannelMonitor was able to generate the right spendable output descriptor
let spend_txn = check_spendable_outputs!(nodes[0], 1);
assert_eq!(spend_txn.len(), 5);
assert_eq!(spend_txn.len(), 4);
assert_eq!(spend_txn[0], spend_txn[2]);
assert_eq!(spend_txn[1], spend_txn[3]);
check_spends!(spend_txn[0], revoked_local_txn[0].clone()); // spending to_remote output from revoked local tx
check_spends!(spend_txn[1], node_txn[0].clone()); // spending justice tx output from revoked local tx htlc received output
check_spends!(spend_txn[4], node_txn[2].clone()); // spending justice tx output on htlc success tx
check_spends!(spend_txn[3], node_txn[2].clone()); // spending justice tx output on htlc success tx
}
#[test]
@@ -6903,8 +6895,6 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
check_closed_broadcast!(nodes[1], false);
let mut received = ::std::usize::MAX;
let mut offered = ::std::usize::MAX;
let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(revoked_htlc_txn.len(), 6);
if revoked_htlc_txn[0].input[0].witness.last().unwrap().len() == ACCEPTED_HTLC_SCRIPT_WEIGHT {
@@ -6913,106 +6903,81 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
assert_eq!(revoked_htlc_txn[1].input.len(), 1);
assert_eq!(revoked_htlc_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
check_spends!(revoked_htlc_txn[1], revoked_local_txn[0].clone());
received = 0;
offered = 1;
} else if revoked_htlc_txn[1].input[0].witness.last().unwrap().len() == ACCEPTED_HTLC_SCRIPT_WEIGHT {
assert_eq!(revoked_htlc_txn[1].input.len(), 1);
check_spends!(revoked_htlc_txn[1], revoked_local_txn[0].clone());
assert_eq!(revoked_htlc_txn[0].input.len(), 1);
assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone());
received = 1;
offered = 0;
}
// Broadcast set of revoked txn on A
let header_128 = connect_blocks(&nodes[0].block_notifier, 128, 0, true, header.bitcoin_hash());
let header_128 = connect_blocks(&nodes[0].block_notifier, 128, 0, true, header.bitcoin_hash());
let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] }, 129);
let first;
let second;
let feerate_1;
let feerate_2;
let penalty_txn;
{
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 6); // 3 penalty txn on revoked commitment tx + A commitment tx + 2 penalty txn on revoked HTLC txn
assert_eq!(node_txn.len(), 5); // 3 penalty txn on revoked commitment tx + A commitment tx + 1 penalty txn on revoked HTLC txn
// Verify claim tx are spending revoked HTLC txn
assert_eq!(node_txn[4].input.len(), 1);
assert_eq!(node_txn[4].input.len(), 2);
assert_eq!(node_txn[4].output.len(), 1);
check_spends!(node_txn[4], revoked_htlc_txn[0].clone());
if node_txn[4].input[0].previous_output.txid == revoked_htlc_txn[0].txid() {
assert_eq!(node_txn[4].input[1].previous_output.txid, revoked_htlc_txn[1].txid());
} else if node_txn[4].input[0].previous_output.txid == revoked_htlc_txn[1].txid() {
assert_eq!(node_txn[4].input[1].previous_output.txid, revoked_htlc_txn[0].txid());
} else {
panic!();
}
first = node_txn[4].txid();
assert_eq!(node_txn[5].input.len(), 1);
assert_eq!(node_txn[5].output.len(), 1);
check_spends!(node_txn[5], revoked_htlc_txn[1].clone());
second = node_txn[5].txid();
// Store both feerates for later comparison
let fee_1 = revoked_htlc_txn[0].output[0].value - node_txn[4].output[0].value;
let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[4].output[0].value;
feerate_1 = fee_1 * 1000 / node_txn[4].get_weight() as u64;
let fee_2 = revoked_htlc_txn[1].output[0].value - node_txn[5].output[0].value;
feerate_2 = fee_2 * 1000 / node_txn[5].get_weight() as u64;
penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
node_txn.clear();
}
// Connect three more blocks to see if bumped penalty txn are issued for HTLC txn
let header_132 = connect_blocks(&nodes[0].block_notifier, 3, 129, true, header_129.bitcoin_hash());
let penalty_local_tx;
let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].block_notifier.block_connected(&Block { header: header_130, txdata: penalty_txn }, 130);
{
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 3); // 2 bumped penalty txn on offered/received HTLC outputs of revoked commitment tx + 1 penalty tx on to_local of revoked commitment tx + 2 bumped penalty tx on revoked HTLC txn
assert_eq!(node_txn.len(), 2); // 2 bumped penalty txn on revoked commitment tx
check_spends!(node_txn[0], revoked_local_txn[0].clone());
check_spends!(node_txn[1], revoked_local_txn[0].clone());
check_spends!(node_txn[2], revoked_local_txn[0].clone());
penalty_local_tx = node_txn[2].clone();
node_txn.clear();
};
// Few more blocks to broadcast and confirm penalty_local_tx
let header_133 = BlockHeader { version: 0x20000000, prev_blockhash: header_132, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].block_notifier.block_connected(&Block { header: header_133, txdata: vec![penalty_local_tx] }, 133);
let header_135 = connect_blocks(&nodes[0].block_notifier, 2, 133, true, header_133.bitcoin_hash());
// Few more blocks to confirm penalty txn
let header_135 = connect_blocks(&nodes[0].block_notifier, 5, 130, true, header_130.bitcoin_hash());
{
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], revoked_local_txn[0].clone());
assert_eq!(node_txn.len(), 0);
node_txn.clear();
}
let header_144 = connect_blocks(&nodes[0].block_notifier, 9, 135, true, header_135);
let node_txn = {
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 2);
assert_eq!(node_txn.len(), 1);
let mut penalty_offered = ::std::usize::MAX;
let mut penalty_received = ::std::usize::MAX;
{
for (i, tx) in node_txn.iter().enumerate() {
if tx.input[0].previous_output.txid == revoked_htlc_txn[offered].txid() {
penalty_offered = i;
} else if tx.input[0].previous_output.txid == revoked_htlc_txn[received].txid() {
penalty_received = i;
}
}
assert_eq!(node_txn[0].input.len(), 2);
if node_txn[0].input[0].previous_output.txid == revoked_htlc_txn[0].txid() {
assert_eq!(node_txn[0].input[1].previous_output.txid, revoked_htlc_txn[1].txid());
} else if node_txn[0].input[0].previous_output.txid == revoked_htlc_txn[1].txid() {
assert_eq!(node_txn[0].input[1].previous_output.txid, revoked_htlc_txn[0].txid());
} else {
panic!();
}
assert_eq!(node_txn[penalty_received].input.len(), 1);
assert_eq!(node_txn[penalty_received].output.len(), 1);
assert_eq!(node_txn[penalty_offered].input.len(), 1);
assert_eq!(node_txn[penalty_offered].output.len(), 1);
// Verify bumped tx is different and 25% bump heuristic
check_spends!(node_txn[penalty_offered], revoked_htlc_txn[offered].clone());
assert_ne!(first, node_txn[penalty_offered].txid());
let fee = revoked_htlc_txn[offered].output[0].value - node_txn[penalty_offered].output[0].value;
let new_feerate = fee * 1000 / node_txn[penalty_offered].get_weight() as u64;
assert!(new_feerate * 100 > feerate_1 * 125);
check_spends!(node_txn[penalty_received], revoked_htlc_txn[received].clone());
assert_ne!(second, node_txn[penalty_received].txid());
let fee = revoked_htlc_txn[received].output[0].value - node_txn[penalty_received].output[0].value;
let new_feerate = fee * 1000 / node_txn[penalty_received].get_weight() as u64;
assert!(new_feerate * 100 > feerate_2 * 125);
let txn = vec![node_txn[0].clone(), node_txn[1].clone()];
// Verify bumped tx is different and 25% bump heuristic
assert_ne!(first, node_txn[0].txid());
let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value;
let feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
assert!(feerate_2 * 100 > feerate_1 * 125);
let txn = vec![node_txn[0].clone()];
node_txn.clear();
txn
};
@@ -7022,7 +6987,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
connect_blocks(&nodes[0].block_notifier, 20, 145, true, header_145.bitcoin_hash());
{
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 2); //TODO: fix check_spend_remote_htlc lack of watch output
assert_eq!(node_txn.len(), 1); //TODO: fix check_spend_remote_htlc lack of watch output
node_txn.clear();
}
check_closed_broadcast!(nodes[0], false);
@@ -7305,8 +7270,8 @@ fn test_bump_txn_sanitize_tracking_maps() {
{
let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
if let Some(monitor) = monitors.get(&OutPoint::new(chan.3.txid(), 0)) {
assert!(monitor.pending_claim_requests.is_empty());
assert!(monitor.claimable_outpoints.is_empty());
assert!(monitor.onchain_tx_handler.pending_claim_requests.is_empty());
assert!(monitor.onchain_tx_handler.claimable_outpoints.is_empty());
}
}
}


@@ -16,6 +16,7 @@ pub mod router;
pub mod peer_handler;
pub mod chan_utils;
pub mod features;
pub(crate) mod onchaintx;
#[cfg(feature = "fuzztarget")]
pub mod peer_channel_encryptor;


@@ -0,0 +1,698 @@
//! The logic to build claims and bump in-flight transactions until confirmations.
//!
//! OnchainTxHandler objects are fully part of ChannelMonitor and encapsulate all
//! building, tracking, bumping and notification functions.
use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType};
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::Script;
use bitcoin::util::bip143;
use bitcoin_hashes::sha256d::Hash as Sha256dHash;
use secp256k1::Secp256k1;
use secp256k1;
use ln::msgs::DecodeError;
use ln::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER, InputMaterial};
use ln::chan_utils::HTLCType;
use chain::chaininterface::{FeeEstimator, BroadcasterInterface, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
use chain::keysinterface::SpendableOutputDescriptor;
use util::logger::Logger;
use util::ser::{ReadableArgs, Readable, Writer, Writeable};
use util::byte_utils;
use std::collections::{HashMap, hash_map, HashSet};
use std::sync::Arc;
use std::cmp;
use std::ops::Deref;
const MAX_ALLOC_SIZE: usize = 64*1024;
/// Upon discovery of some classes of onchain tx by ChannelMonitor, we may have to take action on them
/// once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(Clone, PartialEq)]
enum OnchainEvent {
/// Outpoint under claim process by our own tx; once it gets enough confirmations, we remove it from the
/// bump-txn candidate buffer.
Claim {
claim_request: Sha256dHash,
},
/// A claim tx aggregates multiple claimable outpoints. One of the outpoints may be claimed by a remote party tx.
/// In this case, we need to drop the outpoint and regenerate a new claim tx. For safety, we keep tracking
/// the outpoint so we can resurrect it back into the claim tx if a reorg happens.
ContentiousOutpoint {
outpoint: BitcoinOutPoint,
input_material: InputMaterial,
}
}
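// Illustration (hypothetical scenario): if our aggregated claim tx spends outpoints A and B and a
// counterparty transaction claims B first, B is tracked as a ContentiousOutpoint while a fresh claim tx is
// regenerated for A alone; if the block claiming B is later reorged out, B's input material is resurrected
// into the pending claim. Once a spending tx matching one of our claim requests reaches ANTI_REORG_DELAY
// confirmations, a Claim event removes that request from the bump-candidate buffer.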
/// Higher-level cache structure needed to re-generate bumped claim txn if needed
#[derive(Clone, PartialEq)]
pub struct ClaimTxBumpMaterial {
// At every block tick, used to check if pending claiming tx is taking too
// much time for confirmation and we need to bump it.
height_timer: u32,
// Tracked in case of reorg to wipe out now-superfluous bump material
feerate_previous: u64,
// Soonest timelock among the set of outpoints claimed, used to compute
// the claim's bump priority (height timer), not its feerate
soonest_timelock: u32,
// Cache of the script, pubkey, sig or key needed to satisfy each claimable output's scriptpubkey.
per_input_material: HashMap<BitcoinOutPoint, InputMaterial>,
}
impl Writeable for ClaimTxBumpMaterial {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
writer.write_all(&byte_utils::be32_to_array(self.height_timer))?;
writer.write_all(&byte_utils::be64_to_array(self.feerate_previous))?;
writer.write_all(&byte_utils::be32_to_array(self.soonest_timelock))?;
writer.write_all(&byte_utils::be64_to_array(self.per_input_material.len() as u64))?;
for (outp, tx_material) in self.per_input_material.iter() {
outp.write(writer)?;
tx_material.write(writer)?;
}
Ok(())
}
}
impl Readable for ClaimTxBumpMaterial {
fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
let height_timer = Readable::read(reader)?;
let feerate_previous = Readable::read(reader)?;
let soonest_timelock = Readable::read(reader)?;
let per_input_material_len: u64 = Readable::read(reader)?;
let mut per_input_material = HashMap::with_capacity(cmp::min(per_input_material_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0 ..per_input_material_len {
let outpoint = Readable::read(reader)?;
let input_material = Readable::read(reader)?;
per_input_material.insert(outpoint, input_material);
}
Ok(Self { height_timer, feerate_previous, soonest_timelock, per_input_material })
}
}
#[derive(PartialEq)]
pub(super) enum InputDescriptors {
RevokedOfferedHTLC,
RevokedReceivedHTLC,
OfferedHTLC,
ReceivedHTLC,
RevokedOutput, // either a revoked to_local output on commitment tx, a revoked HTLC-Timeout output or a revoked HTLC-Success output
}
macro_rules! subtract_high_prio_fee {
($self: ident, $fee_estimator: expr, $value: expr, $predicted_weight: expr, $used_feerate: expr) => {
{
$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority);
let mut fee = $used_feerate * ($predicted_weight as u64) / 1000;
if $value <= fee {
$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
fee = $used_feerate * ($predicted_weight as u64) / 1000;
if $value <= fee {
$used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
fee = $used_feerate * ($predicted_weight as u64) / 1000;
if $value <= fee {
log_error!($self, "Failed to generate an on-chain punishment tx as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
fee, $value);
false
} else {
log_warn!($self, "Used low priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
$value);
$value -= fee;
true
}
} else {
log_warn!($self, "Used medium priority fee for on-chain punishment tx as high priority fee was more than the entire claim balance ({} sat)",
$value);
$value -= fee;
true
}
} else {
$value -= fee;
true
}
}
}
}
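// Worked example of the fallback above (hypothetical numbers): with $value = 1_000 sat, $predicted_weight = 600
// and the estimator returning 2_000 sat per 1000 weight for HighPriority, the high-priority fee would be
// 2_000 * 600 / 1000 = 1_200 sat > 1_000, so we fall back to Normal; if Normal returns 1_000, the fee is
// 600 sat, $value becomes 400 sat and $used_feerate is left at 1_000.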
/// OnchainTxHandler receives claiming requests, aggregates them if it's sound to do so, broadcasts the
/// resulting transactions and does RBF bumping if possible.
#[derive(Clone)]
pub struct OnchainTxHandler {
destination_script: Script,
// Used to track claiming requests. If a claim tx doesn't confirm before the height timer expires we need to bump
// it (RBF or CPFP). If an input has been part of an aggregate tx at the first claim try, we need to keep it within
// another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in flight for the
// same set of outpoints. One of the outpoints may be spent by a transaction not issued by us. That's why at
// block connection we scan all inputs and, if any of them belongs to the set of a claiming request, we test for set
// equality between the spending transaction and the claim request. If equal, it means the transaction was one of our
// claiming txn and after a security delay of 6 blocks we remove the pending claim request. If not, the transaction
// wasn't ours and we need to regenerate a new claim request with the reduced set of still-claimable outpoints.
// Key is the identifier of the pending claim request, i.e. the txid of the initial claiming transaction generated by
// us, and is immutable until all outpoints of the claimable set are solved post-anti-reorg-delay.
// Entry is a cache of the elements needed to generate a bumped claiming transaction (see ClaimTxBumpMaterial)
#[cfg(test)] // Used in functional_test to verify sanitization
pub pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
#[cfg(not(test))]
pending_claim_requests: HashMap<Sha256dHash, ClaimTxBumpMaterial>,
// Used to link outpoints claimed in a connected block to a pending claim request.
// Key is an outpoint that monitor parsing has detected we have the keys/scripts to claim.
// Value is (pending claim request identifier, confirmation_block); the identifier
// is the txid of the initial claiming transaction and is immutable until the outpoint is
// solved post-anti-reorg-delay, while confirmation_block is used to erase the entry if the
// block with the output gets disconnected.
#[cfg(test)] // Used in functional_test to verify sanitization
pub claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
#[cfg(not(test))]
claimable_outpoints: HashMap<BitcoinOutPoint, (Sha256dHash, u32)>,
onchain_events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,
secp_ctx: Secp256k1<secp256k1::All>,
logger: Arc<Logger>
}
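// Example of how the two maps relate (hypothetical values): a claim tx with txid T spending outpoints A and B
// yields pending_claim_requests[T] = bump material and claimable_outpoints[A] = claimable_outpoints[B] = (T, h),
// where h is the height at which the request was registered; T remains the key even after bumped versions of
// the claim tx (with different txids) are broadcast.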
impl Writeable for OnchainTxHandler {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
self.destination_script.write(writer)?;
writer.write_all(&byte_utils::be64_to_array(self.pending_claim_requests.len() as u64))?;
for (ref ancestor_claim_txid, claim_tx_data) in self.pending_claim_requests.iter() {
ancestor_claim_txid.write(writer)?;
claim_tx_data.write(writer)?;
}
writer.write_all(&byte_utils::be64_to_array(self.claimable_outpoints.len() as u64))?;
for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
outp.write(writer)?;
claim_and_height.0.write(writer)?;
claim_and_height.1.write(writer)?;
}
writer.write_all(&byte_utils::be64_to_array(self.onchain_events_waiting_threshold_conf.len() as u64))?;
for (ref target, ref events) in self.onchain_events_waiting_threshold_conf.iter() {
writer.write_all(&byte_utils::be32_to_array(**target))?;
writer.write_all(&byte_utils::be64_to_array(events.len() as u64))?;
for ev in events.iter() {
match *ev {
OnchainEvent::Claim { ref claim_request } => {
writer.write_all(&[0; 1])?;
claim_request.write(writer)?;
},
OnchainEvent::ContentiousOutpoint { ref outpoint, ref input_material } => {
writer.write_all(&[1; 1])?;
outpoint.write(writer)?;
input_material.write(writer)?;
}
}
}
}
Ok(())
}
}
impl ReadableArgs<Arc<Logger>> for OnchainTxHandler {
fn read<R: ::std::io::Read>(reader: &mut R, logger: Arc<Logger>) -> Result<Self, DecodeError> {
let destination_script = Readable::read(reader)?;
let pending_claim_requests_len: u64 = Readable::read(reader)?;
let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..pending_claim_requests_len {
pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
}
let claimable_outpoints_len: u64 = Readable::read(reader)?;
let mut claimable_outpoints = HashMap::with_capacity(cmp::min(claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..claimable_outpoints_len {
let outpoint = Readable::read(reader)?;
let ancestor_claim_txid = Readable::read(reader)?;
let height = Readable::read(reader)?;
claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
}
let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
let mut onchain_events_waiting_threshold_conf = HashMap::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..waiting_threshold_conf_len {
let height_target = Readable::read(reader)?;
let events_len: u64 = Readable::read(reader)?;
let mut events = Vec::with_capacity(cmp::min(events_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..events_len {
let ev = match <u8 as Readable>::read(reader)? {
0 => {
let claim_request = Readable::read(reader)?;
OnchainEvent::Claim {
claim_request
}
},
1 => {
let outpoint = Readable::read(reader)?;
let input_material = Readable::read(reader)?;
OnchainEvent::ContentiousOutpoint {
outpoint,
input_material
}
}
_ => return Err(DecodeError::InvalidValue),
};
events.push(ev);
}
onchain_events_waiting_threshold_conf.insert(height_target, events);
}
Ok(OnchainTxHandler {
destination_script,
claimable_outpoints,
pending_claim_requests,
onchain_events_waiting_threshold_conf,
secp_ctx: Secp256k1::new(),
logger,
})
}
}
impl OnchainTxHandler {
pub(super) fn new(destination_script: Script, logger: Arc<Logger>) -> Self {
OnchainTxHandler {
destination_script,
pending_claim_requests: HashMap::new(),
claimable_outpoints: HashMap::new(),
onchain_events_waiting_threshold_conf: HashMap::new(),
secp_ctx: Secp256k1::new(),
logger,
}
}
pub(super) fn get_witnesses_weight(inputs: &[InputDescriptors]) -> usize {
let mut tx_weight = 2; // count segwit flags
for inp in inputs {
// We use expected weight (and not actual) as signatures and time lock delays may vary
tx_weight += match inp {
// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
&InputDescriptors::RevokedOfferedHTLC => {
1 + 1 + 73 + 1 + 33 + 1 + 133
},
// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
&InputDescriptors::RevokedReceivedHTLC => {
1 + 1 + 73 + 1 + 33 + 1 + 139
},
// number_of_witness_elements + sig_length + remotehtlc_sig + preimage_length + preimage + witness_script_length + witness_script
&InputDescriptors::OfferedHTLC => {
1 + 1 + 73 + 1 + 32 + 1 + 133
},
// number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
&InputDescriptors::ReceivedHTLC => {
1 + 1 + 73 + 1 + 1 + 1 + 139
},
// number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
&InputDescriptors::RevokedOutput => {
1 + 1 + 73 + 1 + 1 + 1 + 77
},
};
}
tx_weight
}
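// Sanity-checking the arithmetic above: a lone revoked to_local output predicts
// 2 + (1 + 1 + 73 + 1 + 1 + 1 + 77) = 157 weight units of witness data, a revoked offered HTLC predicts
// 2 + (1 + 1 + 73 + 1 + 33 + 1 + 133) = 245, and aggregating both inputs in one claim gives
// 2 + 155 + 243 = 400.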
fn get_height_timer(current_height: u32, timelock_expiration: u32) -> u32 {
if timelock_expiration <= current_height + 3 {
return current_height + 1
} else if timelock_expiration - current_height <= 15 {
return current_height + 3
}
current_height + 15
}
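// Illustration (hypothetical heights): at current_height = 100, an outpoint whose timelock expires at
// height 102 gets a timer of 101 (re-check at the very next block), one expiring at 110 gets 103, and one
// expiring at 200 gets 115, i.e. the closer the deadline, the more aggressively we re-evaluate and bump.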
/// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a counterparty onchain) relies on the assumption that claim transactions get confirmed before timelock expiration
/// (CSV or CLTV, depending on the case). In case of high-fee spikes, a claim tx may get stuck in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or Child-Pays-For-Parent.
fn generate_claim_tx<F: Deref>(&self, height: u32, cached_claim_datas: &ClaimTxBumpMaterial, fee_estimator: F) -> Option<(u32, u64, Transaction)>
where F::Target: FeeEstimator
{
if cached_claim_datas.per_input_material.len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
let mut inputs = Vec::new();
for outp in cached_claim_datas.per_input_material.keys() {
log_trace!(self, "Outpoint {}:{}", outp.txid, outp.vout);
inputs.push(TxIn {
previous_output: *outp,
script_sig: Script::new(),
sequence: 0xfffffffd,
witness: Vec::new(),
});
}
let mut bumped_tx = Transaction {
version: 2,
lock_time: 0,
input: inputs,
output: vec![TxOut {
script_pubkey: self.destination_script.clone(),
value: 0
}],
};
macro_rules! RBF_bump {
($amount: expr, $old_feerate: expr, $fee_estimator: expr, $predicted_weight: expr) => {
{
let mut used_feerate;
// If the old feerate is lower than the current one returned by the fee estimator, use the latter to compute the new fee...
let new_fee = if $old_feerate < $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) {
let mut value = $amount;
if subtract_high_prio_fee!(self, $fee_estimator, value, $predicted_weight, used_feerate) {
// Overflow check is done in subtract_high_prio_fee
$amount - value
} else {
log_trace!(self, "Can't new-estimation bump new claiming tx, amount {} is too small", $amount);
return None;
}
// ...else just increase the previous feerate by 25% (because that's a nice number)
} else {
let fee = $old_feerate * $predicted_weight / 750;
if $amount <= fee {
log_trace!(self, "Can't 25% bump new claiming tx, amount {} is too small", $amount);
return None;
}
fee
};
let previous_fee = $old_feerate * $predicted_weight / 1000;
let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * $predicted_weight / 1000;
// BIP 125 Opt-in Full Replace-by-Fee Signaling
// * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions.
// * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting.
let new_fee = if new_fee < previous_fee + min_relay_fee {
new_fee + previous_fee + min_relay_fee - new_fee
} else {
new_fee
};
Some((new_fee, new_fee * 1000 / $predicted_weight))
}
}
}
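// Worked example (hypothetical numbers): with $old_feerate = 1_000 sat per 1000 weight and
// $predicted_weight = 600, the previous-feerate branch proposes new_fee = 1_000 * 600 / 750 = 800 sat,
// i.e. roughly 1.33x the previous feerate, which satisfies the >25% bump heuristic the tests above check for;
// previous_fee is 600 sat, and the BIP 125 floor then raises new_fee to previous_fee + min_relay_fee
// whenever the proposal falls short of that sum (the branch above simplifies to exactly that value).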
let new_timer = Self::get_height_timer(height, cached_claim_datas.soonest_timelock);
let mut inputs_witnesses_weight = 0;
let mut amt = 0;
for per_outp_material in cached_claim_datas.per_input_material.values() {
match per_outp_material {
&InputMaterial::Revoked { ref script, ref is_htlc, ref amount, .. } => {
inputs_witnesses_weight += Self::get_witnesses_weight(if !is_htlc { &[InputDescriptors::RevokedOutput] } else if HTLCType::scriptlen_to_htlctype(script.len()) == Some(HTLCType::OfferedHTLC) { &[InputDescriptors::RevokedOfferedHTLC] } else if HTLCType::scriptlen_to_htlctype(script.len()) == Some(HTLCType::AcceptedHTLC) { &[InputDescriptors::RevokedReceivedHTLC] } else { unreachable!() });
amt += *amount;
},
&InputMaterial::RemoteHTLC { ref preimage, ref amount, .. } => {
inputs_witnesses_weight += Self::get_witnesses_weight(if preimage.is_some() { &[InputDescriptors::OfferedHTLC] } else { &[InputDescriptors::ReceivedHTLC] });
amt += *amount;
},
&InputMaterial::LocalHTLC { .. } => { return None; }
}
}
let predicted_weight = bumped_tx.get_weight() + inputs_witnesses_weight;
let mut new_feerate;
// If old feerate is 0, first iteration of this claim, use normal fee calculation
if cached_claim_datas.feerate_previous != 0 {
if let Some((new_fee, feerate)) = RBF_bump!(amt, cached_claim_datas.feerate_previous, fee_estimator, predicted_weight as u64) {
// If the newly computed fee is greater than the whole claimable amount, burn it all in fees
if new_fee > amt {
bumped_tx.output[0].value = 0;
} else {
bumped_tx.output[0].value = amt - new_fee;
}
new_feerate = feerate;
} else { return None; }
} else {
if subtract_high_prio_fee!(self, fee_estimator, amt, predicted_weight, new_feerate) {
bumped_tx.output[0].value = amt;
} else { return None; }
}
assert!(new_feerate != 0);
for (i, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
match per_outp_material {
&InputMaterial::Revoked { ref script, ref pubkey, ref key, ref is_htlc, ref amount } => {
let sighash_parts = bip143::SighashComponents::new(&bumped_tx);
let sighash = hash_to_message!(&sighash_parts.sighash_all(&bumped_tx.input[i], &script, *amount)[..]);
let sig = self.secp_ctx.sign(&sighash, &key);
bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
if *is_htlc {
bumped_tx.input[i].witness.push(pubkey.unwrap().clone().serialize().to_vec());
} else {
bumped_tx.input[i].witness.push(vec!(1));
}
bumped_tx.input[i].witness.push(script.clone().into_bytes());
log_trace!(self, "Going to broadcast Penalty Transaction {} claiming revoked {} output {} from {} with new feerate {}...", bumped_tx.txid(), if !is_htlc { "to_local" } else if HTLCType::scriptlen_to_htlctype(script.len()) == Some(HTLCType::OfferedHTLC) { "offered" } else if HTLCType::scriptlen_to_htlctype(script.len()) == Some(HTLCType::AcceptedHTLC) { "received" } else { "" }, outp.vout, outp.txid, new_feerate);
},
&InputMaterial::RemoteHTLC { ref script, ref key, ref preimage, ref amount, ref locktime } => {
if !preimage.is_some() { bumped_tx.lock_time = *locktime }; // Right now we don't aggregate time-locked transactions; if we do, we should set lock_time beforehand to avoid breaking the sighash computation
let sighash_parts = bip143::SighashComponents::new(&bumped_tx);
let sighash = hash_to_message!(&sighash_parts.sighash_all(&bumped_tx.input[i], &script, *amount)[..]);
let sig = self.secp_ctx.sign(&sighash, &key);
bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
if let &Some(preimage) = preimage {
bumped_tx.input[i].witness.push(preimage.clone().0.to_vec());
} else {
bumped_tx.input[i].witness.push(vec![0]);
}
bumped_tx.input[i].witness.push(script.clone().into_bytes());
log_trace!(self, "Going to broadcast Claim Transaction {} claiming remote {} htlc output {} from {} with new feerate {}...", bumped_tx.txid(), if preimage.is_some() { "offered" } else { "received" }, outp.vout, outp.txid, new_feerate);
},
&InputMaterial::LocalHTLC { .. } => {
//TODO : Given that Local Commitment Transaction and HTLC-Timeout/HTLC-Success are counter-signed by peer, we can't
// RBF them. Need a Lightning specs change and package relay modification :
// https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
return None;
}
}
}
log_trace!(self, "...with timer {}", new_timer);
assert!(predicted_weight >= bumped_tx.get_weight());
Some((new_timer, new_feerate, bumped_tx))
}
pub(super) fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<Vec<(u32, bool, BitcoinOutPoint, InputMaterial)>>, height: u32, broadcaster: B, fee_estimator: F) -> Vec<SpendableOutputDescriptor>
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator
{
let mut new_claims = Vec::new();
let mut aggregated_claim = HashMap::new();
let mut aggregated_soonest = ::std::u32::MAX;
let mut spendable_outputs = Vec::new();
// Try to aggregate outputs if they 1) belong to the same parent tx and 2) their
// timelock expiration isn't imminent (<= CLTV_SHARED_CLAIM_BUFFER).
for siblings_outpoints in claimable_outpoints {
for outp in siblings_outpoints {
// Don't claim an outpoint twice; that would be bad for privacy and may uselessly lock up a CPFP input for a while
if let Some(_) = self.claimable_outpoints.get(&outp.2) { log_trace!(self, "Bouncing off outpoint {}:{}, already registered its claiming request", outp.2.txid, outp.2.vout); } else {
log_trace!(self, "Test if outpoint can be aggregated with expiration {} against {}", outp.0, height + CLTV_SHARED_CLAIM_BUFFER);
if outp.0 <= height + CLTV_SHARED_CLAIM_BUFFER || !outp.1 { // Don't aggregate if outpoint absolute timelock is soon or marked as non-aggregable
let mut single_input = HashMap::new();
single_input.insert(outp.2, outp.3);
new_claims.push((outp.0, single_input));
} else {
aggregated_claim.insert(outp.2, outp.3);
if outp.0 < aggregated_soonest {
aggregated_soonest = outp.0;
}
}
}
}
}
new_claims.push((aggregated_soonest, aggregated_claim));
// Generate claim transactions and track them to bump if necessary at
// height timer expiration (i.e. in how many blocks we're going to take action).
for claim in new_claims {
let mut claim_material = ClaimTxBumpMaterial { height_timer: 0, feerate_previous: 0, soonest_timelock: claim.0, per_input_material: claim.1.clone() };
if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator) {
claim_material.height_timer = new_timer;
claim_material.feerate_previous = new_feerate;
let txid = tx.txid();
self.pending_claim_requests.insert(txid, claim_material);
for k in claim.1.keys() {
log_trace!(self, "Registering claiming request for {}:{}", k.txid, k.vout);
self.claimable_outpoints.insert(k.clone(), (txid, height));
}
log_trace!(self, "Broadcast onchain {}", log_tx!(tx));
spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
outpoint: BitcoinOutPoint { txid: tx.txid(), vout: 0 },
output: tx.output[0].clone(),
});
broadcaster.broadcast_transaction(&tx);
}
}
let mut bump_candidates = HashSet::new();
for tx in txn_matched {
// Scan all inputs to check whether one of the spent outpoints is of interest to us
let mut claimed_outputs_material = Vec::new();
for inp in &tx.input {
if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) {
// If outpoint has claim request pending on it...
if let Some(claim_material) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) {
//... we need to verify equality between the transaction outpoints and the claim request
// outpoints to know if the transaction is the original claim or a bumped one issued
// by us.
let mut set_equality = true;
if claim_material.per_input_material.len() != tx.input.len() {
set_equality = false;
} else {
for (claim_inp, tx_inp) in claim_material.per_input_material.keys().zip(tx.input.iter()) {
if *claim_inp != tx_inp.previous_output {
set_equality = false;
}
}
}
macro_rules! clean_claim_request_after_safety_delay {
() => {
let new_event = OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() };
match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
hash_map::Entry::Occupied(mut entry) => {
if !entry.get().contains(&new_event) {
entry.get_mut().push(new_event);
}
},
hash_map::Entry::Vacant(entry) => {
entry.insert(vec![new_event]);
}
}
}
}
// If this is our transaction (or our counterparty spent all the outputs
// before we could anyway, with the same input order as us), wait for
// ANTI_REORG_DELAY and clean the RBF tracking map.
if set_equality {
clean_claim_request_after_safety_delay!();
} else { // If false, generate a new claim request with the updated outpoint set
for input in tx.input.iter() {
if let Some(input_material) = claim_material.per_input_material.remove(&input.previous_output) {
claimed_outputs_material.push((input.previous_output, input_material));
}
// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
if claim_material.per_input_material.is_empty() {
clean_claim_request_after_safety_delay!();
}
}
//TODO: recompute soonest_timelock to avoid wasting a bit on fees
bump_candidates.insert(first_claim_txid_height.0.clone());
}
break; // No need to iterate further, either the tx is ours or theirs
} else {
panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
}
}
}
for (outpoint, input_material) in claimed_outputs_material.drain(..) {
let new_event = OnchainEvent::ContentiousOutpoint { outpoint, input_material };
match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
hash_map::Entry::Occupied(mut entry) => {
if !entry.get().contains(&new_event) {
entry.get_mut().push(new_event);
}
},
hash_map::Entry::Vacant(entry) => {
entry.insert(vec![new_event]);
}
}
}
}
// After the security delay, either our claim tx got enough confs or the outpoint is definitely out of reach
if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&height) {
for ev in events {
match ev {
OnchainEvent::Claim { claim_request } => {
// We may remove a whole set of claim outpoints here, as these may have
// been aggregated in a single tx and thus claimed atomically
if let Some(bump_material) = self.pending_claim_requests.remove(&claim_request) {
for outpoint in bump_material.per_input_material.keys() {
self.claimable_outpoints.remove(&outpoint);
}
}
},
OnchainEvent::ContentiousOutpoint { outpoint, .. } => {
self.claimable_outpoints.remove(&outpoint);
}
}
}
}
// Check if any pending claim request must be rescheduled
for (first_claim_txid, ref claim_data) in self.pending_claim_requests.iter() {
if claim_data.height_timer == height {
bump_candidates.insert(*first_claim_txid);
}
}
// Build, bump and rebroadcast tx accordingly
for first_claim_txid in bump_candidates.iter() {
if let Some((new_timer, new_feerate)) = {
if let Some(claim_material) = self.pending_claim_requests.get(first_claim_txid) {
if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator) {
log_trace!(self, "Broadcast onchain {}", log_tx!(bump_tx));
broadcaster.broadcast_transaction(&bump_tx);
Some((new_timer, new_feerate))
} else { None }
} else { unreachable!(); }
} {
if let Some(claim_material) = self.pending_claim_requests.get_mut(first_claim_txid) {
claim_material.height_timer = new_timer;
claim_material.feerate_previous = new_feerate;
} else { unreachable!(); }
}
}
spendable_outputs
}
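// Hypothetical call site from ChannelMonitor::block_connected (variable names illustrative, assuming the
// monitor has already collected this block's `claimable_outpoints`):
//   let spendable = self.onchain_tx_handler.block_connected(&txn_matched, claimable_outpoints, height,
//                                                           &*broadcaster, &*fee_estimator);
// Each returned SpendableOutputDescriptor::StaticOutput points at output 0 of a freshly broadcast claim tx
// paying to `destination_script`.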
pub(super) fn block_disconnected<B: Deref, F: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F)
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator
{
let mut bump_candidates = HashMap::new();
if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
//- our claim tx on a commitment tx output
//- resurrect outpoint back in its claimable set and regenerate tx
for ev in events {
match ev {
OnchainEvent::ContentiousOutpoint { outpoint, input_material } => {
if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(&outpoint) {
if let Some(claim_material) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) {
claim_material.per_input_material.insert(outpoint, input_material);
// Using a HashMap guarantees us that if we have multiple outpoints getting
// resurrected only one bump claim tx is going to be broadcast
bump_candidates.insert(ancestor_claimable_txid.clone(), claim_material.clone());
}
}
},
_ => {},
}
}
}
for (_, claim_material) in bump_candidates.iter_mut() {
if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator) {
claim_material.height_timer = new_timer;
claim_material.feerate_previous = new_feerate;
broadcaster.broadcast_transaction(&bump_tx);
}
}
for (ancestor_claim_txid, claim_material) in bump_candidates.drain() {
self.pending_claim_requests.insert(ancestor_claim_txid.0, claim_material);
}
//TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx, but
// right now if one of the outpoints gets disconnected, just erase the whole pending claim request.
let mut remove_request = Vec::new();
self.claimable_outpoints.retain(|_, ref v|
if v.1 == height {
remove_request.push(v.0.clone());
false
} else { true });
for req in remove_request {
self.pending_claim_requests.remove(&req);
}
}
}