Avoid generating redundant claims after initial confirmation

These claims will never be valid, as a previous claim has already
confirmed. If a previous claim is reorged out of the chain, a new claim
will be generated, bypassing the new behavior.

While this doesn't change much for our existing transaction-based
claims, as broadcasting an already confirmed transaction acts as a NOP,
it prevents us from yielding redundant event-based claims, which will be
introduced as part of the anchors patchset.
Wilmer Paulino 2022-09-21 12:54:28 -07:00
parent 6957fb63f9
commit a0891368ee
2 changed files with 37 additions and 35 deletions
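
As a rough illustration of the change below, here is a minimal, self-contained sketch of the new guard, with toy types standing in for LDK's package request, its `claimable_outpoints` map, and its `OnchainEvent::Claim` entries. Every name in this sketch is a hypothetical stand-in, not LDK API:

use std::collections::HashMap;

// Toy stand-in for `bitcoin::OutPoint`.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct OutPoint { txid: u64, vout: u32 }

// Toy stand-in for an `OnchainEvent::Claim` entry awaiting the
// anti-reorg confirmation threshold.
struct ConfirmedClaim { claim_request: u64 }

/// Returns true if every outpoint in the request already has a confirmed
/// spend, i.e. generating another claim would be redundant.
fn all_inputs_have_confirmed_spend(
	request_outpoints: &[OutPoint],
	// Maps each outpoint to the id of the claim request that spends it.
	claimable_outpoints: &HashMap<OutPoint, u64>,
	// Claims already seen on chain, awaiting the confirmation threshold.
	pending_claims: &[ConfirmedClaim],
) -> bool {
	request_outpoints.iter().all(|outpoint| {
		match claimable_outpoints.get(outpoint) {
			// Check each outpoint's spend individually: the request may
			// have had outpoints split off into separate claims.
			Some(claim_id) => pending_claims.iter().any(|c| c.claim_request == *claim_id),
			// No registered spend for this outpoint yet.
			None => false,
		}
	})
}

fn main() {
	let op = OutPoint { txid: 1, vout: 0 };
	let claimable: HashMap<_, _> = [(op, 42u64)].into_iter().collect();

	// A claim spending `op` has confirmed: no new claim should be generated.
	let mut events = vec![ConfirmedClaim { claim_request: 42 }];
	assert!(all_inputs_have_confirmed_spend(&[op], &claimable, &events));

	// The claim is reorged out (its event entry is dropped), so the guard
	// no longer fires and a fresh claim would be generated.
	events.clear();
	assert!(!all_inputs_have_confirmed_spend(&[op], &claimable, &events));
	println!("redundant-claim guard behaves as expected");
}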


@@ -430,7 +430,43 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 	where F::Target: FeeEstimator,
 	      L::Target: Logger,
 	{
-		if cached_request.outpoints().len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
+		let request_outpoints = cached_request.outpoints();
+		if request_outpoints.is_empty() {
+			// Don't prune pending claiming request yet, we may have to resurrect HTLCs. Untractable
+			// packages cannot be aggregated and will never be split, so we cannot end up with an
+			// empty claim.
+			debug_assert!(cached_request.is_malleable());
+			return None;
+		}
+		// If we've seen transaction inclusion in the chain for all outpoints in our request, we
+		// don't need to continue generating more claims. We'll keep tracking the request to fully
+		// remove it once it reaches the confirmation threshold, or to generate a new claim if the
+		// transaction is reorged out.
+		let mut all_inputs_have_confirmed_spend = true;
+		for outpoint in &request_outpoints {
+			if let Some(first_claim_txid_height) = self.claimable_outpoints.get(outpoint) {
+				// We check for outpoint spends within claims individually rather than as a set
+				// since requests can have outpoints split off.
+				if !self.onchain_events_awaiting_threshold_conf.iter()
+					.any(|event_entry| if let OnchainEvent::Claim { claim_request } = event_entry.event {
+						first_claim_txid_height.0 == claim_request
+					} else {
+						// The onchain event is not a claim, keep seeking until we find one.
+						false
+					})
+				{
+					// Either we had no `OnchainEvent::Claim`, or we did but none matched the
+					// outpoint's registered spend.
+					all_inputs_have_confirmed_spend = false;
+				}
+			} else {
+				// The request's outpoint spend does not exist yet.
+				all_inputs_have_confirmed_spend = false;
+			}
+		}
+		if all_inputs_have_confirmed_spend {
+			return None;
+		}
 		// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
 		// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).

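A design note on the per-outpoint check above: because a package can have outpoints split off into separate claims, each outpoint's spend is matched against the pending claim events individually (via `.any`) rather than comparing the request's outpoint set against a single claim. Continuing the hedged toy sketch from the top of this page, a request whose outpoints ended up in two different claims only counts as fully spent once both claims have been seen on chain:

	let op_a = OutPoint { txid: 1, vout: 0 };
	let op_b = OutPoint { txid: 1, vout: 1 };
	// `op_b` was split off into its own claim (id 8).
	let claimable: HashMap<_, _> = [(op_a, 7u64), (op_b, 8u64)].into_iter().collect();
	// Only the claim spending `op_a` has confirmed so far, so the handler
	// would still generate a claim for the request...
	let events = vec![ConfirmedClaim { claim_request: 7 }];
	assert!(!all_inputs_have_confirmed_spend(&[op_a, op_b], &claimable, &events));
	// ...but once the claim spending `op_b` confirms too, the guard returns
	// true and no further claims are produced.
	let events = vec![ConfirmedClaim { claim_request: 7 }, ConfirmedClaim { claim_request: 8 }];
	assert!(all_inputs_have_confirmed_spend(&[op_a, op_b], &claimable, &events));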

@@ -2951,26 +2951,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	mine_transaction(&nodes[1], &timeout_tx);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
-	{
-		// B will rebroadcast a fee-bumped timeout transaction here.
-		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-		assert_eq!(node_txn.len(), 1);
-		check_spends!(node_txn[0], commitment_tx[0]);
-	}
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-	{
-		// B may rebroadcast its own holder commitment transaction here, as a safeguard against
-		// some incredibly unlikely partial-eclipse-attack scenarios. That said, because the
-		// original commitment_tx[0] (also spending chan_2.3) has reached ANTI_REORG_DELAY B really
-		// shouldn't broadcast anything here, and in some connect style scenarios we do not.
-		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-		if node_txn.len() == 1 {
-			check_spends!(node_txn[0], chan_2.3);
-		} else {
-			assert_eq!(node_txn.len(), 0);
-		}
-	}
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
 	check_added_monitors!(nodes[1], 1);
@@ -8001,22 +7983,6 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
 	let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
 	connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
-	{
-		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(node_txn.len(), 2); // 2 bumped penalty txn on revoked commitment tx
-		check_spends!(node_txn[0], revoked_local_txn[0]);
-		check_spends!(node_txn[1], revoked_local_txn[0]);
-		// Note that these are both bogus - they spend outputs already claimed in block 129:
-		if node_txn[0].input[0].previous_output == revoked_htlc_txn[0].input[0].previous_output {
-			assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
-		} else {
-			assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
-			assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
-		}
-		node_txn.clear();
-	};
 	// Few more blocks to confirm penalty txn
 	connect_blocks(&nodes[0], 4);