Mirror of https://github.com/lightningdevkit/rust-lightning.git (synced 2025-02-25 15:20:24 +01:00)
Merge pull request #2797 from dunxen/2023-12-purgetochannelid

Remove `OutPoint::to_channel_id` method

Commit 7f177bb6dd
24 changed files with 337 additions and 232 deletions
.gitignore (vendored): 1
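Every change in the diff below follows the same pattern: call sites that used to re-derive a channel ID from a funding outpoint via `OutPoint::to_channel_id()` now either call `ChannelId::v1_from_funding_outpoint()` explicitly (correct only for V1-established channels) or use a `ChannelId` that is stored or passed alongside the funding outpoint. A minimal before/after sketch; the bindings here are illustrative, not taken from the diff:

    // Before: the channel ID was always re-derived from the funding outpoint.
    let channel_id = funding_outpoint.to_channel_id();

    // After: derive it explicitly where a V1-established channel is known...
    let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
    // ...or read the ChannelId that now travels with the data, e.g. from a ChannelMonitor.
    let channel_id = monitor.channel_id();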
@@ -13,3 +13,4 @@ lightning-rapid-gossip-sync/res/full_graph.lngossip
 lightning-custom-message/target
 lightning-transaction-sync/target
 no-std-check/target
+msrv-no-dev-deps-check/target
@@ -40,7 +40,7 @@ use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget,
 use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
 use lightning::events;
 use lightning::events::MessageSendEventsProvider;
-use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
+use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
 use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
 use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};

@@ -191,7 +191,7 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
 self.chain_monitor.update_channel(funding_txo, update)
 }

-fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
 return self.chain_monitor.release_pending_monitor_events();
 }
 }
@@ -22,8 +22,7 @@ use bitcoin::consensus::encode::deserialize;
 use bitcoin::network::constants::Network;

 use bitcoin::hashes::hex::FromHex;
-use bitcoin::hashes::Hash as TraitImport;
-use bitcoin::hashes::HashEngine as TraitImportEngine;
+use bitcoin::hashes::Hash as _;
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};

@@ -651,7 +650,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
 if let None = loss_detector.txids_confirmed.get(&funding_txid) {
 let outpoint = OutPoint { txid: funding_txid, index: 0 };
 for chan in channelmanager.list_channels() {
-if chan.channel_id == outpoint.to_channel_id() {
+if chan.funding_txo == Some(outpoint) {
 tx.version += 1;
 continue 'search_loop;
 }
@@ -929,7 +929,7 @@ mod tests {
 use lightning::chain::transaction::OutPoint;
 use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
 use lightning::{get_event_msg, get_event};
-use lightning::ln::PaymentHash;
+use lightning::ln::{PaymentHash, ChannelId};
 use lightning::ln::channelmanager;
 use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
 use lightning::ln::features::{ChannelFeatures, NodeFeatures};

@@ -1414,7 +1414,7 @@ mod tests {
 }

 // Force-close the channel.
-nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+nodes[0].node.force_close_broadcasting_latest_txn(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();

 // Check that the force-close updates are persisted.
 check_persisted_data!(nodes[0].node, filepath.clone());
@@ -450,7 +450,7 @@ mod tests {
 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();

 // Set the store's directory to read-only, which should result in
 // returning an unrecoverable failure when we then attempt to persist a

@@ -489,7 +489,7 @@ mod tests {
 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();

 // Create the store with an invalid directory name and test that the
 // channel fails to open because the directories fail to be created. There
@@ -31,6 +31,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::transaction::{OutPoint, TransactionData};
+use crate::ln::ChannelId;
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::events;
 use crate::events::{Event, EventHandler};

@@ -158,7 +159,7 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
 ///
 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
 /// [`Writeable::write`]: crate::util::ser::Writeable::write
-fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;

 /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
 /// update.

@@ -193,7 +194,7 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
 /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
 ///
 /// [`Writeable::write`]: crate::util::ser::Writeable::write
-fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
 }

 struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> {

@@ -287,7 +288,7 @@ pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T:
 persister: P,
 /// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
 /// from the user and not from a [`ChannelMonitor`].
-pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
+pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
 /// The best block height seen, used as a proxy for the passage of time.
 highest_chain_height: AtomicUsize,
@@ -471,12 +472,15 @@ where C::Target: chain::Filter,
 }
 }

-/// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
+/// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored.
 ///
 /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
 /// monitoring for on-chain state resolutions.
-pub fn list_monitors(&self) -> Vec<OutPoint> {
-self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
+pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> {
+self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| {
+let channel_id = monitor_holder.monitor.channel_id();
+(*outpoint, channel_id)
+}).collect()
 }

 #[cfg(not(c_bindings))]
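A quick usage sketch of the new `list_monitors` return shape; the `chain_monitor` binding is assumed, not taken from the diff:

    for (funding_outpoint, channel_id) in chain_monitor.list_monitors() {
        // The channel ID no longer has to be re-derived from the funding outpoint.
        println!("watching {} (funded by {}:{})", channel_id, funding_outpoint.txid, funding_outpoint.index);
    }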
@@ -542,8 +546,9 @@ where C::Target: chain::Filter,
 // Completed event.
 return Ok(());
 }
-self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
-funding_txo,
+let channel_id = monitor_data.monitor.channel_id();
+self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
+funding_txo, channel_id,
 monitor_update_id: monitor_data.monitor.get_latest_update_id(),
 }], monitor_data.monitor.get_counterparty_node_id()));
 },

@@ -565,9 +570,14 @@ where C::Target: chain::Filter,
 #[cfg(any(test, fuzzing))]
 pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
 let monitors = self.monitors.read().unwrap();
-let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
-self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
+let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) {
+(m.monitor.get_counterparty_node_id(), m.monitor.channel_id())
+} else {
+(None, ChannelId::v1_from_funding_outpoint(funding_txo))
+};
+self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
 funding_txo,
+channel_id,
 monitor_update_id,
 }], counterparty_node_id));
 self.event_notifier.notify();
@@ -753,11 +763,14 @@ where C::Target: chain::Filter,
 }

 fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
+// `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those
+// versions are V1-established. For 0.0.121+ the `channel_id` field is always `Some`.
+let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
 // Update the monitor that watches the channel referred to by the given outpoint.
 let monitors = self.monitors.read().unwrap();
 match monitors.get(&funding_txo) {
 None => {
-let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(funding_txo.to_channel_id()));
+let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id));
 log_error!(logger, "Failed to update channel monitor: no such monitor registered");

 // We should never ever trigger this from within ChannelManager. Technically a
@@ -815,7 +828,7 @@ where C::Target: chain::Filter,
 }
 }

-fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
 let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
 for monitor_state in self.monitors.read().unwrap().values() {
 let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);

@@ -829,8 +842,9 @@ where C::Target: chain::Filter,
 let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
 if monitor_events.len() > 0 {
 let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+let monitor_channel_id = monitor_state.monitor.channel_id();
 let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
-pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
+pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id));
 }
 }
 }
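For downstream consumers the only visible difference is the extra tuple element. A hedged sketch of iterating the new shape; the `watch` binding (anything implementing `chain::Watch`) is illustrative:

    for (funding_outpoint, channel_id, monitor_events, counterparty_node_id) in watch.release_pending_monitor_events() {
        // `channel_id` now arrives alongside the funding outpoint instead of being
        // recomputed via the removed `OutPoint::to_channel_id()`.
        let _ = (funding_outpoint, channel_id, monitor_events, counterparty_node_id);
    }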
@@ -96,6 +96,11 @@ pub struct ChannelMonitorUpdate {
 ///
 /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
 pub update_id: u64,
+/// The channel ID associated with these updates.
+///
+/// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and
+/// always `Some` otherwise.
+pub channel_id: Option<ChannelId>,
 }

 /// The update ID used for a [`ChannelMonitorUpdate`] that is either:

@@ -118,6 +123,7 @@ impl Writeable for ChannelMonitorUpdate {
 }
 write_tlv_fields!(w, {
 (1, self.counterparty_node_id, option),
+(3, self.channel_id, option),
 });
 Ok(())
 }

@@ -134,10 +140,12 @@ impl Readable for ChannelMonitorUpdate {
 }
 }
 let mut counterparty_node_id = None;
+let mut channel_id = None;
 read_tlv_fields!(r, {
 (1, counterparty_node_id, option),
+(3, channel_id, option),
 });
-Ok(Self { update_id, counterparty_node_id, updates })
+Ok(Self { update_id, counterparty_node_id, updates, channel_id })
 }
 }
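Because the new field is an odd-numbered, optional TLV, updates written by older LDK versions still deserialize cleanly: `channel_id` simply comes back as `None`, and readers fall back to the V1 derivation from the funding outpoint, as the `ChainMonitor::update_channel` hunk above does. A minimal sketch of that fallback, with `update` and `funding_txo` assumed in scope:

    let channel_id = update.channel_id
        .unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));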
@@ -158,6 +166,8 @@ pub enum MonitorEvent {
 Completed {
 /// The funding outpoint of the [`ChannelMonitor`] that was updated
 funding_txo: OutPoint,
+/// The channel ID of the channel associated with the [`ChannelMonitor`]
+channel_id: ChannelId,
 /// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
 /// [`ChannelMonitor::get_latest_update_id`].
 ///

@@ -172,6 +182,7 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
 (0, Completed) => {
 (0, funding_txo, required),
 (2, monitor_update_id, required),
+(4, channel_id, required),
 },
 ;
 (2, HTLCEvent),

@@ -772,6 +783,7 @@ pub(crate) struct ChannelMonitorImpl<Signer: WriteableEcdsaChannelSigner> {

 channel_keys_id: [u8; 32],
 holder_revocation_basepoint: RevocationBasepoint,
+channel_id: ChannelId,
 funding_info: (OutPoint, ScriptBuf),
 current_counterparty_commitment_txid: Option<Txid>,
 prev_counterparty_commitment_txid: Option<Txid>,
@@ -1097,6 +1109,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signe
 (13, self.spendable_txids_confirmed, required_vec),
 (15, self.counterparty_fulfilled_htlcs, required),
 (17, self.initial_counterparty_commitment_info, option),
+(19, self.channel_id, required),
 });

 Ok(())

@@ -1160,7 +1173,7 @@ impl<'a, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {

 pub(crate) fn from_impl<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>) -> Self {
 let peer_id = monitor_impl.counterparty_node_id;
-let channel_id = Some(monitor_impl.funding_info.0.to_channel_id());
+let channel_id = Some(monitor_impl.channel_id());
 WithChannelMonitor {
 logger, peer_id, channel_id,
 }

@@ -1181,7 +1194,8 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 funding_redeemscript: ScriptBuf, channel_value_satoshis: u64,
 commitment_transaction_number_obscure_factor: u64,
 initial_holder_commitment_tx: HolderCommitmentTransaction,
-best_block: BestBlock, counterparty_node_id: PublicKey) -> ChannelMonitor<Signer> {
+best_block: BestBlock, counterparty_node_id: PublicKey, channel_id: ChannelId,
+) -> ChannelMonitor<Signer> {

 assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
 let counterparty_payment_script = chan_utils::get_counterparty_payment_script(

@@ -1235,6 +1249,7 @@

 channel_keys_id,
 holder_revocation_basepoint,
+channel_id,
 funding_info,
 current_counterparty_commitment_txid: None,
 prev_counterparty_commitment_txid: None,

@@ -1386,6 +1401,11 @@
 self.inner.lock().unwrap().get_funding_txo().clone()
 }

+/// Gets the channel_id of the channel this ChannelMonitor is monitoring for.
+pub fn channel_id(&self) -> ChannelId {
+self.inner.lock().unwrap().channel_id()
+}
+
 /// Gets a list of txids, with their output scripts (in the order they appear in the
 /// transaction), which we must learn about spends of via block_connected().
 pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, ScriptBuf)>)> {
@@ -2834,7 +2854,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
 } else if !self.holder_tx_signed {
 log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
-log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
+log_error!(logger, " in channel monitor for channel {}!", &self.channel_id());
 log_error!(logger, " Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!");
 } else {
 // If we generated a MonitorEvent::HolderForceClosed, the ChannelManager

@@ -2880,6 +2900,10 @@
 &self.funding_info
 }

+pub fn channel_id(&self) -> ChannelId {
+self.channel_id
+}
+
 fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
 // If we've detected a counterparty commitment tx on chain, we must include it in the set
 // of outputs to watch for spends of, otherwise we're likely to lose user funds. Because

@@ -3642,7 +3666,7 @@
 if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
 let mut balance_spendable_csv = None;
 log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
-&self.funding_info.0.to_channel_id(), txid);
+&self.channel_id(), txid);
 self.funding_spend_seen = true;
 let mut commitment_tx_to_counterparty_output = None;
 if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.to_consensus_u32() >> 8*3) as u8 == 0x20 {

@@ -3812,7 +3836,7 @@
 log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
 self.pending_events.push(Event::SpendableOutputs {
 outputs: vec![descriptor],
-channel_id: Some(self.funding_info.0.to_channel_id()),
+channel_id: Some(self.channel_id()),
 });
 self.spendable_txids_confirmed.push(entry.txid);
 },
@@ -4557,6 +4581,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 let mut spendable_txids_confirmed = Some(Vec::new());
 let mut counterparty_fulfilled_htlcs = Some(HashMap::new());
 let mut initial_counterparty_commitment_info = None;
+let mut channel_id = None;
 read_tlv_fields!(reader, {
 (1, funding_spend_confirmed, option),
 (3, htlcs_resolved_on_chain, optional_vec),

@@ -4567,6 +4592,7 @@
 (13, spendable_txids_confirmed, optional_vec),
 (15, counterparty_fulfilled_htlcs, option),
 (17, initial_counterparty_commitment_info, option),
+(19, channel_id, option),
 });

 // Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the

@@ -4591,6 +4617,7 @@

 channel_keys_id,
 holder_revocation_basepoint,
+channel_id: channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)),
 funding_info,
 current_counterparty_commitment_txid,
 prev_counterparty_commitment_txid,
@@ -4665,7 +4692,7 @@ mod tests {
 use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
 use crate::chain::transaction::OutPoint;
 use crate::sign::InMemorySigner;
-use crate::ln::{PaymentPreimage, PaymentHash};
+use crate::ln::{PaymentPreimage, PaymentHash, ChannelId};
 use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey};
 use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
 use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};

@@ -4841,6 +4868,7 @@ mod tests {
 htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap()))
 };
 let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
 let channel_parameters = ChannelTransactionParameters {
 holder_pubkeys: keys.holder_channel_pubkeys.clone(),
 holder_selected_contest_delay: 66,

@@ -4860,7 +4888,7 @@ mod tests {
 Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
 (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
 &channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
-best_block, dummy_key);
+best_block, dummy_key, channel_id);

 let mut htlcs = preimages_slice_to_htlcs!(preimages[0..10]);
 let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs);

@@ -5090,6 +5118,7 @@ mod tests {
 htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())),
 };
 let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
 let channel_parameters = ChannelTransactionParameters {
 holder_pubkeys: keys.holder_channel_pubkeys.clone(),
 holder_selected_contest_delay: 66,

@@ -5107,9 +5136,9 @@ mod tests {
 Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
 (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
 &channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
-best_block, dummy_key);
+best_block, dummy_key, channel_id);

-let chan_id = monitor.inner.lock().unwrap().funding_info.0.to_channel_id().clone();
+let chan_id = monitor.inner.lock().unwrap().channel_id();
 let context_logger = WithChannelMonitor::from(&logger, &monitor);
 log_error!(context_logger, "This is an error");
 log_warn!(context_logger, "This is an error");
@@ -17,6 +17,7 @@ use bitcoin::network::constants::Network;
 use bitcoin::secp256k1::PublicKey;

 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
+use crate::ln::ChannelId;
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::chain::transaction::{OutPoint, TransactionData};

@@ -297,7 +298,7 @@ pub trait Watch<ChannelSigner: WriteableEcdsaChannelSigner> {
 ///
 /// For details on asynchronous [`ChannelMonitor`] updating and returning
 /// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
-fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
+fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>;
 }

 /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
@@ -9,9 +9,7 @@

 //! Types describing on-chain transactions.

-use crate::ln::ChannelId;
 use bitcoin::hash_types::Txid;
-use bitcoin::hashes::Hash;
 use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
 use bitcoin::blockdata::transaction::Transaction;

@@ -58,11 +56,6 @@ pub struct OutPoint {
 }

 impl OutPoint {
-/// Convert an `OutPoint` to a lightning channel id.
-pub fn to_channel_id(&self) -> ChannelId {
-ChannelId::v1_from_funding_txid(self.txid.as_byte_array(), self.index)
-}
-
 /// Converts this OutPoint into the OutPoint field as used by rust-bitcoin
 ///
 /// This is not exported to bindings users as the same type is used universally in the C bindings

@@ -86,6 +79,7 @@ impl_writeable!(OutPoint, { txid, index });
 #[cfg(test)]
 mod tests {
 use crate::chain::transaction::OutPoint;
+use crate::ln::ChannelId;

 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::consensus::encode;

@@ -94,13 +88,13 @@ mod tests {
 #[test]
 fn test_channel_id_calculation() {
 let tx: Transaction = encode::deserialize(&<Vec<u8>>::from_hex("020000000001010e0adef48412e4361325ac1c6e36411299ab09d4f083b9d8ddb55fbc06e1b0c00000000000feffffff0220a1070000000000220020f81d95e040bd0a493e38bae27bff52fe2bb58b93b293eb579c01c31b05c5af1dc072cfee54a3000016001434b1d6211af5551905dc2642d05f5b04d25a8fe80247304402207f570e3f0de50546aad25a872e3df059d277e776dda4269fa0d2cc8c2ee6ec9a022054e7fae5ca94d47534c86705857c24ceea3ad51c69dd6051c5850304880fc43a012103cb11a1bacc223d98d91f1946c6752e358a5eb1a1c983b3e6fb15378f453b76bd00000000").unwrap()[..]).unwrap();
-assert_eq!(&OutPoint {
+assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint {
 txid: tx.txid(),
 index: 0
-}.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
-assert_eq!(&OutPoint {
+}).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
+assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint {
 txid: tx.txid(),
 index: 1
-}.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
+}).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
 }
 }
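As the two expected vectors in the updated test suggest (the IDs for outputs 0 and 1 of the same funding transaction differ only in the last byte), the v1 channel ID is the funding txid with the big-endian output index XORed into its final two bytes. A hedged sketch of that derivation, not code from this diff:

    // Assumes txid_bytes: [u8; 32] (as returned by Txid::as_byte_array) and index: u16.
    let mut id = txid_bytes;
    id[30] ^= (index >> 8) as u8;
    id[31] ^= (index & 0xff) as u8;
    // `id` now matches ChannelId::v1_from_funding_txid(&txid_bytes, index).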
@@ -21,7 +21,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
 use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::errors::APIError;

@@ -1861,7 +1861,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:

 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
-let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
 check_added_monitors!(nodes[1], 1);
@@ -816,7 +816,7 @@ pub(super) struct ReestablishResponses {
 pub(crate) struct ShutdownResult {
 pub(crate) closure_reason: ClosureReason,
 /// A channel monitor update to apply.
-pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
+pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
 /// An unbroadcasted batch funding transaction id. The closure of this channel should be

@@ -2394,10 +2394,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 };
 if generate_monitor_update {
 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
-Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
 update_id: self.latest_monitor_update_id,
 counterparty_node_id: Some(self.counterparty_node_id),
 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+channel_id: Some(self.channel_id()),
 }))
 } else { None }
 } else { None };

@@ -2777,6 +2778,7 @@ impl<SP: Deref> Channel<SP> where
 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
 payment_preimage: payment_preimage_arg.clone(),
 }],
+channel_id: Some(self.context.channel_id()),
 };

 if self.context.channel_state.should_force_holding_cell() {
@@ -3515,7 +3517,8 @@
 htlc_outputs: htlcs_and_sigs,
 claimed_htlcs,
 nondust_htlc_sources,
-}]
+}],
+channel_id: Some(self.context.channel_id()),
 };

 self.context.cur_holder_commitment_transaction_number -= 1;

@@ -3591,6 +3594,7 @@
 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
 counterparty_node_id: Some(self.context.counterparty_node_id),
 updates: Vec::new(),
+channel_id: Some(self.context.channel_id()),
 };

 let mut htlc_updates = Vec::new();

@@ -3769,6 +3773,7 @@
 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
 secret: msg.per_commitment_secret,
 }],
+channel_id: Some(self.context.channel_id()),
 };

 // Update state now that we've passed all the can-fail calls...

@@ -4826,6 +4831,7 @@
 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
 scriptpubkey: self.get_closing_scriptpubkey(),
 }],
+channel_id: Some(self.context.channel_id()),
 };
 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
 self.push_ret_blockable_mon_update(monitor_update)
@@ -5965,7 +5971,8 @@
 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
-}]
+}],
+channel_id: Some(self.context.channel_id()),
 };
 self.context.channel_state.set_awaiting_remote_revoke();
 monitor_update

@@ -6159,6 +6166,7 @@
 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
 scriptpubkey: self.get_closing_scriptpubkey(),
 }],
+channel_id: Some(self.context.channel_id()),
 };
 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
 self.push_ret_blockable_mon_update(monitor_update)
@@ -6475,7 +6483,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 // Now that we're past error-generating stuff, update our local state:

 self.context.channel_state = ChannelState::FundingNegotiated;
-self.context.channel_id = funding_txo.to_channel_id();
+self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);

 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
 // We can skip this if it is a zero-conf channel.

@@ -6814,7 +6822,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 &self.context.channel_transaction_parameters,
 funding_redeemscript.clone(), self.context.channel_value_satoshis,
 obscure_factor,
-holder_commitment_tx, best_block, self.context.counterparty_node_id);
+holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
 channel_monitor.provide_initial_counterparty_commitment_tx(
 counterparty_initial_bitcoin_tx.txid, Vec::new(),
 self.context.cur_counterparty_commitment_transaction_number,

@@ -7356,7 +7364,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 // Now that we're past error-generating stuff, update our local state:

 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
-self.context.channel_id = funding_txo.to_channel_id();
+self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
 self.context.cur_counterparty_commitment_transaction_number -= 1;
 self.context.cur_holder_commitment_transaction_number -= 1;

@@ -7374,7 +7382,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 &self.context.channel_transaction_parameters,
 funding_redeemscript.clone(), self.context.channel_value_satoshis,
 obscure_factor,
-holder_commitment_tx, best_block, self.context.counterparty_node_id);
+holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
 channel_monitor.provide_initial_counterparty_commitment_tx(
 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
 self.context.cur_counterparty_commitment_transaction_number + 1,
@@ -9,11 +9,13 @@

 //! ChannelId definition.

+use crate::chain::transaction::OutPoint;
+use crate::io;
 use crate::ln::msgs::DecodeError;
 use crate::sign::EntropySource;
 use crate::util::ser::{Readable, Writeable, Writer};

-use crate::io;
+use bitcoin::hashes::Hash as _;
 use core::fmt;
 use core::ops::Deref;

@@ -40,6 +42,11 @@ impl ChannelId {
 Self(res)
 }

+/// Create _v1_ channel ID from a funding tx outpoint
+pub fn v1_from_funding_outpoint(outpoint: OutPoint) -> Self {
+Self::v1_from_funding_txid(outpoint.txid.as_byte_array(), outpoint.index)
+}
+
 /// Create a _temporary_ channel ID randomly, based on an entropy source.
 pub fn temporary_from_entropy_source<ES: Deref>(entropy_source: &ES) -> Self
 where ES::Target: EntropySource {
@@ -288,6 +288,7 @@ pub(super) struct PendingAddHTLCInfo {
 // Note that this may be an outbound SCID alias for the associated channel.
 prev_short_channel_id: u64,
 prev_htlc_id: u64,
+prev_channel_id: ChannelId,
 prev_funding_outpoint: OutPoint,
 prev_user_channel_id: u128,
 }

@@ -328,6 +329,7 @@ pub(crate) struct HTLCPreviousHopData {
 incoming_packet_shared_secret: [u8; 32],
 phantom_shared_secret: Option<[u8; 32]>,
 blinded_failure: Option<BlindedFailure>,
+channel_id: ChannelId,

 // This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
 // channel with a preimage provided by the forward channel.

@@ -368,7 +370,7 @@ struct ClaimableHTLC {
 impl From<&ClaimableHTLC> for events::ClaimedHTLC {
 fn from(val: &ClaimableHTLC) -> Self {
 events::ClaimedHTLC {
-channel_id: val.prev_hop.outpoint.to_channel_id(),
+channel_id: val.prev_hop.channel_id,
 user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
 cltv_expiry: val.cltv_expiry,
 value_msat: val.value,
@@ -707,7 +709,7 @@ enum BackgroundEvent {
 ///
 /// Note that any such events are lost on shutdown, so in general they must be updates which
 /// are regenerated on startup.
-ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
 /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
 /// channel to continue normal operation.
 ///

@@ -721,6 +723,7 @@ enum BackgroundEvent {
 MonitorUpdateRegeneratedOnStartup {
 counterparty_node_id: PublicKey,
 funding_txo: OutPoint,
+channel_id: ChannelId,
 update: ChannelMonitorUpdate
 },
 /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have

@@ -749,7 +752,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
 /// outbound edge.
 EmitEventAndFreeOtherChannel {
 event: events::Event,
-downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>,
 },
 /// Indicates we should immediately resume the operation of another channel, unless there is
 /// some other reason why the channel is blocked. In practice this simply means immediately

@@ -767,6 +770,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
 downstream_counterparty_node_id: PublicKey,
 downstream_funding_outpoint: OutPoint,
 blocking_action: RAAMonitorUpdateBlockingAction,
+downstream_channel_id: ChannelId,
 },
 }
@@ -778,6 +782,9 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
 (0, downstream_counterparty_node_id, required),
 (2, downstream_funding_outpoint, required),
 (4, blocking_action, required),
+// Note that by the time we get past the required read above, downstream_funding_outpoint will be
+// filled in, so we can safely unwrap it here.
+(5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
 },
 (2, EmitEventAndFreeOtherChannel) => {
 (0, event, upgradable_required),

@@ -795,12 +802,16 @@ pub(crate) enum EventCompletionAction {
 ReleaseRAAChannelMonitorUpdate {
 counterparty_node_id: PublicKey,
 channel_funding_outpoint: OutPoint,
+channel_id: ChannelId,
 },
 }
 impl_writeable_tlv_based_enum!(EventCompletionAction,
 (0, ReleaseRAAChannelMonitorUpdate) => {
 (0, channel_funding_outpoint, required),
 (2, counterparty_node_id, required),
+// Note that by the time we get past the required read above, channel_funding_outpoint will be
+// filled in, so we can safely unwrap it here.
+(3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
 };
 );
@@ -822,7 +833,7 @@ pub(crate) enum RAAMonitorUpdateBlockingAction {
 impl RAAMonitorUpdateBlockingAction {
 fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
 Self::ForwardedPaymentInboundClaim {
-channel_id: prev_hop.outpoint.to_channel_id(),
+channel_id: prev_hop.channel_id,
 htlc_id: prev_hop.htlc_id,
 }
 }

@@ -1626,8 +1637,8 @@ pub struct ChannelDetails {
 /// The Channel's funding transaction output, if we've negotiated the funding transaction with
 /// our counterparty already.
 ///
-/// Note that, if this has been set, `channel_id` will be equivalent to
-/// `funding_txo.unwrap().to_channel_id()`.
+/// Note that, if this has been set, `channel_id` for V1-established channels will be equivalent to
+/// `ChannelId::v1_from_funding_outpoint(funding_txo.unwrap())`.
 pub funding_txo: Option<OutPoint>,
 /// The features which this channel operates with. See individual features for more info.
 ///
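The documented relationship only holds for V1-established channels, so any check of it has to be scoped accordingly. A hedged sketch, with `details` assumed to be a `ChannelDetails` for a V1-established channel:

    if let Some(funding_txo) = details.funding_txo {
        let v1_id = ChannelId::v1_from_funding_outpoint(funding_txo);
        // Equality is only guaranteed for V1-established channels; V2 channel IDs are derived differently.
        assert_eq!(v1_id, details.channel_id);
    }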
@@ -2285,7 +2296,7 @@ macro_rules! handle_new_monitor_update {
 handle_new_monitor_update!($self, $update_res, $chan, _internal,
 handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
 };
-($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+($self: ident, $funding_txo: expr, $channel_id: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
 let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
 .or_insert_with(Vec::new);
 // During startup, we push monitor updates as background events through to here in

@@ -2742,7 +2753,7 @@ where

 // Update the monitor with the shutdown script if necessary.
 if let Some(monitor_update) = monitor_update_opt.take() {
-handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+handle_new_monitor_update!(self, funding_txo_opt.unwrap(), *channel_id, monitor_update,
 peer_state_lock, peer_state, per_peer_state, chan);
 }
 } else {

@@ -2853,7 +2864,7 @@ where
 let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 }
-if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
+if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
 // There isn't anything we can do if we get an update failure - we're already
 // force-closing. The monitor update on the required in-memory copy should broadcast
 // the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -3403,7 +3414,7 @@ where
 }, onion_packet, None, &self.fee_estimator, &&logger);
 match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
 Some(monitor_update) => {
-match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+match handle_new_monitor_update!(self, funding_txo, channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
 false => {
 // Note that MonitorUpdateInProgress here indicates (per function
 // docs) that we will resend the commitment update once monitor

@@ -3952,7 +3963,10 @@ where
 }
 let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
 if let Some(funding_batch_state) = funding_batch_state.as_mut() {
-funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
+// TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably
+// need to fix this somehow to not rely on using the outpoint for the channel ID if we
+// want to support V2 batching here as well.
+funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
 }
 Ok(outpoint)
 })
@@ -4177,6 +4191,7 @@ where
 let mut per_source_pending_forward = [(
 payment.prev_short_channel_id,
 payment.prev_funding_outpoint,
+payment.prev_channel_id,
 payment.prev_user_channel_id,
 vec![(pending_htlc_info, payment.prev_htlc_id)]
 )];

@@ -4204,6 +4219,7 @@ where
 short_channel_id: payment.prev_short_channel_id,
 user_channel_id: Some(payment.prev_user_channel_id),
 outpoint: payment.prev_funding_outpoint,
+channel_id: payment.prev_channel_id,
 htlc_id: payment.prev_htlc_id,
 incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
 phantom_shared_secret: None,

@@ -4227,7 +4243,7 @@ where

 let mut new_events = VecDeque::new();
 let mut failed_forwards = Vec::new();
-let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
+let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
 {
 let mut forward_htlcs = HashMap::new();
 mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
@@ -4240,20 +4256,21 @@ where
 for forward_info in pending_forwards.drain(..) {
 match forward_info {
 HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-forward_info: PendingHTLCInfo {
+prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+prev_user_channel_id, forward_info: PendingHTLCInfo {
 routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
 outgoing_cltv_value, ..
 }
 }) => {
 macro_rules! failure_handler {
 ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
-let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
 log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);

 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 short_channel_id: prev_short_channel_id,
 user_channel_id: Some(prev_user_channel_id),
+channel_id: prev_channel_id,
 outpoint: prev_funding_outpoint,
 htlc_id: prev_htlc_id,
 incoming_packet_shared_secret: incoming_shared_secret,
@@ -4317,7 +4334,7 @@ where
 outgoing_cltv_value, Some(phantom_shared_secret), false, None,
 current_height, self.default_configuration.accept_mpp_keysend)
 {
-Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
+Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])),
 Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
 }
 },

@@ -4362,8 +4379,8 @@ where
 for forward_info in pending_forwards.drain(..) {
 let queue_fail_htlc_res = match forward_info {
 HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-forward_info: PendingHTLCInfo {
+prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+prev_user_channel_id, forward_info: PendingHTLCInfo {
 incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
 routing: PendingHTLCRouting::Forward {
 onion_packet, blinded, ..

@@ -4374,6 +4391,7 @@ where
 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 short_channel_id: prev_short_channel_id,
 user_channel_id: Some(prev_user_channel_id),
+channel_id: prev_channel_id,
 outpoint: prev_funding_outpoint,
 htlc_id: prev_htlc_id,
 incoming_packet_shared_secret: incoming_shared_secret,
@@ -4445,8 +4463,8 @@ where
 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
 match forward_info {
 HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-forward_info: PendingHTLCInfo {
+prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+prev_user_channel_id, forward_info: PendingHTLCInfo {
 routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
 skimmed_fee_msat, ..
 }

@@ -4480,6 +4498,7 @@ where
 prev_hop: HTLCPreviousHopData {
 short_channel_id: prev_short_channel_id,
 user_channel_id: Some(prev_user_channel_id),
+channel_id: prev_channel_id,
 outpoint: prev_funding_outpoint,
 htlc_id: prev_htlc_id,
 incoming_packet_shared_secret: incoming_shared_secret,

@@ -4511,6 +4530,7 @@ where
 failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
 short_channel_id: $htlc.prev_hop.short_channel_id,
 user_channel_id: $htlc.prev_hop.user_channel_id,
+channel_id: prev_channel_id,
 outpoint: prev_funding_outpoint,
 htlc_id: $htlc.prev_hop.htlc_id,
 incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
@@ -4591,7 +4611,6 @@ where
 #[allow(unused_assignments)] {
 committed_to_claimable = true;
 }
-let prev_channel_id = prev_funding_outpoint.to_channel_id();
 htlcs.push(claimable_htlc);
 let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
 htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
@@ -4735,23 +4754,23 @@ where

 for event in background_events.drain(..) {
 match event {
-BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
 // The channel has already been closed, so no use bothering to care about the
 // monitor updating completing.
 let _ = self.chain_monitor.update_channel(funding_txo, &update);
 },
-BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
 let mut updated_chan = false;
 {
 let per_peer_state = self.per_peer_state.read().unwrap();
 if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 let peer_state = &mut *peer_state_lock;
-match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+match peer_state.channel_by_id.entry(channel_id) {
 hash_map::Entry::Occupied(mut chan_phase) => {
 if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
 updated_chan = true;
-handle_new_monitor_update!(self, funding_txo, update.clone(),
+handle_new_monitor_update!(self, funding_txo, channel_id, update.clone(),
 peer_state_lock, peer_state, per_peer_state, chan);
 } else {
 debug_assert!(false, "We shouldn't have an update for a non-funded channel");
@@ -5286,10 +5305,10 @@ where
 },
 HTLCSource::PreviousHopData(HTLCPreviousHopData {
 ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
-ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
+ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
 }) => {
 log_trace!(
-WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
+WithContext::from(&self.logger, None, Some(*channel_id)),
 "Failing {}HTLC with payment_hash {} backwards from us: {:?}",
 if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
 );

@@ -5333,7 +5352,7 @@ where
 if push_forward_ev { self.push_pending_forwards_ev(); }
 let mut pending_events = self.pending_events.lock().unwrap();
 pending_events.push_back((events::Event::HTLCHandlingFailed {
-prev_channel_id: outpoint.to_channel_id(),
+prev_channel_id: *channel_id,
 failed_next_destination: destination,
 }, None));
 },
@@ -5474,7 +5493,7 @@ where
 }
 if valid_mpp {
 for htlc in sources.drain(..) {
-let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
+let prev_hop_chan_id = htlc.prev_hop.channel_id;
 if let Err((pk, err)) = self.claim_funds_from_hop(
 htlc.prev_hop, payment_preimage,
 |_, definitely_duplicate| {

@@ -5527,7 +5546,7 @@ where

 {
 let per_peer_state = self.per_peer_state.read().unwrap();
-let chan_id = prev_hop.outpoint.to_channel_id();
+let chan_id = prev_hop.channel_id;
 let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
 Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
 None => None

@@ -5555,7 +5574,7 @@ where
 peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
 }
 if !during_init {
-handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+handle_new_monitor_update!(self, prev_hop.outpoint, prev_hop.channel_id, monitor_update, peer_state_lock,
 peer_state, per_peer_state, chan);
 } else {
 // If we're running during init we cannot update a monitor directly -
@@ -5565,6 +5584,7 @@ where
 BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 counterparty_node_id,
 funding_txo: prev_hop.outpoint,
+channel_id: prev_hop.channel_id,
 update: monitor_update.clone(),
 });
 }

@@ -5579,13 +5599,13 @@ where

 log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
 chan_id, action);
-let (node_id, funding_outpoint, blocker) =
+let (node_id, _funding_outpoint, channel_id, blocker) =
 if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
 downstream_counterparty_node_id: node_id,
 downstream_funding_outpoint: funding_outpoint,
-blocking_action: blocker,
+blocking_action: blocker, downstream_channel_id: channel_id,
 } = action {
-(node_id, funding_outpoint, blocker)
+(node_id, funding_outpoint, channel_id, blocker)
 } else {
 debug_assert!(false,
 "Duplicate claims should always free another channel immediately");
@@ -5595,7 +5615,7 @@ where
 let mut peer_state = peer_state_mtx.lock().unwrap();
 if let Some(blockers) = peer_state
 .actions_blocking_raa_monitor_updates
-.get_mut(&funding_outpoint.to_channel_id())
+.get_mut(&channel_id)
 {
 let mut found_blocker = false;
 blockers.retain(|iter| {

@@ -5624,6 +5644,7 @@ where
 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
 payment_preimage,
 }],
+channel_id: Some(prev_hop.channel_id),
 };

 if !during_init {

@@ -5635,7 +5656,8 @@ where
 // with a preimage we *must* somehow manage to propagate it to the upstream
 // channel, or we must have an ability to receive the same event and try
 // again on restart.
-log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
+"Critical error: failed to update channel monitor with preimage {:?}: {:?}",
 payment_preimage, update_res);
 }
 } else {
@ -5651,7 +5673,7 @@ where
|
|||
// complete the monitor update completion action from `completion_action`.
|
||||
self.pending_background_events.lock().unwrap().push(
|
||||
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
|
||||
prev_hop.outpoint, preimage_update,
|
||||
prev_hop.outpoint, prev_hop.channel_id, preimage_update,
|
||||
)));
|
||||
}
|
||||
// Note that we do process the completion action here. This totally could be a
|
||||
|
@ -5669,7 +5691,8 @@ where
|
|||
|
||||
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
|
||||
forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
|
||||
next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
|
||||
next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint,
|
||||
next_channel_id: ChannelId,
|
||||
) {
|
||||
match source {
|
||||
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
|
||||
|
@ -5679,7 +5702,7 @@ where
|
|||
debug_assert_eq!(pubkey, path.hops[0].pubkey);
|
||||
}
|
||||
let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
|
||||
channel_funding_outpoint: next_channel_outpoint,
|
||||
channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
|
||||
counterparty_node_id: path.hops[0].pubkey,
|
||||
};
|
||||
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
|
||||
|
@ -5687,15 +5710,17 @@ where
|
|||
&self.logger);
|
||||
},
|
||||
HTLCSource::PreviousHopData(hop_data) => {
|
||||
let prev_outpoint = hop_data.outpoint;
|
||||
let prev_channel_id = hop_data.channel_id;
|
||||
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
|
||||
#[cfg(debug_assertions)]
|
||||
let claiming_chan_funding_outpoint = hop_data.outpoint;
|
||||
#[cfg(debug_assertions)]
|
||||
let claiming_channel_id = hop_data.channel_id;
|
||||
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|
||||
|htlc_claim_value_msat, definitely_duplicate| {
|
||||
let chan_to_release =
|
||||
if let Some(node_id) = next_channel_counterparty_node_id {
|
||||
Some((node_id, next_channel_outpoint, completed_blocker))
|
||||
Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker))
|
||||
} else {
|
||||
// We can only get `None` here if we are processing a
|
||||
// `ChannelMonitor`-originated event, in which case we
|
||||
|
@ -5732,7 +5757,7 @@ where
|
|||
},
|
||||
// or the channel we'd unblock is already closed,
|
||||
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
|
||||
(funding_txo, monitor_update)
|
||||
(funding_txo, _channel_id, monitor_update)
|
||||
) => {
|
||||
if *funding_txo == next_channel_outpoint {
|
||||
assert_eq!(monitor_update.updates.len(), 1);
|
||||
|
@ -5748,7 +5773,7 @@ where
|
|||
BackgroundEvent::MonitorUpdatesComplete {
|
||||
channel_id, ..
|
||||
} =>
|
||||
*channel_id == claiming_chan_funding_outpoint.to_channel_id(),
|
||||
*channel_id == claiming_channel_id,
|
||||
}
|
||||
}), "{:?}", *background_events);
|
||||
}
|
||||
|
@ -5758,7 +5783,8 @@ where
|
|||
Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
|
||||
downstream_counterparty_node_id: other_chan.0,
|
||||
downstream_funding_outpoint: other_chan.1,
|
||||
blocking_action: other_chan.2,
|
||||
downstream_channel_id: other_chan.2,
|
||||
blocking_action: other_chan.3,
|
||||
})
|
||||
} else { None }
|
||||
} else {
|
||||
|
@ -5771,8 +5797,8 @@ where
|
|||
event: events::Event::PaymentForwarded {
|
||||
fee_earned_msat,
|
||||
claim_from_onchain_tx: from_onchain,
|
||||
prev_channel_id: Some(prev_outpoint.to_channel_id()),
|
||||
next_channel_id: Some(next_channel_outpoint.to_channel_id()),
|
||||
prev_channel_id: Some(prev_channel_id),
|
||||
next_channel_id: Some(next_channel_id),
|
||||
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
|
||||
},
|
||||
downstream_counterparty_and_funding_outpoint: chan_to_release,
|
||||
|
@ -5822,16 +5848,17 @@ where
|
|||
event, downstream_counterparty_and_funding_outpoint
|
||||
} => {
|
||||
self.pending_events.lock().unwrap().push_back((event, None));
|
||||
if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
|
||||
self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
|
||||
if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint {
|
||||
self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
|
||||
}
|
||||
},
|
||||
MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
|
||||
downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
|
||||
downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
|
||||
} => {
|
||||
self.handle_monitor_update_release(
|
||||
downstream_counterparty_node_id,
|
||||
downstream_funding_outpoint,
|
||||
downstream_channel_id,
|
||||
Some(blocking_action),
|
||||
);
|
||||
},
|
||||
|
@ -5846,7 +5873,7 @@ where
|
|||
commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
|
||||
pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
|
||||
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
|
||||
-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
|
||||
-> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> {
|
||||
let logger = WithChannelContext::from(&self.logger, &channel.context);
|
||||
log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
|
||||
&channel.context.channel_id(),
|
||||
|
@ -5861,7 +5888,7 @@ where
|
|||
let counterparty_node_id = channel.context.get_counterparty_node_id();
|
||||
if !pending_forwards.is_empty() {
|
||||
htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
|
||||
channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
|
||||
channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
|
||||
}
|
||||
|
||||
if let Some(msg) = channel_ready {
|
||||
|
@ -5915,7 +5942,7 @@ where
|
|||
htlc_forwards
|
||||
}
|
||||
|
||||
fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
|
||||
fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
|
||||
debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
|
||||
|
||||
let counterparty_node_id = match counterparty_node_id {
|
||||
|
@ -5937,11 +5964,11 @@ where
|
|||
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
|
||||
let peer_state = &mut *peer_state_lock;
|
||||
let channel =
|
||||
if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
|
||||
if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
|
||||
chan
|
||||
} else {
|
||||
let update_actions = peer_state.monitor_update_blocked_actions
|
||||
.remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
|
||||
.remove(&channel_id).unwrap_or(Vec::new());
|
||||
mem::drop(peer_state_lock);
|
||||
mem::drop(per_peer_state);
|
||||
self.handle_monitor_update_completion_actions(update_actions);
|
||||
|
@ -6543,7 +6570,7 @@ where
|
|||
}
|
||||
// Update the monitor with the shutdown script if necessary.
|
||||
if let Some(monitor_update) = monitor_update_opt {
|
||||
handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
|
||||
handle_new_monitor_update!(self, funding_txo_opt.unwrap(), chan.context.channel_id(), monitor_update,
|
||||
peer_state_lock, peer_state, per_peer_state, chan);
|
||||
}
|
||||
},
|
||||
|
@ -6750,7 +6777,8 @@ where
|
|||
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
|
||||
}
|
||||
};
|
||||
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo);
|
||||
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value),
|
||||
false, false, Some(*counterparty_node_id), funding_txo, msg.channel_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -6824,7 +6852,7 @@ where
|
|||
let funding_txo = chan.context.get_funding_txo();
|
||||
let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
|
||||
if let Some(monitor_update) = monitor_update_opt {
|
||||
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
|
||||
handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock,
|
||||
peer_state, per_peer_state, chan);
|
||||
}
|
||||
Ok(())
|
||||
|
@ -6838,8 +6866,8 @@ where
|
|||
}
|
||||
|
||||
#[inline]
|
||||
fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
|
||||
for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
|
||||
fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
|
||||
for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
|
||||
let mut push_forward_event = false;
|
||||
let mut new_intercept_events = VecDeque::new();
|
||||
let mut failed_intercept_forwards = Vec::new();
|
||||
|
@ -6858,7 +6886,7 @@ where
|
|||
match forward_htlcs.entry(scid) {
|
||||
hash_map::Entry::Occupied(mut entry) => {
|
||||
entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
|
||||
prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
|
||||
prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }));
|
||||
},
|
||||
hash_map::Entry::Vacant(entry) => {
|
||||
if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
|
||||
|
@ -6876,15 +6904,16 @@ where
|
|||
intercept_id
|
||||
}, None));
|
||||
entry.insert(PendingAddHTLCInfo {
|
||||
prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
|
||||
prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
|
||||
},
|
||||
hash_map::Entry::Occupied(_) => {
|
||||
let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
|
||||
let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
|
||||
log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
|
||||
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
|
||||
short_channel_id: prev_short_channel_id,
|
||||
user_channel_id: Some(prev_user_channel_id),
|
||||
outpoint: prev_funding_outpoint,
|
||||
channel_id: prev_channel_id,
|
||||
htlc_id: prev_htlc_id,
|
||||
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
|
||||
phantom_shared_secret: None,
|
||||
|
@ -6904,7 +6933,7 @@ where
|
|||
push_forward_event = true;
|
||||
}
|
||||
entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
|
||||
prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
|
||||
prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -6948,13 +6977,14 @@ where
|
|||
/// the [`ChannelMonitorUpdate`] in question.
|
||||
fn raa_monitor_updates_held(&self,
|
||||
actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
|
||||
channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
|
||||
channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
|
||||
) -> bool {
|
||||
actions_blocking_raa_monitor_updates
|
||||
.get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
|
||||
.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
|
||||
|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
|
||||
action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
|
||||
channel_funding_outpoint,
|
||||
channel_id,
|
||||
counterparty_node_id,
|
||||
})
|
||||
})
|
||||
|
@ -6971,7 +7001,7 @@ where
|
|||
|
||||
if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
|
||||
return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
|
||||
chan.context().get_funding_txo().unwrap(), counterparty_node_id);
|
||||
chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
|
||||
}
|
||||
}
|
||||
false
|
||||
|
@ -6993,7 +7023,7 @@ where
|
|||
let funding_txo_opt = chan.context.get_funding_txo();
|
||||
let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
|
||||
self.raa_monitor_updates_held(
|
||||
&peer_state.actions_blocking_raa_monitor_updates, funding_txo,
|
||||
&peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
|
||||
*counterparty_node_id)
|
||||
} else { false };
|
||||
let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
|
||||
|
@ -7001,7 +7031,7 @@ where
|
|||
if let Some(monitor_update) = monitor_update_opt {
|
||||
let funding_txo = funding_txo_opt
|
||||
.expect("Funding outpoint must have been set for RAA handling to succeed");
|
||||
handle_new_monitor_update!(self, funding_txo, monitor_update,
|
||||
handle_new_monitor_update!(self, funding_txo, chan.context.channel_id(), monitor_update,
|
||||
peer_state_lock, peer_state, per_peer_state, chan);
|
||||
}
|
||||
htlcs_to_fail
|
||||
|
@ -7239,22 +7269,22 @@ where
|
|||
let mut failed_channels = Vec::new();
|
||||
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
|
||||
let has_pending_monitor_events = !pending_monitor_events.is_empty();
|
||||
for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
|
||||
for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
|
||||
for monitor_event in monitor_events.drain(..) {
|
||||
match monitor_event {
|
||||
MonitorEvent::HTLCEvent(htlc_update) => {
|
||||
let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
|
||||
let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
|
||||
if let Some(preimage) = htlc_update.payment_preimage {
|
||||
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
|
||||
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
|
||||
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint, channel_id);
|
||||
} else {
|
||||
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
|
||||
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
|
||||
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
|
||||
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
|
||||
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
|
||||
}
|
||||
},
|
||||
MonitorEvent::HolderForceClosed(funding_outpoint) => {
|
||||
MonitorEvent::HolderForceClosed(_funding_outpoint) => {
|
||||
let counterparty_node_id_opt = match counterparty_node_id {
|
||||
Some(cp_id) => Some(cp_id),
|
||||
None => {
|
||||
|
@ -7270,7 +7300,7 @@ where
|
|||
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
|
||||
let peer_state = &mut *peer_state_lock;
|
||||
let pending_msg_events = &mut peer_state.pending_msg_events;
|
||||
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
|
||||
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
|
||||
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
|
||||
failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
|
||||
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
|
||||
|
@ -7289,8 +7319,8 @@ where
|
|||
}
|
||||
}
|
||||
},
|
||||
MonitorEvent::Completed { funding_txo, monitor_update_id } => {
|
||||
self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref());
|
||||
MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
|
||||
self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -7342,7 +7372,7 @@ where
|
|||
if let Some(monitor_update) = monitor_opt {
|
||||
has_monitor_update = true;
|
||||
|
||||
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
|
||||
handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update,
|
||||
peer_state_lock, peer_state, per_peer_state, chan);
|
||||
continue 'peer_loop;
|
||||
}
|
||||
|
@ -7507,14 +7537,14 @@ where
|
|||
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
|
||||
// so we track the update internally and handle it when the user next calls
|
||||
// timer_tick_occurred, guaranteeing we're running normally.
|
||||
if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
|
||||
if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() {
|
||||
assert_eq!(update.updates.len(), 1);
|
||||
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
|
||||
assert!(should_broadcast);
|
||||
} else { unreachable!(); }
|
||||
self.pending_background_events.lock().unwrap().push(
|
||||
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
|
||||
counterparty_node_id, funding_txo, update
|
||||
counterparty_node_id, funding_txo, update, channel_id,
|
||||
});
|
||||
}
|
||||
self.finish_close_channel(failure);
|
||||
|
@ -8073,9 +8103,12 @@ where
|
|||
/// [`Event`] being handled) completes, this should be called to restore the channel to normal
|
||||
/// operation. It will double-check that nothing *else* is also blocking the same channel from
|
||||
/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
|
||||
fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
|
||||
fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
|
||||
channel_funding_outpoint: OutPoint, channel_id: ChannelId,
|
||||
mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
|
||||
|
||||
let logger = WithContext::from(
|
||||
&self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
|
||||
&self.logger, Some(counterparty_node_id), Some(channel_id),
|
||||
);
|
||||
loop {
|
||||
let per_peer_state = self.per_peer_state.read().unwrap();
|
||||
|
@ -8085,29 +8118,30 @@ where
|
|||
if let Some(blocker) = completed_blocker.take() {
|
||||
// Only do this on the first iteration of the loop.
|
||||
if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
|
||||
.get_mut(&channel_funding_outpoint.to_channel_id())
|
||||
.get_mut(&channel_id)
|
||||
{
|
||||
blockers.retain(|iter| iter != &blocker);
|
||||
}
|
||||
}
|
||||
|
||||
if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
|
||||
channel_funding_outpoint, counterparty_node_id) {
|
||||
channel_funding_outpoint, channel_id, counterparty_node_id) {
|
||||
// Check that, while holding the peer lock, we don't have anything else
|
||||
// blocking monitor updates for this channel. If we do, release the monitor
|
||||
// update(s) when those blockers complete.
|
||||
log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
|
||||
&channel_funding_outpoint.to_channel_id());
|
||||
&channel_id);
|
||||
break;
|
||||
}
|
||||
|
||||
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
|
||||
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
|
||||
channel_id) {
|
||||
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
|
||||
debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
|
||||
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
|
||||
log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
|
||||
channel_funding_outpoint.to_channel_id());
|
||||
handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
|
||||
channel_id);
|
||||
handle_new_monitor_update!(self, channel_funding_outpoint, channel_id, monitor_update,
|
||||
peer_state_lck, peer_state, per_peer_state, chan);
|
||||
if further_update_exists {
|
||||
// If there are more `ChannelMonitorUpdate`s to process, restart at the
|
||||
|
@ -8116,7 +8150,7 @@ where
|
|||
}
|
||||
} else {
|
||||
log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
|
||||
channel_funding_outpoint.to_channel_id());
|
||||
channel_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -8133,9 +8167,9 @@ where
|
|||
for action in actions {
|
||||
match action {
|
||||
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
|
||||
channel_funding_outpoint, counterparty_node_id
|
||||
channel_funding_outpoint, channel_id, counterparty_node_id
|
||||
} => {
|
||||
self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
|
||||
self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -8531,6 +8565,7 @@ where
|
|||
incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
|
||||
phantom_shared_secret: None,
|
||||
outpoint: htlc.prev_funding_outpoint,
|
||||
channel_id: htlc.prev_channel_id,
|
||||
blinded_failure: htlc.forward_info.routing.blinded_failure(),
|
||||
});
|
||||
|
||||
|
@ -8542,7 +8577,7 @@ where
|
|||
HTLCFailReason::from_failure_code(0x2000 | 2),
|
||||
HTLCDestination::InvalidForward { requested_forward_scid }));
|
||||
let logger = WithContext::from(
|
||||
&self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
|
||||
&self.logger, None, Some(htlc.prev_channel_id)
|
||||
);
|
||||
log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
|
||||
false
|
||||
|
@ -9627,6 +9662,9 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
|
|||
(4, htlc_id, required),
|
||||
(6, incoming_packet_shared_secret, required),
|
||||
(7, user_channel_id, option),
|
||||
// Note that by the time we get past the required read for type 2 above, outpoint will be
|
||||
// filled in, so we can safely unwrap it here.
|
||||
(9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
|
||||
});
|
||||
|
||||
impl Writeable for ClaimableHTLC {
|
||||
|
@ -9778,6 +9816,9 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, {
|
|||
(2, prev_short_channel_id, required),
|
||||
(4, prev_htlc_id, required),
|
||||
(6, prev_funding_outpoint, required),
|
||||
// Note that by the time we get past the required read for type 2 above, prev_funding_outpoint will be
|
||||
// filled in, so we can safely unwrap it here.
|
||||
(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
|
||||
});
|
||||
|
||||
impl Writeable for HTLCForwardInfo {
|
||||
|
@ -10291,12 +10332,14 @@ where
|
|||
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
|
||||
let mut channel_closures = VecDeque::new();
|
||||
let mut close_background_events = Vec::new();
|
||||
let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize);
|
||||
for _ in 0..channel_count {
|
||||
let mut channel: Channel<SP> = Channel::read(reader, (
|
||||
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
|
||||
))?;
|
||||
let logger = WithChannelContext::from(&args.logger, &channel.context);
|
||||
let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
|
||||
funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
|
||||
funding_txo_set.insert(funding_txo.clone());
|
||||
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
|
||||
if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
|
||||
|
@ -10326,9 +10369,9 @@ where
|
|||
if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
|
||||
return Err(DecodeError::InvalidValue);
|
||||
}
|
||||
if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
|
||||
if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update {
|
||||
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
|
||||
counterparty_node_id, funding_txo, update
|
||||
counterparty_node_id, funding_txo, channel_id, update
|
||||
});
|
||||
}
|
||||
failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
|
||||
|
@ -10407,14 +10450,16 @@ where
|
|||
for (funding_txo, monitor) in args.channel_monitors.iter() {
|
||||
if !funding_txo_set.contains(funding_txo) {
|
||||
let logger = WithChannelMonitor::from(&args.logger, monitor);
|
||||
let channel_id = monitor.channel_id();
|
||||
log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
|
||||
&funding_txo.to_channel_id());
|
||||
&channel_id);
|
||||
let monitor_update = ChannelMonitorUpdate {
|
||||
update_id: CLOSED_CHANNEL_UPDATE_ID,
|
||||
counterparty_node_id: None,
|
||||
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
|
||||
channel_id: Some(monitor.channel_id()),
|
||||
};
|
||||
close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
|
||||
close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -10591,12 +10636,13 @@ where
|
|||
$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
|
||||
for update in $chan_in_flight_upds.iter() {
|
||||
log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
|
||||
update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
|
||||
update.update_id, $channel_info_log, &$monitor.channel_id());
|
||||
max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
|
||||
pending_background_events.push(
|
||||
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
|
||||
counterparty_node_id: $counterparty_node_id,
|
||||
funding_txo: $funding_txo,
|
||||
channel_id: $monitor.channel_id(),
|
||||
update: update.clone(),
|
||||
});
|
||||
}
|
||||
|
@ -10607,7 +10653,7 @@ where
|
|||
pending_background_events.push(
|
||||
BackgroundEvent::MonitorUpdatesComplete {
|
||||
counterparty_node_id: $counterparty_node_id,
|
||||
channel_id: $funding_txo.to_channel_id(),
|
||||
channel_id: $monitor.channel_id(),
|
||||
});
|
||||
}
|
||||
if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
|
||||
|
@ -10661,7 +10707,8 @@ where
|
|||
|
||||
if let Some(in_flight_upds) = in_flight_monitor_updates {
|
||||
for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
|
||||
let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
|
||||
let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
|
||||
let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
|
||||
if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
|
||||
// Now that we've removed all the in-flight monitor updates for channels that are
|
||||
// still open, we need to replay any monitor updates that are for closed channels,
|
||||
|
@ -10674,8 +10721,8 @@ where
|
|||
funding_txo, monitor, peer_state, logger, "closed ");
|
||||
} else {
|
||||
log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
|
||||
log_error!(logger, " The ChannelMonitor for channel {} is missing.",
|
||||
&funding_txo.to_channel_id());
|
||||
log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
|
||||
channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } );
|
||||
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
|
||||
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
|
||||
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
|
||||
|
@ -10763,7 +10810,7 @@ where
|
|||
if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
|
||||
if pending_forward_matches_htlc(&htlc_info) {
|
||||
log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
|
||||
&htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
|
||||
&htlc.payment_hash, &monitor.channel_id());
|
||||
false
|
||||
} else { true }
|
||||
} else { true }
|
||||
|
@ -10773,7 +10820,7 @@ where
|
|||
pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
|
||||
if pending_forward_matches_htlc(&htlc_info) {
|
||||
log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
|
||||
&htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
|
||||
&htlc.payment_hash, &monitor.channel_id());
|
||||
pending_events_read.retain(|(event, _)| {
|
||||
if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
|
||||
intercepted_id != ev_id
|
||||
|
@ -10797,6 +10844,7 @@ where
|
|||
let compl_action =
|
||||
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
|
||||
channel_funding_outpoint: monitor.get_funding_txo().0,
|
||||
channel_id: monitor.channel_id(),
|
||||
counterparty_node_id: path.hops[0].pubkey,
|
||||
};
|
||||
pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
|
||||
|
@ -10822,7 +10870,7 @@ where
|
|||
// channel_id -> peer map entry).
|
||||
counterparty_opt.is_none(),
|
||||
counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
|
||||
monitor.get_funding_txo().0))
|
||||
monitor.get_funding_txo().0, monitor.channel_id()))
|
||||
} else { None }
|
||||
} else {
|
||||
// If it was an outbound payment, we've handled it above - if a preimage
|
||||
|
@ -10995,7 +11043,7 @@ where
|
|||
// this channel as well. On the flip side, there's no harm in restarting
|
||||
// without the new monitor persisted - we'll end up right back here on
|
||||
// restart.
|
||||
let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
|
||||
let previous_channel_id = claimable_htlc.prev_hop.channel_id;
|
||||
if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
|
||||
let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
|
||||
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
|
||||
|
@ -11028,14 +11076,15 @@ where
|
|||
for action in actions.iter() {
|
||||
if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
|
||||
downstream_counterparty_and_funding_outpoint:
|
||||
Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
|
||||
Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
|
||||
} = action {
|
||||
if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
|
||||
let channel_id = blocked_channel_id;
|
||||
log_trace!(logger,
|
||||
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
|
||||
blocked_channel_outpoint.to_channel_id());
|
||||
channel_id);
|
||||
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
|
||||
.entry(blocked_channel_outpoint.to_channel_id())
|
||||
.entry(*channel_id)
|
||||
.or_insert_with(Vec::new).push(blocking_action.clone());
|
||||
} else {
|
||||
// If the channel we were blocking has closed, we don't need to
|
||||
|
@ -11115,12 +11164,12 @@ where
|
|||
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
|
||||
}
|
||||
|
||||
for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
|
||||
for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
|
||||
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
|
||||
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
|
||||
// channel is closed we just assume that it probably came from an on-chain claim.
|
||||
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
|
||||
downstream_closed, true, downstream_node_id, downstream_funding);
|
||||
downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
|
||||
}
|
||||
|
||||
//TODO: Broadcast channel update for closed channels, but only after we've made a
|
||||
|
|
|
@@ -252,7 +252,7 @@ pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block)
fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
// Ensure `get_claimable_balances`' self-tests never panic
for funding_outpoint in node.chain_monitor.chain_monitor.list_monitors() {
for (funding_outpoint, _channel_id) in node.chain_monitor.chain_monitor.list_monitors() {
node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances();
}
}
@@ -601,7 +601,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
let feeest = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let mut deserialized_monitors = Vec::new();
{
for outpoint in self.chain_monitor.chain_monitor.list_monitors() {
for (outpoint, _channel_id) in self.chain_monitor.chain_monitor.list_monitors() {
let mut w = test_utils::TestVecWriter(Vec::new());
self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap();
let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
@@ -644,7 +644,8 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
for deserialized_monitor in deserialized_monitors.drain(..) {
if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
let funding_outpoint = deserialized_monitor.get_funding_txo().0;
if chain_monitor.watch_channel(funding_outpoint, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
panic!();
}
}
@@ -1068,7 +1069,8 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
assert!(node_read.is_empty());
for monitor in monitors_read.drain(..) {
assert_eq!(node.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
let funding_outpoint = monitor.get_funding_txo().0;
assert_eq!(node.chain_monitor.watch_channel(funding_outpoint, monitor),
Ok(ChannelMonitorUpdateStatus::Completed));
check_added_monitors!(node, 1);
}
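As the test-utility hunks above show, `list_monitors` now yields the funding outpoint together with the monitor's `ChannelId`, so callers destructure a pair and ignore whichever half they do not need. A small sketch of consuming such a listing, using a hypothetical map in place of a real `ChainMonitor`:

    use std::collections::HashMap;

    // Hypothetical stand-ins; in LDK the listing is a Vec<(OutPoint, ChannelId)>.
    type Outpoint = (String, u16);
    type ChannelId = [u8; 32];

    fn list_monitors(monitors: &HashMap<Outpoint, ChannelId>) -> Vec<(Outpoint, ChannelId)> {
        monitors.iter().map(|(o, c)| (o.clone(), *c)).collect()
    }

    fn main() {
        let mut monitors = HashMap::new();
        monitors.insert(("deadbeef".to_string(), 0), [7u8; 32]);
        // Callers that only need one of the two halves simply ignore the other.
        for (funding_outpoint, _channel_id) in list_monitors(&monitors) {
            println!("monitor for funding output {}:{}", funding_outpoint.0, funding_outpoint.1);
        }
    }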
@@ -8684,7 +8684,7 @@ fn test_pre_lockin_no_chan_closed_update() {
check_added_monitors!(nodes[0], 0);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
@@ -9028,7 +9028,7 @@ fn test_peer_funding_sidechannel() {
check_added_monitors!(nodes[1], 1);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_output.to_channel_id(), true, reason)]);
check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
@@ -9089,7 +9089,7 @@ fn test_duplicate_funding_err_in_funding() {
let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
assert_eq!(real_chan_funding_txo.to_channel_id(), real_channel_id);
assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -9181,7 +9181,7 @@ fn test_duplicate_chan_id() {
let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
let channel_id = funding_outpoint.to_channel_id();
let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
// Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
// temporary one).
@@ -10635,7 +10635,7 @@ fn test_batch_channel_open() {
// Complete the persistence of the monitor.
nodes[0].chain_monitor.complete_sole_pending_chan_update(
&OutPoint { txid: tx.txid(), index: 1 }.to_channel_id()
&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
);
let events = nodes[0].node.get_and_clear_pending_events();
@@ -10692,8 +10692,8 @@ fn test_disconnect_in_funding_batch() {
// The channels in the batch will close immediately.
let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
let channel_id_1 = funding_txo_1.to_channel_id();
let channel_id_2 = funding_txo_2.to_channel_id();
let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
check_closed_events(&nodes[0], &[
ExpectedCloseEvent {
channel_id: Some(channel_id_1),
@@ -10766,8 +10766,8 @@ fn test_batch_funding_close_after_funding_signed() {
// Force-close the channel for which we've completed the initial monitor.
let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
let channel_id_1 = funding_txo_1.to_channel_id();
let channel_id_2 = funding_txo_2.to_channel_id();
let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
check_added_monitors(&nodes[0], 2);
{
@@ -10827,7 +10827,7 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id();
let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });
assert_eq!(nodes[0].node.list_channels().len(), 1);
assert_eq!(nodes[1].node.list_channels().len(), 1);
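The batch-funding tests above pair one funding transaction with output indexes 0 and 1 and expect two distinct channel ids. That follows from the BOLT 2 v1 derivation, where (as assumed here; byte-order details are glossed over) the funding output index is XORed into the last two bytes of the funding txid. A rough sketch, not LDK's implementation:

    // Assumed detail: the output index is XORed into the last two bytes, big-endian.
    fn v1_channel_id(funding_txid: [u8; 32], funding_output_index: u16) -> [u8; 32] {
        let mut id = funding_txid;
        id[30] ^= (funding_output_index >> 8) as u8;
        id[31] ^= (funding_output_index & 0xff) as u8;
        id
    }

    fn main() {
        let txid = [0xab; 32];
        // Two outputs of the same funding transaction give distinct channel ids,
        // which is what tests like test_disconnect_in_funding_batch rely on.
        assert_ne!(v1_channel_id(txid, 0), v1_channel_id(txid, 1));
    }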
@@ -15,7 +15,7 @@ use crate::chain::transaction::OutPoint;
use crate::chain::chaininterface::{LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight};
use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::ln::channel;
use crate::ln::{channel, ChannelId};
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId, RecipientOnionFields};
use crate::ln::msgs::ChannelMessageHandler;
use crate::util::config::UserConfig;
@@ -176,7 +176,7 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) {
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
assert_eq!(funding_outpoint.to_channel_id(), chan_id);
assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64;
let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_id);
@@ -327,7 +327,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) {
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
assert_eq!(funding_outpoint.to_channel_id(), chan_id);
assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
// This HTLC is immediately claimed, giving node B the preimage
let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
@@ -1121,7 +1121,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
assert_eq!(funding_outpoint.to_channel_id(), chan_id);
assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
// We create five HTLCs for B to claim against A's revoked commitment transaction:
//
@@ -1403,7 +1403,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) {
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 12_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
assert_eq!(funding_outpoint.to_channel_id(), chan_id);
assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
let failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 1_000_000).1;
@@ -1705,7 +1705,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) {
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
assert_eq!(funding_outpoint.to_channel_id(), chan_id);
assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
// We create two HTLCs, one which we will give A the preimage to to generate an HTLC-Success
// transaction, and one which we will not, allowing B to claim the HTLC output in an aggregated
@@ -24,6 +24,7 @@ use crate::ln::ChannelId;
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
use crate::util::macro_logger::DebugFundingChannelId;
use crate::util::ser::{VecWriter, Writeable, Writer};
use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE};
use crate::ln::wire;
@@ -2006,7 +2007,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
log_pubkey!(node_id),
&msg.temporary_channel_id,
log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
DebugFundingChannelId(&msg.funding_txid, msg.funding_output_index));
// TODO: If the peer is gone we should generate a DiscardFunding event
// indicating to the wallet that they should just throw away this funding transaction
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
@@ -18,7 +18,7 @@ use crate::ln::channelmanager::{MIN_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnion
use crate::routing::gossip::RoutingFees;
use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop};
use crate::ln::features::ChannelTypeFeatures;
use crate::ln::msgs;
use crate::ln::{msgs, ChannelId};
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ChannelUpdate, ErrorAction};
use crate::ln::wire::Encode;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};
@@ -617,7 +617,7 @@ fn test_0conf_channel_with_async_monitor() {
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
let channel_id = funding_output.to_channel_id();
let channel_id = ChannelId::v1_from_funding_outpoint(funding_output);
nodes[1].chain_monitor.complete_sole_pending_chan_update(&channel_id);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
@@ -16,7 +16,7 @@ use crate::sign::EntropySource;
use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
use crate::ln::msgs;
use crate::ln::{msgs, ChannelId};
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils;
@@ -201,7 +201,7 @@ fn test_no_txn_manager_serialize_deserialize() {
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
let chan_0_monitor_serialized =
get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
get_monitor!(nodes[0], ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 })).encode();
reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
@@ -446,7 +446,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
assert!(nodes_0_read.is_empty());
for monitor in node_0_monitors.drain(..) {
assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
let funding_outpoint = monitor.get_funding_txo().0;
assert_eq!(nodes[0].chain_monitor.watch_channel(funding_outpoint, monitor),
Ok(ChannelMonitorUpdateStatus::Completed));
check_added_monitors!(nodes[0], 1);
}
@@ -829,8 +830,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
// monitors and ChannelManager, for use later, if we don't want to persist both monitors.
let mut original_monitor = test_utils::TestVecWriter(Vec::new());
if !persist_both_monitors {
for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
if outpoint.to_channel_id() == chan_id_not_persisted {
for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
if channel_id == chan_id_not_persisted {
assert!(original_monitor.0.is_empty());
nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
}
@@ -849,16 +850,16 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
// crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
// with the old ChannelManager.
let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
if outpoint.to_channel_id() == chan_id_persisted {
for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
if channel_id == chan_id_persisted {
assert!(updated_monitor.0.is_empty());
nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
}
}
// If `persist_both_monitors` is set, get the second monitor here as well
if persist_both_monitors {
for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
if outpoint.to_channel_id() == chan_id_not_persisted {
for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
if channel_id == chan_id_not_persisted {
assert!(original_monitor.0.is_empty());
nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
}
@@ -1220,7 +1221,7 @@ fn test_reload_partial_funding_batch() {
assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
// Reload the node while a subset of the channels in the funding batch have persisted monitors.
let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
let channel_id_1 = ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 });
let node_encoded = nodes[0].node.encode();
let channel_monitor_1_serialized = get_monitor!(nodes[0], channel_id_1).encode();
reload_node!(nodes[0], node_encoded, &[&channel_monitor_1_serialized], new_persister, new_chain_monitor, new_channel_manager);
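In the reload paths above, `watch_channel` stays keyed by the funding outpoint while helpers such as `get_monitor!` now take a `ChannelId`; elsewhere in this patch the read path builds a `funding_txo_to_channel_id` map so either handle can be resolved. An illustrative sketch of that two-key arrangement, again with hypothetical types rather than LDK's:

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct Outpoint { txid: [u8; 32], index: u16 }
    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct ChanId([u8; 32]);

    struct Monitors {
        // Monitors stay keyed by their funding outpoint...
        by_funding: HashMap<Outpoint, String>,
        // ...while an auxiliary index lets ChannelId-based callers reach them.
        id_to_funding: HashMap<ChanId, Outpoint>,
    }

    impl Monitors {
        fn watch(&mut self, funding: Outpoint, id: ChanId, monitor: String) {
            self.by_funding.insert(funding, monitor);
            self.id_to_funding.insert(id, funding);
        }
        fn get_by_id(&self, id: &ChanId) -> Option<&String> {
            self.id_to_funding.get(id).and_then(|f| self.by_funding.get(f))
        }
    }

    fn main() {
        let mut m = Monitors { by_funding: HashMap::new(), id_to_funding: HashMap::new() };
        let funding = Outpoint { txid: [2; 32], index: 0 };
        let id = ChanId([9; 32]);
        m.watch(funding, id, "monitor".to_string());
        assert!(m.get_by_id(&id).is_some());
    }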
@@ -25,7 +25,7 @@ use bitcoin::secp256k1::Secp256k1;
use crate::prelude::*;
use crate::ln::functional_test_utils::*;
use crate::ln::{functional_test_utils::*, ChannelId};
fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
// Our on-chain HTLC-claim learning has a few properties worth testing:
@@ -531,7 +531,7 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
assert_eq!(funding_outpoint.to_channel_id(), chan_id);
assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
let remote_txn_a = get_local_commitment_txn!(nodes[0], chan_id);
let remote_txn_b = get_local_commitment_txn!(nodes[1], chan_id);
@ -51,7 +51,7 @@ fn pre_funding_lock_shutdown_test() {
|
|||
mine_transaction(&nodes[0], &tx);
|
||||
mine_transaction(&nodes[1], &tx);
|
||||
|
||||
nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
|
||||
nodes[0].node.close_channel(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();
|
||||
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
|
||||
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
|
||||
let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
|
||||
|
@@ -721,7 +721,7 @@ fn test_upfront_shutdown_script() {

	// We test that in case of peer committing upfront to a script, if it changes at closing, we refuse to sign
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000);
	nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[2].node.get_our_node_id()).unwrap();
	nodes[0].node.close_channel(&chan.2, &nodes[2].node.get_our_node_id()).unwrap();
	let node_0_orig_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
	let mut node_0_shutdown = node_0_orig_shutdown.clone();
	node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();

@@ -736,7 +736,7 @@ fn test_upfront_shutdown_script() {

	// We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000);
	nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[2].node.get_our_node_id()).unwrap();
	nodes[0].node.close_channel(&chan.2, &nodes[2].node.get_our_node_id()).unwrap();
	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
	// We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
	nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);

@@ -750,7 +750,7 @@ fn test_upfront_shutdown_script() {
	// We test that in case of peer non-signaling we don't enforce the committed script at channel opening
	*nodes[0].override_init_features.borrow_mut() = Some(nodes[0].node.init_features().clear_upfront_shutdown_script());
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
	nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
	nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
	let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown);
	check_added_monitors!(nodes[1], 1);

@@ -765,7 +765,7 @@ fn test_upfront_shutdown_script() {
	// channel smoothly, opt-out is from channel initiator here
	*nodes[0].override_init_features.borrow_mut() = None;
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000);
	nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
	nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[1], 1);
	let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);

@@ -779,7 +779,7 @@ fn test_upfront_shutdown_script() {
	//// We test that if user opt-out, we provide a zero-length script at channel opening and we are able to close
	//// channel smoothly
	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
	nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
	nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[1], 1);
	let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);

@@ -894,7 +894,7 @@ fn test_segwit_v0_shutdown_script() {
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
	nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[1], 1);

	// Use a segwit v0 script supported even without option_shutdown_anysegwit

@@ -929,7 +929,7 @@ fn test_anysegwit_shutdown_script() {
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
	nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[1], 1);

	// Use a non-v0 segwit script supported by option_shutdown_anysegwit

@@ -975,14 +975,14 @@ fn test_unsupported_anysegwit_shutdown_script() {
		.expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script });

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()) {
	match nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()) {
		Err(APIError::IncompatibleShutdownScript { script }) => {
			assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner());
		},
		Err(e) => panic!("Unexpected error: {:?}", e),
		Ok(_) => panic!("Expected error"),
	}
	nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
	nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[1], 1);

	// Use a non-v0 segwit script unsupported without option_shutdown_anysegwit

@@ -1007,7 +1007,7 @@ fn test_invalid_shutdown_script() {
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
	nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[1], 1);

	// Use a segwit v0 script with an unsupported witness program

@@ -1041,7 +1041,7 @@ fn test_user_shutdown_script() {
	let shutdown_script = ShutdownScript::try_from(script.clone()).unwrap();

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap();
	nodes[1].node.close_channel_with_feerate_and_script(&chan.2, &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap();
	check_added_monitors!(nodes[1], 1);

	let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());

@@ -1068,7 +1068,7 @@ fn test_already_set_user_shutdown_script() {
	let shutdown_script = ShutdownScript::try_from(script).unwrap();

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let result = nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script));
	let result = nodes[1].node.close_channel_with_feerate_and_script(&chan.2, &nodes[0].node.get_our_node_id(), None, Some(shutdown_script));

	assert_eq!(result, Err(APIError::APIMisuseError { err: "Cannot override shutdown script for a channel with one already set".to_string() }));
}

@@ -1200,7 +1200,7 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) {
		*feerate_lock *= 10;
	}

	nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
	nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
	let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());

@@ -1240,7 +1240,7 @@ fn simple_target_feerate_shutdown() {
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_id = OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id();
	let chan_id = chan.2;

	nodes[0].node.close_channel_with_feerate_and_script(&chan_id, &nodes[1].node.get_our_node_id(), Some(253 * 10), None).unwrap();
	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());

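In the shutdown tests above, channels are now addressed by the `ChannelId` that channel creation already returns (`chan.2`) rather than by re-deriving it from the funding transaction. A minimal sketch of doing the same against a running node, assuming a project that depends on the lightning crate (the helper name is illustrative, not part of the crate):

use bitcoin::secp256k1::PublicKey;
use lightning::ln::ChannelId;
use lightning::ln::channelmanager::ChannelDetails;

// Find the ChannelId to pass to `ChannelManager::close_channel` for a given peer,
// using the id carried in ChannelDetails rather than the funding outpoint.
fn channel_id_for_peer(channels: &[ChannelDetails], peer: &PublicKey) -> Option<ChannelId> {
	channels.iter()
		.find(|details| details.counterparty.node_id == *peer)
		.map(|details| details.channel_id)
}
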
@@ -8,6 +8,7 @@
// licenses.

use crate::chain::transaction::OutPoint;
use crate::ln::ChannelId;
use crate::sign::SpendableOutputDescriptor;

use bitcoin::hash_types::Txid;

@@ -41,24 +42,21 @@ macro_rules! log_bytes {
pub(crate) struct DebugFundingChannelId<'a>(pub &'a Txid, pub u16);
impl<'a> core::fmt::Display for DebugFundingChannelId<'a> {
	fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
		(OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().fmt(f)
	}
}
macro_rules! log_funding_channel_id {
	($funding_txid: expr, $funding_txo: expr) => {
		$crate::util::macro_logger::DebugFundingChannelId(&$funding_txid, $funding_txo)
		ChannelId::v1_from_funding_outpoint(OutPoint { txid: self.0.clone(), index: self.1 }).fmt(f)
	}
}

pub(crate) struct DebugFundingInfo<'a, T: 'a>(pub &'a (OutPoint, T));
impl<'a, T> core::fmt::Display for DebugFundingInfo<'a, T> {
pub(crate) struct DebugFundingInfo<'a>(pub &'a ChannelId);
impl<'a> core::fmt::Display for DebugFundingInfo<'a> {
	fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
		(self.0).0.to_channel_id().fmt(f)
		self.0.fmt(f)
	}
}
macro_rules! log_funding_info {
	($key_storage: expr) => {
		$crate::util::macro_logger::DebugFundingInfo(&$key_storage.get_funding_txo())
		$crate::util::macro_logger::DebugFundingInfo(
			&$key_storage.channel_id()
		)
	}
}

@@ -1052,9 +1052,9 @@ mod tests {
		{
			let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
			let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
			let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
			let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
			let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
			let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
			let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

			let ro_persister = MonitorUpdatingPersister {

@@ -341,7 +341,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
			&mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
		assert!(new_monitor == monitor);
		self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
		self.latest_monitor_update_id.lock().unwrap().insert(monitor.channel_id(),
			(funding_txo, monitor.get_latest_update_id(), MonitorUpdateId::from_new_monitor(&monitor)));
		self.added_monitors.lock().unwrap().push((funding_txo, monitor));
		self.chain_monitor.watch_channel(funding_txo, new_monitor)

@@ -353,18 +353,19 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
		update.write(&mut w).unwrap();
		assert!(channelmonitor::ChannelMonitorUpdate::read(
			&mut io::Cursor::new(&w.0)).unwrap() == *update);
		let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));

		self.monitor_updates.lock().unwrap().entry(funding_txo.to_channel_id()).or_insert(Vec::new()).push(update.clone());
		self.monitor_updates.lock().unwrap().entry(channel_id).or_insert(Vec::new()).push(update.clone());

		if let Some(exp) = self.expect_channel_force_closed.lock().unwrap().take() {
			assert_eq!(funding_txo.to_channel_id(), exp.0);
			assert_eq!(channel_id, exp.0);
			assert_eq!(update.updates.len(), 1);
			if let channelmonitor::ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
				assert_eq!(should_broadcast, exp.1);
			} else { panic!(); }
		}

		self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
		self.latest_monitor_update_id.lock().unwrap().insert(channel_id,
			(funding_txo, update.update_id, MonitorUpdateId::from_monitor_update(update)));
		let update_res = self.chain_monitor.update_channel(funding_txo, update);
		// At every point where we get a monitor update, we should be able to send a useful monitor

@@ -375,7 +376,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
			&mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
		if let Some(chan_id) = self.expect_monitor_round_trip_fail.lock().unwrap().take() {
			assert_eq!(chan_id, funding_txo.to_channel_id());
			assert_eq!(chan_id, channel_id);
			assert!(new_monitor != *monitor);
		} else {
			assert!(new_monitor == *monitor);

@@ -384,7 +385,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
		update_res
	}

	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
		return self.chain_monitor.release_pending_monitor_events();
	}
}

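The `TestChainMonitor` change above mirrors how the crate now threads channel IDs through the monitor-update path: `ChannelMonitorUpdate::channel_id` may be absent (for example on updates written before the field existed), so the harness falls back to the v1 derivation from the funding outpoint. A standalone sketch of that fallback as in-crate code, assuming the same types the test harness uses (the function name is illustrative):

use crate::chain::channelmonitor::ChannelMonitorUpdate;
use crate::chain::transaction::OutPoint;
use crate::ln::ChannelId;

// When the update carries no channel_id, rebuild it from the funding outpoint
// via the v1 derivation, exactly as the test harness above does.
fn channel_id_for_update(update: &ChannelMonitorUpdate, funding_txo: OutPoint) -> ChannelId {
	update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo))
}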