//! The logic to monitor for on-chain transactions and create the relevant claim responses lives //! here. //! //! ChannelMonitor objects are generated by ChannelManager in response to relevant //! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can //! be made in responding to certain messages, see ManyChannelMonitor for more. //! //! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the //! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date //! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other //! security-domain-separated system design, you should consider having multiple paths for //! ChannelMonitors to get out of the HSM and onto monitoring devices. use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::transaction::{TxIn,TxOut,SigHashType,Transaction}; use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint; use bitcoin::blockdata::script::{Script, Builder}; use bitcoin::blockdata::opcodes; use bitcoin::consensus::encode; use bitcoin::util::hash::BitcoinHash; use bitcoin::util::bip143; use bitcoin_hashes::Hash; use bitcoin_hashes::sha256::Hash as Sha256; use bitcoin_hashes::hash160::Hash as Hash160; use bitcoin_hashes::sha256d::Hash as Sha256dHash; use secp256k1::{Secp256k1,Signature}; use secp256k1::key::{SecretKey,PublicKey}; use secp256k1; use ln::msgs::DecodeError; use ln::chan_utils; use ln::chan_utils::{HTLCOutputInCommitment, LocalCommitmentTransaction, HTLCType}; use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash}; use chain::chaininterface::{ChainListener, ChainWatchInterface, BroadcasterInterface, FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT}; use chain::transaction::OutPoint; use chain::keysinterface::SpendableOutputDescriptor; use util::logger::Logger; use util::ser::{ReadableArgs, Readable, Writer, Writeable, U48}; use 
util::{byte_utils, events};

use std::collections::{HashMap, hash_map, HashSet};
use std::sync::{Arc, Mutex};
use std::{hash, cmp, mem};

/// An error enum representing a failure to persist a channel monitor update.
#[derive(Clone)]
pub enum ChannelMonitorUpdateErr {
	/// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
	/// our state failed, but is expected to succeed at some point in the future).
	///
	/// Such a failure will "freeze" a channel, preventing us from revoking old states or
	/// submitting new commitment transactions to the remote party.
	/// ChannelManager::test_restore_channel_monitor can be used to retry the update(s) and restore
	/// the channel to an operational state.
	///
	/// Note that continuing to operate when no copy of the updated ChannelMonitor could be
	/// persisted is unsafe - if you failed to store the update on your own local disk you should
	/// instead return PermanentFailure to force closure of the channel ASAP.
	///
	/// Even when a channel has been "frozen" updates to the ChannelMonitor can continue to occur
	/// (eg if an inbound HTLC which we forwarded was claimed upstream resulting in us attempting
	/// to claim it on this channel) and those updates must be applied wherever they can be. At
	/// least one such updated ChannelMonitor must be persisted otherwise PermanentFailure should
	/// be returned to get things on-chain ASAP using only the in-memory copy. Obviously updates to
	/// the channel which would invalidate previous ChannelMonitors are not made when a channel has
	/// been "frozen".
	///
	/// Note that even if updates made after TemporaryFailure succeed you must still call
	/// test_restore_channel_monitor to ensure you have the latest monitor and re-enable normal
	/// channel operation.
	///
	/// For deployments where a copy of ChannelMonitors and other local state are backed up in a
	/// remote location (with local copies persisted immediately), it is anticipated that all
	/// updates will return TemporaryFailure until the remote copies could be updated.
	TemporaryFailure,
	/// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
	/// different watchtower and cannot update with all watchtowers that were previously informed
	/// of this channel). This will force-close the channel in question.
	///
	/// Should also be used to indicate a failure to update the local copy of the channel monitor.
	PermanentFailure,
}

/// General Err type for ChannelMonitor actions. Generally, this implies that the data provided is
/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::insert_combine this
/// means you tried to merge two monitors for different channels or for a channel which was
/// restored from a backup and then generated new commitment updates.
/// Contains a human-readable error message.
#[derive(Debug)]
pub struct MonitorUpdateError(pub &'static str);

/// Simple structure sent back by ManyChannelMonitor in case of HTLC detected onchain from a
/// forward channel and from which info are needed to update HTLC in a backward channel.
pub struct HTLCUpdate {
	// Hash identifying the HTLC whose on-chain resolution was observed.
	pub(super) payment_hash: PaymentHash,
	// Some(preimage) if the HTLC was claimed on-chain, None if it timed out.
	pub(super) payment_preimage: Option<PaymentPreimage>,
	// Upstream source to which success/failure must be propagated.
	pub(super) source: HTLCSource
}

/// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between
/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
/// events to it, while also taking any add_update_monitor events and passing them to some remote
/// server(s).
///
/// Note that any updates to a channel's monitor *must* be applied to each instance of the
/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
/// which we have revoked, allowing our counterparty to claim all funds in the channel!
///
/// User needs to notify implementors of ManyChannelMonitor when a new block is connected or
/// disconnected using their `block_connected` and `block_disconnected` methods. However, rather
/// than calling these methods directly, the user should register implementors as listeners to the
/// BlockNotifier and call the BlockNotifier's `block_(dis)connected` methods, which will notify
/// all registered listeners in one go.
pub trait ManyChannelMonitor: Send + Sync {
	/// Adds or updates a monitor for the given `funding_txo`.
	///
	/// Implementor must also ensure that the funding_txo outpoint is registered with any relevant
	/// ChainWatchInterfaces such that the provided monitor receives block_connected callbacks with
	/// any spends of it.
	fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr>;

	/// Used by ChannelManager to get list of HTLC resolved onchain and which needed to be updated
	/// with success or failure backward
	fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate>;
}

/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a
/// watchtower or watch our own channels.
///
/// Note that you must provide your own key by which to refer to channels.
///
/// If you're accepting remote monitors (ie are implementing a watchtower), you must verify that
/// users cannot overwrite a given channel by providing a duplicate key. ie you should probably
/// index by a PublicKey which is required to sign any updates.
///
/// If you're using this for local monitoring of your own channels, you probably want to use
/// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation.
pub struct SimpleManyChannelMonitor { #[cfg(test)] // Used in ChannelManager tests to manipulate channels directly pub monitors: Mutex>, #[cfg(not(test))] monitors: Mutex>, chain_monitor: Arc, broadcaster: Arc, pending_events: Mutex>, pending_htlc_updated: Mutex)>>>, logger: Arc, fee_estimator: Arc } impl<'a, Key : Send + cmp::Eq + hash::Hash> ChainListener for SimpleManyChannelMonitor { fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], _indexes_of_txn_matched: &[u32]) { let block_hash = header.bitcoin_hash(); let mut new_events: Vec = Vec::with_capacity(0); let mut htlc_updated_infos = Vec::new(); { let mut monitors = self.monitors.lock().unwrap(); for monitor in monitors.values_mut() { let (txn_outputs, spendable_outputs, mut htlc_updated) = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator); if spendable_outputs.len() > 0 { new_events.push(events::Event::SpendableOutputs { outputs: spendable_outputs, }); } for (ref txid, ref outputs) in txn_outputs { for (idx, output) in outputs.iter().enumerate() { self.chain_monitor.install_watch_outpoint((txid.clone(), idx as u32), &output.script_pubkey); } } htlc_updated_infos.append(&mut htlc_updated); } } { // ChannelManager will just need to fetch pending_htlc_updated and pass state backward let mut pending_htlc_updated = self.pending_htlc_updated.lock().unwrap(); for htlc in htlc_updated_infos.drain(..) { match pending_htlc_updated.entry(htlc.2) { hash_map::Entry::Occupied(mut e) => { // In case of reorg we may have htlc outputs solved in a different way so // we prefer to keep claims but don't store duplicate updates for a given // (payment_hash, HTLCSource) pair. 
let mut existing_claim = false; e.get_mut().retain(|htlc_data| { if htlc.0 == htlc_data.0 { if htlc_data.1.is_some() { existing_claim = true; true } else { false } } else { true } }); if !existing_claim { e.get_mut().push((htlc.0, htlc.1)); } } hash_map::Entry::Vacant(e) => { e.insert(vec![(htlc.0, htlc.1)]); } } } } let mut pending_events = self.pending_events.lock().unwrap(); pending_events.append(&mut new_events); } fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32) { let block_hash = header.bitcoin_hash(); let mut monitors = self.monitors.lock().unwrap(); for monitor in monitors.values_mut() { monitor.block_disconnected(disconnected_height, &block_hash, &*self.broadcaster, &*self.fee_estimator); } } } impl SimpleManyChannelMonitor { /// Creates a new object which can be used to monitor several channels given the chain /// interface with which to register to receive notifications. pub fn new(chain_monitor: Arc, broadcaster: Arc, logger: Arc, feeest: Arc) -> Arc> { let res = Arc::new(SimpleManyChannelMonitor { monitors: Mutex::new(HashMap::new()), chain_monitor, broadcaster, pending_events: Mutex::new(Vec::new()), pending_htlc_updated: Mutex::new(HashMap::new()), logger, fee_estimator: feeest, }); res } /// Adds or updates the monitor which monitors the channel referred to by the given key. pub fn add_update_monitor_by_key(&self, key: Key, monitor: ChannelMonitor) -> Result<(), MonitorUpdateError> { let mut monitors = self.monitors.lock().unwrap(); match monitors.get_mut(&key) { Some(orig_monitor) => { log_trace!(self, "Updating Channel Monitor for channel {}", log_funding_info!(monitor.key_storage)); return orig_monitor.insert_combine(monitor); }, None => {} }; match monitor.key_storage { Storage::Local { ref funding_info, .. 
} => { match funding_info { &None => { return Err(MonitorUpdateError("Try to update a useless monitor without funding_txo !")); }, &Some((ref outpoint, ref script)) => { log_trace!(self, "Got new Channel Monitor for channel {}", log_bytes!(outpoint.to_channel_id()[..])); self.chain_monitor.install_watch_tx(&outpoint.txid, script); self.chain_monitor.install_watch_outpoint((outpoint.txid, outpoint.index as u32), script); }, } }, Storage::Watchtower { .. } => { self.chain_monitor.watch_all_txn(); } } monitors.insert(key, monitor); Ok(()) } } impl ManyChannelMonitor for SimpleManyChannelMonitor { fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { match self.add_update_monitor_by_key(funding_txo, monitor) { Ok(_) => Ok(()), Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure), } } fn fetch_pending_htlc_updated(&self) -> Vec { let mut updated = self.pending_htlc_updated.lock().unwrap(); let mut pending_htlcs_updated = Vec::with_capacity(updated.len()); for (k, v) in updated.drain() { for htlc_data in v { pending_htlcs_updated.push(HTLCUpdate { payment_hash: k, payment_preimage: htlc_data.1, source: htlc_data.0, }); } } pending_htlcs_updated } } impl events::EventsProvider for SimpleManyChannelMonitor { fn get_and_clear_pending_events(&self) -> Vec { let mut pending_events = self.pending_events.lock().unwrap(); let mut ret = Vec::new(); mem::swap(&mut ret, &mut *pending_events); ret } } /// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction, /// instead claiming it in its own individual transaction. const CLTV_SHARED_CLAIM_BUFFER: u32 = 12; /// If an HTLC expires within this many blocks, force-close the channel to broadcast the /// HTLC-Success transaction. /// In other words, this is an upper bound on how many blocks we think it can take us to get a /// transaction confirmed (and we use it in a few more, equivalent, places). 
pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
/// Number of blocks by which point we expect our counterparty to have seen new blocks on the
/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
/// at CLTV expiration height but giving a grace period to our peer may be profitable for us if he
/// can provide an over-late preimage. Nevertheless, grace period has to be accounted in our
/// CLTV_EXPIRY_DELTA to be secure. Following this policy we may decrease the rate of channel failures
/// due to expiration but increase the cost of funds being locked longer in case of failure.
/// This delay also covers a low-power peer being slow to process blocks and so being behind us on
/// accurate block height.
/// In case of onchain failure to be passed backward we may see the last block of ANTI_REORG_DELAY
/// with at worst this delay, so we are not only using this value as a mercy for them but also
/// us as a safeguard to delay with enough time.
pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
/// Number of blocks we wait on seeing a HTLC output being solved before we fail corresponding inbound
/// HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us losing money.
/// We also use this delay to be sure we can remove our in-flight claim txn from bump candidates buffer.
/// It may cause spurious generation of bumped claim txn but that's all right given the outpoint is already
/// solved by a previous claim tx. What we want to avoid is reorg evicting our claim tx and us not
/// keeping bumping another claim tx to solve the outpoint.
pub(crate) const ANTI_REORG_DELAY: u32 = 6; #[derive(Clone, PartialEq)] enum Storage { Local { funding_key: SecretKey, revocation_base_key: SecretKey, htlc_base_key: SecretKey, delayed_payment_base_key: SecretKey, payment_base_key: SecretKey, shutdown_pubkey: PublicKey, funding_info: Option<(OutPoint, Script)>, current_remote_commitment_txid: Option, prev_remote_commitment_txid: Option, }, Watchtower { revocation_base_key: PublicKey, htlc_base_key: PublicKey, } } #[derive(Clone, PartialEq)] struct LocalSignedTx { /// txid of the transaction in tx, just used to make comparison faster txid: Sha256dHash, tx: LocalCommitmentTransaction, revocation_key: PublicKey, a_htlc_key: PublicKey, b_htlc_key: PublicKey, delayed_payment_key: PublicKey, per_commitment_point: PublicKey, feerate_per_kw: u64, htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, } #[derive(PartialEq)] enum InputDescriptors { RevokedOfferedHTLC, RevokedReceivedHTLC, OfferedHTLC, ReceivedHTLC, RevokedOutput, // either a revoked to_local output on commitment tx, a revoked HTLC-Timeout output or a revoked HTLC-Success output } /// When ChannelMonitor discovers an onchain outpoint being a step of a channel and that it needs /// to generate a tx to push channel state forward, we cache outpoint-solving tx material to build /// a new bumped one in case of lenghty confirmation delay #[derive(Clone, PartialEq)] enum InputMaterial { Revoked { script: Script, pubkey: Option, key: SecretKey, is_htlc: bool, amount: u64, }, RemoteHTLC { script: Script, key: SecretKey, preimage: Option, amount: u64, locktime: u32, }, LocalHTLC { script: Script, sigs: (Signature, Signature), preimage: Option, amount: u64, } } impl Writeable for InputMaterial { fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { match self { &InputMaterial::Revoked { ref script, ref pubkey, ref key, ref is_htlc, ref amount} => { writer.write_all(&[0; 1])?; script.write(writer)?; pubkey.write(writer)?; writer.write_all(&key[..])?; if 
*is_htlc { writer.write_all(&[0; 1])?; } else { writer.write_all(&[1; 1])?; } writer.write_all(&byte_utils::be64_to_array(*amount))?; }, &InputMaterial::RemoteHTLC { ref script, ref key, ref preimage, ref amount, ref locktime } => { writer.write_all(&[1; 1])?; script.write(writer)?; key.write(writer)?; preimage.write(writer)?; writer.write_all(&byte_utils::be64_to_array(*amount))?; writer.write_all(&byte_utils::be32_to_array(*locktime))?; }, &InputMaterial::LocalHTLC { ref script, ref sigs, ref preimage, ref amount } => { writer.write_all(&[2; 1])?; script.write(writer)?; sigs.0.write(writer)?; sigs.1.write(writer)?; preimage.write(writer)?; writer.write_all(&byte_utils::be64_to_array(*amount))?; } } Ok(()) } } impl Readable for InputMaterial { fn read(reader: &mut R) -> Result { let input_material = match >::read(reader)? { 0 => { let script = Readable::read(reader)?; let pubkey = Readable::read(reader)?; let key = Readable::read(reader)?; let is_htlc = match >::read(reader)? { 0 => true, 1 => false, _ => return Err(DecodeError::InvalidValue), }; let amount = Readable::read(reader)?; InputMaterial::Revoked { script, pubkey, key, is_htlc, amount } }, 1 => { let script = Readable::read(reader)?; let key = Readable::read(reader)?; let preimage = Readable::read(reader)?; let amount = Readable::read(reader)?; let locktime = Readable::read(reader)?; InputMaterial::RemoteHTLC { script, key, preimage, amount, locktime } }, 2 => { let script = Readable::read(reader)?; let their_sig = Readable::read(reader)?; let our_sig = Readable::read(reader)?; let preimage = Readable::read(reader)?; let amount = Readable::read(reader)?; InputMaterial::LocalHTLC { script, sigs: (their_sig, our_sig), preimage, amount } } _ => return Err(DecodeError::InvalidValue), }; Ok(input_material) } } /// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it /// once they mature to enough confirmations (ANTI_REORG_DELAY) #[derive(Clone, PartialEq)] enum 
OnchainEvent { /// Outpoint under claim process by our own tx, once this one get enough confirmations, we remove it from /// bump-txn candidate buffer. Claim { claim_request: Sha256dHash, }, /// HTLC output getting solved by a timeout, at maturation we pass upstream payment source information to solve /// inbound HTLC in backward channel. Note, in case of preimage, we pass info to upstream without delay as we can /// only win from it, so it's never an OnchainEvent HTLCUpdate { htlc_update: (HTLCSource, PaymentHash), }, /// Claim tx aggregate multiple claimable outpoints. One of the outpoint may be claimed by a remote party tx. /// In this case, we need to drop the outpoint and regenerate a new claim tx. By safety, we keep tracking /// the outpoint to be sure to resurect it back to the claim tx if reorgs happen. ContentiousOutpoint { outpoint: BitcoinOutPoint, input_material: InputMaterial, } } /// Higher-level cache structure needed to re-generate bumped claim txn if needed #[derive(Clone, PartialEq)] pub struct ClaimTxBumpMaterial { // At every block tick, used to check if pending claiming tx is taking too // much time for confirmation and we need to bump it. height_timer: u32, // Tracked in case of reorg to wipe out now-superflous bump material feerate_previous: u64, // Soonest timelocks among set of outpoints claimed, used to compute // a priority of not feerate soonest_timelock: u32, // Cache of script, pubkey, sig or key to solve claimable outputs scriptpubkey. 
per_input_material: HashMap, } impl Writeable for ClaimTxBumpMaterial { fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { writer.write_all(&byte_utils::be32_to_array(self.height_timer))?; writer.write_all(&byte_utils::be64_to_array(self.feerate_previous))?; writer.write_all(&byte_utils::be32_to_array(self.soonest_timelock))?; writer.write_all(&byte_utils::be64_to_array(self.per_input_material.len() as u64))?; for (outp, tx_material) in self.per_input_material.iter() { outp.write(writer)?; tx_material.write(writer)?; } Ok(()) } } impl Readable for ClaimTxBumpMaterial { fn read(reader: &mut R) -> Result { let height_timer = Readable::read(reader)?; let feerate_previous = Readable::read(reader)?; let soonest_timelock = Readable::read(reader)?; let per_input_material_len: u64 = Readable::read(reader)?; let mut per_input_material = HashMap::with_capacity(cmp::min(per_input_material_len as usize, MAX_ALLOC_SIZE / 128)); for _ in 0 ..per_input_material_len { let outpoint = Readable::read(reader)?; let input_material = Readable::read(reader)?; per_input_material.insert(outpoint, input_material); } Ok(Self { height_timer, feerate_previous, soonest_timelock, per_input_material }) } } const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; /// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates /// on-chain transactions to ensure no loss of funds occurs. /// /// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date /// information and are actively monitoring the chain. #[derive(Clone)] pub struct ChannelMonitor { commitment_transaction_number_obscure_factor: u64, key_storage: Storage, their_htlc_base_key: Option, their_delayed_payment_base_key: Option, funding_redeemscript: Option